diff --git a/Cinder/Antelope/__init__.py b/Cinder/Antelope/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Antelope/__init__.py +++ b/Cinder/Antelope/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Antelope/constants.py b/Cinder/Antelope/constants.py index 756c839..8b54853 100644 --- a/Cinder/Antelope/constants.py +++ b/Cinder/Antelope/constants.py @@ -37,7 +37,8 @@ DEFAULT_WAIT_INTERVAL = 5 MAX_NAME_LENGTH = 31 SOCKET_TIMEOUT = 52 -LOGIN_SOCKET_TIMEOUT = 4 +LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 PWD_EXPIRED_OR_INITIAL = (3, 4) LUN_STATUS = (LUN_ONLINE, LUN_INITIALIZING, LUN_OFFLINE) = ('27', '53', '28') diff --git a/Cinder/Antelope/huawei_base_driver.py b/Cinder/Antelope/huawei_base_driver.py index cd6926f..3ff5fc1 100644 --- a/Cinder/Antelope/huawei_base_driver.py +++ b/Cinder/Antelope/huawei_base_driver.py @@ -55,7 +55,7 @@ class HuaweiBaseDriver(object): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -86,7 +86,8 @@ def do_setup(self, context): 'ssl_cert_verify': self.configuration.ssl_cert_verify, 'ssl_cert_path': self.configuration.ssl_cert_path, 'in_band_or_not': self.configuration.in_band_or_not, - 'storage_sn': self.configuration.storage_sn + 'storage_sn': self.configuration.storage_sn, + 'semaphore': self.configuration.semaphore } self.local_cli = rest_client.RestClient(config_dict) self.local_cli.login() @@ -97,11 +98,17 @@ def do_setup(self, context): self.support_capability[c] = False if self.configuration.hypermetro: + self.configuration.hypermetro.update( + {'semaphore': self.configuration.semaphore} + ) self.hypermetro_rmt_cli = rest_client.RestClient( self.configuration.hypermetro) self.hypermetro_rmt_cli.login() if self.configuration.replication: + self.configuration.replication.update( + {'semaphore': self.configuration.semaphore} + ) self.replication_rmt_cli = rest_client.RestClient( self.configuration.replication) self.replication_rmt_cli.login() diff --git a/Cinder/Antelope/huawei_conf.py b/Cinder/Antelope/huawei_conf.py index b76e6db..d93775c 100644 --- a/Cinder/Antelope/huawei_conf.py +++ b/Cinder/Antelope/huawei_conf.py @@ -84,6 +84,7 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._set_qos_ignored_param, + self._get_rest_client_semaphore, ) for f in attr_funcs: @@ -640,3 +641,15 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. 
The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) diff --git a/Cinder/Antelope/rest_client.py b/Cinder/Antelope/rest_client.py index 9d5c923..8932395 100644 --- a/Cinder/Antelope/rest_client.py +++ b/Cinder/Antelope/rest_client.py @@ -38,10 +38,6 @@ def _error_code(result): return result['error']['code'] -# To limit the requests concurrently sent to array -_semaphore = threading.Semaphore(20) - - def obj_operation_wrapper(func): @functools.wraps(func) def wrapped(self, url_format=None, **kwargs): @@ -49,7 +45,7 @@ def wrapped(self, url_format=None, **kwargs): if url_format: url += url_format % kwargs - _semaphore.acquire() + self.semaphore.acquire() try: result = func(self, url, **kwargs) @@ -57,7 +53,7 @@ def wrapped(self, url_format=None, **kwargs): return {"error": {"code": exc.response.status_code, "description": six.text_type(exc)}} finally: - _semaphore.release() + self.semaphore.release() return result @@ -67,6 +63,7 @@ def wrapped(self, url_format=None, **kwargs): class CommonObject(object): def __init__(self, client): self.client = client + self.semaphore = client.semaphore @obj_operation_wrapper def post(self, url, **kwargs): @@ -1403,12 +1400,9 @@ def wrapped(self, url, **kwargs): need_relogin = False if not kwargs.get('log_filter'): - LOG.info('\nURL: %(url)s\n' - 'Method: %(method)s\n' - 'Data: %(data)s\n', + LOG.info('URL: %(url)s, Method: %(method)s, Data: %(data)s,', {'url': (self._login_url or '') + url, - 'method': func.__name__, - 'data': kwargs.get('data')}) + 'method': func.__name__, 'data': kwargs.get('data')}) with self._session_lock.read_lock(): if self._login_url: @@ -1451,8 +1445,10 @@ def wrapped(self, url, **kwargs): r.raise_for_status() result = r.json() + response_time = r.elapsed.total_seconds() if not kwargs.get('log_filter'): - LOG.info('Response: %s', result) + LOG.info('Response: %s, Response duration time is %s', + result, response_time) return result return wrapped @@ -1468,6 +1464,9 @@ def __init__(self, config_dict): self.cert_path = config_dict.get('ssl_cert_path') self.in_band_or_not = config_dict.get('in_band_or_not') self.storage_sn = config_dict.get('storage_sn') + # To limit the requests concurrently sent to array + self.semaphore = threading.Semaphore( + config_dict.get('semaphore', constants.DEFAULT_SEMAPHORE)) self._login_url = None self._login_device_id = None diff --git a/Cinder/Bobcat/__init__.py b/Cinder/Bobcat/__init__.py new file mode 100644 index 0000000..7da7786 --- /dev/null +++ b/Cinder/Bobcat/__init__.py @@ -0,0 +1 @@ +"""Version: 2.6.4""" diff --git a/Cinder/Bobcat/constants.py b/Cinder/Bobcat/constants.py new file mode 100644 index 0000000..8b54853 --- /dev/null +++ b/Cinder/Bobcat/constants.py @@ -0,0 +1,200 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
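[Review note] A minimal standalone sketch of the throttling change this patch makes in rest_client.py: the old module-level Semaphore(20) shared by every backend is replaced by one semaphore per RestClient, sized from the new 'semaphore' config key. ExampleClient and send() are illustrative names; threading.Semaphore, the config key, and the default of 20 mirror the patch.

import threading

DEFAULT_SEMAPHORE = 20  # same default as constants.DEFAULT_SEMAPHORE


class ExampleClient(object):  # illustrative stand-in for RestClient
    def __init__(self, config_dict):
        # One semaphore per client instance, so the local, hypermetro and
        # replication backends each get their own concurrency budget
        # instead of sharing a single module-level Semaphore(20).
        self.semaphore = threading.Semaphore(
            config_dict.get('semaphore', DEFAULT_SEMAPHORE))

    def send(self, request):
        # Same effect as the acquire()/try/finally release() pair in
        # obj_operation_wrapper: at most N requests in flight per array.
        with self.semaphore:
            return request()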
+ +STATUS_INITIALIZE = '0' +STATUS_HEALTH = '1' +LUN_TYPE = '11' +SNAPSHOT_TYPE = '27' +BLOCK_POOL_TYPE = '1' +DORADO_V6_POOL_TYPE = '0' + +HOSTGROUP_PREFIX = 'OpenStack_HostGroup_' +LUNGROUP_PREFIX = 'OpenStack_LunGroup_' +MAPPING_VIEW_PREFIX = 'OpenStack_Mapping_View_' +PORTGROUP_PREFIX = 'OpenStack_PortGroup_' +QOS_NAME_PREFIX = 'OpenStack_' +SENSITIVE_KEYS = ['auth_password'] + +FC_PORT_CONNECTED = '10' +FC_INIT_ONLINE = '27' +FC_INITIATOR_NOT_EXIST = 1077948996 +ERROR_PARAMETER_ERROR = 50331651 +PARENT_TYPE_HOST = 21 +CAPACITY_UNIT = 1024 * 1024 * 2 +DEFAULT_WAIT_TIMEOUT = 3600 * 24 * 30 +DEFAULT_WAIT_INTERVAL = 5 +MAX_NAME_LENGTH = 31 +SOCKET_TIMEOUT = 52 +LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 +PWD_EXPIRED_OR_INITIAL = (3, 4) + +LUN_STATUS = (LUN_ONLINE, LUN_INITIALIZING, LUN_OFFLINE) = ('27', '53', '28') +SNAPSHOT_STATUS = ( + SNAPSHOT_INITIALIZING, SNAPSHOT_ACTIVATED, SNAPSHOT_UNACTIVATED +) = ('53', '43', '45') + +MIGRATION_STATUS_IN_PROCESS = ( + MIGRATION_NORMAL, MIGRATION_QUEUING, MIGRATION_MIGRATING +) = ('1', '37', '75') +MIGRATION_STATUS_COMPLETE = (MIGRATION_COMPLETE,) = ('76',) +LUNCOPY_STATUS_COMPLETE = (LUNCOPY_COMPLETE,) = ('40',) + +ERROR_CONNECT_TO_SERVER = -403 +ERROR_UNAUTHORIZED_TO_SERVER = -401 +ERROR_DEVICE_COMMUNICATE = 4294967297 +OBJECT_NAME_ALREADY_EXIST = 1077948993 +OBJECT_ID_NOT_UNIQUE = 1077948997 +ERROR_VOLUME_NOT_EXIST = 1077939726 +ERROR_LUN_NOT_EXIST = 1077936859 +SNAPSHOT_NOT_EXIST = 1077937880 +OBJECT_NOT_EXIST = 1077948996 +HYPERMETRO_NOT_EXIST = 1077674242 +HYPERMETRO_NOT_IN_GROUP = 1077675021 +HYPERMETROGROUP_NOT_EXIST = 1077675010 +HYPERMETRO_ALREADY_IN_GROUP = 1077675038 +NO_HYPERMETRO_EXIST_IN_GROUP = 1077675022 +HOSTGROUP_NOT_IN_MAPPINGVIEW = 1073804552 +PORTGROUP_NOT_IN_MAPPINGVIEW = 1073804553 +LUNGROUP_NOT_IN_MAPPINGVIEW = 1073804554 +MIGRATION_NOT_EXIST = 1073806607 +LUNCOPY_NOT_EXIST = 50338560 +LUNCOPY_ALREADY_STOPPED = 1077950178 +LUNCOPY_COMPLETED = 1077950180 +PORTGROUP_NOT_EXIST = 1077951832 +HOSTGROUP_NOT_EXIST = 1077937500 +HOST_NOT_IN_HOSTGROUP = 1073745412 +PORT_NOT_IN_PORTGROUP = 1073807618 +INITIATOR_NOT_IN_HOST = 1077950342 +HOST_NOT_EXIST = 1077937498 +MAPPINGVIEW_NOT_EXIST = 1077951819 +HOST_ALREADY_IN_HOSTGROUP = 1077937501 +PORT_ALREADY_IN_PORTGROUP = 1077951833 +HOSTGROUP_ALREADY_IN_MAPPINGVIEW = 1073804556 +PORTGROUP_ALREADY_IN_MAPPINGVIEW = 1073804558 +LUNGROUP_ALREADY_IN_MAPPINGVIEW = 1073804560 +LUN_ALREADY_IN_LUNGROUP = 1077936862 +ERROR_VOLUME_TIMEOUT = 1077949001 +GET_VOLUME_WAIT_INTERVAL = 30 +CREATE_HYPERMETRO_TIMEOUT = 1077949006 +HYPERMETRO_ALREADY_EXIST = 1077674256 +ERROR_VOLUME_ALREADY_EXIST = 1077948993 + +RELOGIN_ERROR_CODE = (ERROR_CONNECT_TO_SERVER, ERROR_UNAUTHORIZED_TO_SERVER, + ERROR_DEVICE_COMMUNICATE) + +METRO_RUNNING_STATUS = (METRO_RUNNING_NORMAL, METRO_RUNNING_SYNC, + METRO_RUNNING_STOP, RUNNING_TO_BE_SYNC + ) = ('1', '23', '41', '100') +METRO_HEALTH_NORMAL = '1' + +THICK_LUNTYPE = '0' +THIN_LUNTYPE = '1' +LUN_TYPE_MAP = {'Thick': THICK_LUNTYPE, + 'Thin': THIN_LUNTYPE} + +QOS_INACTIVATED = '45' +LOWER_LIMIT_KEYS = ('MINIOPS', 'LATENCY', 'MINBANDWIDTH') +UPPER_LIMIT_KEYS = ('MAXIOPS', 'MAXBANDWIDTH') + +REPLICA_SYNC_MODEL = '1' +REPLICA_ASYNC_MODEL = '2' +REPLICA_SPEED = '2' +REPLICA_PERIOD = '3600' +REPLICA_SECOND_RO = '2' +REPLICA_SECOND_RW = '3' +REPLICA_CG_PERIOD = '60' + +REPLICA_RUNNING_STATUS_SYNC = '23' +REPLICA_RUNNING_STATUS_NORMAL = '1' +REPLICA_RUNNING_STATUS_SPLIT = '26' +REPLICA_RUNNING_STATUS_INTERRUPTED = '34' +REPLICA_SECRES_DATA_SYNC = '1' 
+REPLICA_SECRES_DATA_COMPLETE = '2' +REPLICA_HEALTH_STATUS_NORMAL = '1' + +REPLICATION_PAIR_NOT_EXIST = 1077937923 +REPLICATION_GROUP_NOT_EXIST = 1077937924 +REPLICATION_PAIR_NOT_GROUP_MEMBER = 1077937927 +REPLICATION_GROUP_IS_EMPTY = 1077937960 +CLONE_PAIR_SYNC_COMPLETE = 1073798176 +CLONE_PAIR_SYNC_NOT_EXIST = 1073798172 + +VALID_PRODUCT = ('V3', 'V5', '18000', 'Dorado', 'V6') +TIER_DISK_TYPES = ('ssd', 'sas', 'nl_sas') +DORADO_V6_AND_V6_PRODUCT = ('Dorado', 'V6') + +AVAILABLE_FEATURE_STATUS = (1, 2) +CHECK_FEATURES = { + 'SmartTier': None, + 'SmartThin': None, + 'SmartQoS': 'ioclass', + 'SmartPartition': 'cachepartition', + 'SmartCache': 'smartcachepartition', + 'SmartMigration': 'LUN_MIGRATION', + 'HyperMetro': 'HyperMetroPair', + 'HyperReplication': 'REPLICATIONPAIR', + 'HyperSnap': 'snapshot', + 'HyperCopy': 'LUNCOPY', + 'SmartDedupe[\s\S]*LUN': None, + 'SmartCompression[\s\S]*LUN': None, + 'Effective Capacity': None, +} + +LUN_COPY_SPEED_TYPES = ( + LUN_COPY_SPEED_LOW, + LUN_COPY_SPEED_MEDIUM, + LUN_COPY_SPEED_HIGH, + LUN_COPY_SPEED_HIGHEST +) = ('1', '2', '3', '4') +DEFAULT_CLONE_MODE = "luncopy" + +HYPER_SYNC_SPEED_TYPES = ( + HYPER_SYNC_SPEED_LOW, + HYPER_SYNC_SPEED_MEDIUM, + HYPER_SYNC_SPEED_HIGH, + HYPER_SYNC_SPEED_HIGHEST +) = ('1', '2', '3', '4') + +REPLICA_SYNC_SPEED_TYPES = ( + REPLICA_SYNC_SPEED_LOW, + REPLICA_SYNC_SPEED_MEDIUM, + REPLICA_SYNC_SPEED_HIGH, + REPLICA_SYNC_SPEED_HIGHEST +) = ('1', '2', '3', '4') + +CLONE_STATUS_HEALTH = '0' +CLONE_STATUS_COMPLETE = (CLONE_COMPLETE,) = ('2',) +CLONE_PAIR_NOT_EXIST = "1073798147" +SUPPORT_CLONE_PAIR_VERSION = "V600R003C00" +GET_PATCH_NUM = 100 + +DEFAULT_MINIMUM_FC_INITIATOR_ONLINE = 0 + +SNAPSHOT_HEALTH_STATUS = ( + SNAPSHOT_HEALTH_STATUS_NORMAL, + SNAPSHOT_HEALTH_STATUS_FAULTY) = ('1', '2') +SNAPSHOT_RUNNING_STATUS = ( + SNAPSHOT_RUNNING_STATUS_ACTIVATED, + SNAPSHOT_RUNNING_STATUS_ROLLINGBACK) = ('43', '44') +SNAPSHOT_ROLLBACK_PROGRESS_FINISH = '100' +SNAPSHOT_ROLLBACK_SPEED_TYPES = ( + SNAPSHOT_ROLLBACK_SPEED_LOW, + SNAPSHOT_ROLLBACK_SPEED_MEDIUM, + SNAPSHOT_ROLLBACK_SPEED_HIGH, + SNAPSHOT_ROLLBACK_SPEED_HIGHEST +) = ('1', '2', '3', '4') + +INBAND_LUN_TYPE = '5' diff --git a/Cinder/Bobcat/huawei_base_driver.py b/Cinder/Bobcat/huawei_base_driver.py new file mode 100644 index 0000000..3ff5fc1 --- /dev/null +++ b/Cinder/Bobcat/huawei_base_driver.py @@ -0,0 +1,841 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
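[Review note] The capacity math in _get_capacity and manage_existing_get_size below relies on CAPACITY_UNIT = 1024 * 1024 * 2 from constants.py, i.e. the number of 512-byte sectors in 1 GB as reported by the array. A small worked example; the sector counts are invented for illustration.

CAPACITY_UNIT = 1024 * 1024 * 2   # 512-byte sectors per GB


def sectors_to_gb(sectors):
    # The REST API reports CAPACITY/USERCAPACITY in 512-byte sectors.
    return float(sectors) / CAPACITY_UNIT


# 4194304 sectors -> 2.0 GB. A LUN is manageable only if this divides
# evenly: manage_existing_get_size rejects a non-zero remainder.
assert sectors_to_gb('4194304') == 2.0
assert float('4194305') % CAPACITY_UNIT > 0  # would raise in the driver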
+ +import re +import six +import uuid + +from oslo_config import cfg +from oslo_log import log as logging + +from cinder import exception +from cinder.i18n import _ +from cinder import objects +from cinder.objects import fields + +from cinder.volume.drivers.huawei import constants +from cinder.volume.drivers.huawei import huawei_conf +from cinder.volume.drivers.huawei import huawei_flow +from cinder.volume.drivers.huawei import huawei_utils +from cinder.volume.drivers.huawei import hypermetro +from cinder.volume.drivers.huawei import replication +from cinder.volume.drivers.huawei import rest_client + +LOG = logging.getLogger(__name__) + +huawei_opts = [ + cfg.StrOpt('cinder_huawei_conf_file', + default='/etc/cinder/cinder_huawei_conf.xml', + help='The configuration file for Huawei driver.'), + cfg.DictOpt('hypermetro_device', + secret=True, + help='To represent a hypermetro target device, which takes ' + 'standard dict config form: hypermetro_device = ' + 'key1:value1,key2:value2...'), + cfg.BoolOpt('retain_storage_mapping', + default=False, + help='Whether to retain the storage mapping when the last ' + 'volume on the host is unmapped'), +] + +CONF = cfg.CONF +CONF.register_opts(huawei_opts) + + +class HuaweiBaseDriver(object): + VERSION = "2.6.4" + + def __init__(self, *args, **kwargs): + super(HuaweiBaseDriver, self).__init__(*args, **kwargs) + + if not self.configuration: + msg = _('Configuration is not found.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + self.configuration.append_config_values(huawei_opts) + + self.active_backend_id = kwargs.get('active_backend_id') + self.conf = huawei_conf.HuaweiConf(self.configuration) + self.local_cli = None + self.hypermetro_rmt_cli = None + self.replication_rmt_cli = None + self.support_capability = {} + self.configuration.is_dorado_v6 = False + + def do_setup(self, context): + self.conf.update_config_value() + + config_dict = { + 'san_address': self.configuration.san_address, + 'san_user': self.configuration.san_user, + 'san_password': self.configuration.san_password, + 'vstore_name': self.configuration.vstore_name, + 'ssl_cert_verify': self.configuration.ssl_cert_verify, + 'ssl_cert_path': self.configuration.ssl_cert_path, + 'in_band_or_not': self.configuration.in_band_or_not, + 'storage_sn': self.configuration.storage_sn, + 'semaphore': self.configuration.semaphore + } + self.local_cli = rest_client.RestClient(config_dict) + self.local_cli.login() + self.configuration.is_dorado_v6 = huawei_utils.is_support_clone_pair( + self.local_cli) + + for c in constants.CHECK_FEATURES: + self.support_capability[c] = False + + if self.configuration.hypermetro: + self.configuration.hypermetro.update( + {'semaphore': self.configuration.semaphore} + ) + self.hypermetro_rmt_cli = rest_client.RestClient( + self.configuration.hypermetro) + self.hypermetro_rmt_cli.login() + + if self.configuration.replication: + self.configuration.replication.update( + {'semaphore': self.configuration.semaphore} + ) + self.replication_rmt_cli = rest_client.RestClient( + self.configuration.replication) + self.replication_rmt_cli.login() + + def check_for_setup_error(self): + def _check_storage_pools(client, config_pools): + pools = client.get_all_pools() + pool_names = [p['NAME'] for p in pools if + p.get('USAGETYPE', constants.BLOCK_POOL_TYPE) in + (constants.BLOCK_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE)] + + for pool_name in config_pools: + if pool_name not in pool_names: + msg = _('Storage pool %s does not exist.') % pool_name + LOG.error(msg) + raise 
exception.InvalidInput(reason=msg) + + _check_storage_pools(self.local_cli, self.configuration.storage_pools) + if self.configuration.hypermetro: + _check_storage_pools( + self.hypermetro_rmt_cli, + self.configuration.hypermetro['storage_pools']) + if self.configuration.replication: + _check_storage_pools( + self.replication_rmt_cli, + self.configuration.replication['storage_pools']) + + # If host is failed-over, switch the local and remote client. + if (self.configuration.replication and self.active_backend_id == + self.configuration.replication['backend_id']): + self._switch_replication_clients() + + def backup_use_temp_snapshot(self): + return self.configuration.safe_get("backup_use_temp_snapshot") + + def create_export(self, context, volume, connector=None): + pass + + def ensure_export(self, context, volume): + pass + + def remove_export(self, context, volume): + pass + + def create_export_snapshot(self, context, snapshot, connector): + pass + + def remove_export_snapshot(self, context, snapshot): + pass + + def _get_capacity(self, pool_info): + """Get free capacity and total capacity of the pool.""" + free = pool_info.get('DATASPACE', pool_info['USERFREECAPACITY']) + total = pool_info.get('USERTOTALCAPACITY') + provisioned = 0 + if 'totalSizeWithoutSnap' in pool_info: + provisioned = pool_info['totalSizeWithoutSnap'] + elif 'LUNCONFIGEDCAPACITY' in pool_info: + provisioned = pool_info['LUNCONFIGEDCAPACITY'] + return (float(total) / constants.CAPACITY_UNIT, + float(free) / constants.CAPACITY_UNIT, + float(provisioned) / constants.CAPACITY_UNIT) + + def _get_disk_type(self, pool_info): + """Get disk type of the pool.""" + pool_disks = [] + for i, x in enumerate(constants.TIER_DISK_TYPES): + if (pool_info.get('TIER%dCAPACITY' % i) and + pool_info.get('TIER%dCAPACITY' % i) != '0'): + pool_disks.append(x) + + if len(pool_disks) > 1: + pool_disks = ['mix'] + + return pool_disks[0] if pool_disks else None + + def _get_smarttier(self, disk_type): + return disk_type is not None and disk_type == 'mix' + + def _get_pool_stats(self, pool_name, pool): + pool_info = self.local_cli.get_pool_by_name(pool_name) + if not pool_info: + return pool + total_capacity, free_capacity, provisioned_capacity = ( + self._get_capacity(pool_info)) + disk_type = self._get_disk_type(pool_info) + tier_support = self._get_smarttier(disk_type) + + pool['total_capacity_gb'] = total_capacity + pool['free_capacity_gb'] = free_capacity + pool['smarttier'] = (self.support_capability['SmartTier'] and + tier_support) + pool['provisioned_capacity_gb'] = provisioned_capacity + if disk_type: + pool['disk_type'] = disk_type + + return pool + + def _update_pool_stats(self): + pools = [] + for pool_name in self.configuration.storage_pools: + pool = { + 'pool_name': pool_name, + 'reserved_percentage': + self.configuration.reserved_percentage, + 'max_over_subscription_ratio': + self.configuration.max_over_subscription_ratio, + 'smartpartition': + self.support_capability['SmartPartition'], + 'smartcache': self.support_capability['SmartCache'], + 'QoS_support': self.support_capability['SmartQoS'], + 'thin_provisioning_support': + self.support_capability['SmartThin'], + 'thick_provisioning_support': True, + 'hypermetro': self.support_capability['HyperMetro'], + 'consistentcygroup_support': True, + 'consistent_group_snapshot_enabled': + self.support_capability['HyperSnap'], + 'location_info': self.local_cli.device_id, + 'replication_enabled': + self.support_capability['HyperReplication'], + 'replication_type': ['sync', 'async'], + 
'multiattach': True, + 'dedup': [self.support_capability['SmartDedupe[\s\S]*LUN'], + False], + 'compression': + [self.support_capability['SmartCompression[\s\S]*LUN'], + False], + 'huawei_controller': True, + 'huawei_application_type': False, + } + + if (self.configuration.san_product == "Dorado" or + self.configuration.san_product == "V6"): + pool['thick_provisioning_support'] = False + pool['huawei_application_type'] = True + + pool = self._get_pool_stats(pool_name, pool) + pools.append(pool) + + return pools + + def _update_hypermetro_capability(self): + if self.hypermetro_rmt_cli: + feature_status = self.hypermetro_rmt_cli.get_feature_status() + if (feature_status.get('HyperMetro') not in + constants.AVAILABLE_FEATURE_STATUS): + self.support_capability['HyperMetro'] = False + else: + self.support_capability['HyperMetro'] = False + + def _update_replication_capability(self): + self.support_capability['RemoteHyperReplication'] = False + if self.replication_rmt_cli: + feature_status = self.replication_rmt_cli.get_feature_status() + if (feature_status.get('HyperReplication') in + constants.AVAILABLE_FEATURE_STATUS): + self.support_capability['RemoteHyperReplication'] = True + + self.support_capability['HyperReplication'] = ( + self.support_capability['RemoteHyperReplication'] and + self.support_capability['HyperReplication']) + + def _update_support_capability(self): + feature_status = self.local_cli.get_feature_status() + + for c in constants.CHECK_FEATURES: + for f in feature_status: + if re.match(c, f): + self.support_capability[c] = ( + feature_status[f] in + constants.AVAILABLE_FEATURE_STATUS) + break + else: + if constants.CHECK_FEATURES[c]: + self.support_capability[c] = self.local_cli.check_feature( + constants.CHECK_FEATURES[c]) + + if (self.support_capability["Effective Capacity"] or + self.configuration.is_dorado_v6): + self.support_capability["SmartDedupe[\s\S]*LUN"] = True + self.support_capability["SmartCompression[\s\S]*LUN"] = True + + self._update_hypermetro_capability() + self._update_replication_capability() + + LOG.debug('Update backend capabilities: %s.', self.support_capability) + + def _update_volume_stats(self): + self._update_support_capability() + pools = self._update_pool_stats() + + self._stats['pools'] = pools + self._stats['volume_backend_name'] = ( + self.configuration.safe_get('volume_backend_name') or + self.__class__.__name__) + self._stats['driver_version'] = self.VERSION + self._stats['vendor_name'] = 'Huawei' + self._stats['replication_enabled'] = ( + self.support_capability['RemoteHyperReplication'] and + self.support_capability['HyperReplication']) + if self._stats['replication_enabled']: + self._stats['replication_targets'] = ( + [self.configuration.replication['backend_id']]) + + def get_volume_stats(self): + """Get volume status and reload huawei config file.""" + self.conf.update_config_value() + self._update_volume_stats() + + def create_volume(self, volume): + (lun_id, lun_wwn, hypermetro_id, replication_id + ) = huawei_flow.create_volume( + volume, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, self.configuration, + self.support_capability) + + model_update = huawei_utils.get_volume_model_update( + volume, huawei_lun_id=lun_id, huawei_lun_wwn=lun_wwn, + hypermetro_id=hypermetro_id, replication_id=replication_id, + huawei_sn=self.local_cli.device_id + ) + return model_update + + def delete_volume(self, volume): + try: + huawei_flow.delete_volume( + volume, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, 
self.configuration) + except Exception as exc: + if huawei_utils.is_not_exist_exc(exc): + return + LOG.exception('Delete volume %s failed.', volume.id) + raise + + def migrate_volume(self, ctxt, volume, host): + try: + huawei_flow.migrate_volume(volume, host, self.local_cli, + self.support_capability, + self.configuration) + except Exception: + LOG.exception('Migrate volume %s by backend failed.', volume.id) + return False, {} + + return True, {} + + def _change_lun_name(self, lun_id, rmt_lun_id, new_name, description=None): + if rmt_lun_id: + self.hypermetro_rmt_cli.rename_lun(rmt_lun_id, new_name, description) + self.local_cli.rename_lun(lun_id, new_name, description) + + def _get_lun_id(self, volume, metadata, new_metadata): + """ + same storage situation, if new_volume is not + hypermetro, we don't need to change remote lun name + """ + rmt_lun_id = None + if metadata.get('hypermetro') and new_metadata.get('hypermetro'): + rmt_lun_info = huawei_utils.get_lun_info( + self.hypermetro_rmt_cli, volume) + rmt_lun_id = rmt_lun_info.get('ID') + lun_info = huawei_utils.get_lun_info( + self.local_cli, volume) + return lun_info.get('ID'), rmt_lun_id + + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): + original_name = huawei_utils.encode_name(volume.id) + new_name = huawei_utils.encode_name(new_volume.id) + org_metadata = huawei_utils.get_volume_private_data(volume) + new_metadata = huawei_utils.get_volume_private_data(new_volume) + new_lun_id, new_rmt_lun_id = self._get_lun_id(new_volume, new_metadata, new_metadata) + + try: + if org_metadata.get('huawei_sn') == new_metadata.get('huawei_sn'): + lun_id, rmt_lun_id = self._get_lun_id(volume, org_metadata, new_metadata) + src_lun_name = new_name[:-4] + '-org' + self._change_lun_name(lun_id, rmt_lun_id, src_lun_name) + self._change_lun_name(new_lun_id, new_rmt_lun_id, original_name, volume.name) + self._change_lun_name(lun_id, rmt_lun_id, new_name) + else: + self._change_lun_name(new_lun_id, new_rmt_lun_id, original_name, volume.name) + except Exception: + LOG.exception('Unable to rename lun %(id)s to %(name)s.', + {'id': new_metadata['huawei_lun_id'], + 'name': original_name}) + name_id = new_volume.name_id + else: + LOG.info("Successfully rename lun %(id)s to %(name)s.", + {'id': new_metadata['huawei_lun_id'], + 'name': original_name}) + name_id = None + + return {'_name_id': name_id, + 'provider_location': huawei_utils.to_string(**new_metadata), + } + + def create_volume_from_snapshot(self, volume, snapshot): + (lun_id, lun_wwn, hypermetro_id, replication_id + ) = huawei_flow.create_volume_from_snapshot( + volume, snapshot, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, self.configuration, + self.support_capability) + + model_update = huawei_utils.get_volume_model_update( + volume, huawei_lun_id=lun_id, huawei_lun_wwn=lun_wwn, + hypermetro_id=hypermetro_id, replication_id=replication_id, + huawei_sn=self.local_cli.device_id + ) + return model_update + + def create_cloned_volume(self, volume, src_vref): + (lun_id, lun_wwn, hypermetro_id, replication_id + ) = huawei_flow.create_volume_from_volume( + volume, src_vref, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, self.configuration, + self.support_capability) + + model_update = huawei_utils.get_volume_model_update( + volume, huawei_lun_id=lun_id, huawei_lun_wwn=lun_wwn, + hypermetro_id=hypermetro_id, replication_id=replication_id, + huawei_sn=self.local_cli.device_id + ) + return model_update + + def 
extend_volume(self, volume, new_size): + huawei_flow.extend_volume( + volume, new_size, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, self.configuration) + + def create_snapshot(self, snapshot): + snapshot_id, snapshot_wwn = huawei_flow.create_snapshot( + snapshot, self.local_cli, self.support_capability) + self.local_cli.activate_snapshot(snapshot_id) + + location = huawei_utils.to_string( + huawei_snapshot_id=snapshot_id, + huawei_snapshot_wwn=snapshot_wwn) + return {'provider_location': location} + + def delete_snapshot(self, snapshot): + try: + huawei_flow.delete_snapshot(snapshot, self.local_cli) + except Exception as exc: + if huawei_utils.is_not_exist_exc(exc): + return + LOG.exception('Delete snapshot %s failed.', snapshot.id) + raise + + def retype(self, ctxt, volume, new_type, diff, host): + LOG.info('Start volume %(id)s retype. new_type: %(new_type)s, ' + 'diff: %(diff)s, host: %(host)s.', + {'id': volume.id, 'new_type': new_type, + 'diff': diff, 'host': host}) + + orig_lun_info = huawei_utils.get_lun_info(self.local_cli, volume) + if not orig_lun_info: + msg = _("Volume %s does not exist.") % volume.id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + new_opts = huawei_utils.get_volume_type_params( + new_type, self.configuration.is_dorado_v6) + if new_opts['compression'] is None: + new_opts['compression'] = (self.configuration.san_product + == "Dorado") + if new_opts['dedup'] is None: + new_opts['dedup'] = self.configuration.san_product == "Dorado" + + if huawei_utils.need_migrate(volume, host, new_opts, orig_lun_info): + hypermetro_id, replication_id = huawei_flow.retype_by_migrate( + volume, new_opts, host, self.local_cli, + self.hypermetro_rmt_cli, self.replication_rmt_cli, + self.configuration, self.support_capability) + else: + hypermetro_id, replication_id = huawei_flow.retype( + volume, new_opts, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, self.configuration, + self.support_capability) + + model_update = huawei_utils.get_volume_model_update( + volume, hypermetro_id=hypermetro_id, replication_id=replication_id) + + return True, model_update + + def manage_existing_get_size(self, volume, existing_ref): + lun_info = huawei_utils.get_external_lun_info(self.local_cli, + existing_ref) + if not lun_info: + msg = _("Lun %s to manage not exist.") % existing_ref + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + remainder = float(lun_info['CAPACITY']) % constants.CAPACITY_UNIT + if remainder > 0: + msg = _("LUN size must be times of 1GB.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + size = float(lun_info['CAPACITY']) / constants.CAPACITY_UNIT + return int(size) + + def manage_existing(self, volume, existing_ref): + (lun_id, lun_wwn, hypermetro_id, replication_id + ) = huawei_flow.manage_existing( + volume, existing_ref, self.local_cli, + self.hypermetro_rmt_cli, self.replication_rmt_cli, + self.configuration, self.support_capability) + + model_update = huawei_utils.get_volume_model_update( + volume, huawei_lun_id=lun_id, huawei_lun_wwn=lun_wwn, + hypermetro_id=hypermetro_id, replication_id=replication_id, + huawei_sn=self.local_cli.device_id + ) + return model_update + + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + snapshot_info = huawei_utils.get_external_snapshot_info( + self.local_cli, existing_ref) + if not snapshot_info: + msg = _("Snapshot %s not exist.") % existing_ref + LOG.error(msg) + raise 
exception.VolumeBackendAPIException(data=msg) + + remainder = float(snapshot_info['USERCAPACITY'] + ) % constants.CAPACITY_UNIT + if remainder > 0: + msg = _("Snapshot size must be times of 1GB.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + size = float(snapshot_info['USERCAPACITY']) / constants.CAPACITY_UNIT + return int(size) + + def manage_existing_snapshot(self, snapshot, existing_ref): + snapshot_id, snapshot_wwn = huawei_flow.manage_existing_snapshot( + snapshot, existing_ref, self.local_cli) + + location = huawei_utils.to_string( + huawei_snapshot_id=snapshot_id, + huawei_snapshot_wwn=snapshot_wwn) + return {'provider_location': location} + + def create_group(self, context, group): + huawei_flow.create_group( + group, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, self.configuration, + self.support_capability) + return {'status': fields.GroupStatus.AVAILABLE} + + def create_group_from_src(self, context, group, volumes, + group_snapshot=None, snapshots=None, + source_group=None, source_vols=None): + if self.configuration.clone_mode == "fastclone": + msg = ("Can't config fastclone when create " + "consisgroup from cgsnapshot or consisgroup") + LOG.error(msg) + raise exception.VolumeBackendAPIException(msg) + + model_update = self.create_group(context, group) + volumes_model_update = [] + delete_snapshots = False + + if not snapshots and source_vols: + snapshots = [] + for src_vol in source_vols: + vol_kwargs = { + 'id': src_vol.id, + 'provider_location': src_vol.provider_location, + } + snapshot_kwargs = {'id': six.text_type(uuid.uuid4()), + 'volume': objects.Volume(**vol_kwargs), + 'volume_size': src_vol.size} + snapshot = objects.Snapshot(**snapshot_kwargs) + snapshots.append(snapshot) + + snapshots_model_update = self._create_group_snapshot(snapshots) + for i, model in enumerate(snapshots_model_update): + snapshot = snapshots[i] + snapshot.provider_location = model['provider_location'] + + delete_snapshots = True + + if snapshots: + try: + for i, vol in enumerate(volumes): + snapshot = snapshots[i] + vol_model_update = self.create_volume_from_snapshot( + vol, snapshot) + vol_model_update.update({'id': vol.id}) + volumes_model_update.append(vol_model_update) + finally: + if delete_snapshots: + self._delete_group_snapshot(snapshots) + + return model_update, volumes_model_update + + def delete_group(self, context, group, volumes): + opts = huawei_utils.get_group_type_params( + group, self.configuration.is_dorado_v6) + + hypermetro_group = any(opt for opt in opts if opt.get('hypermetro')) + if hypermetro_group: + hypermetro_mgr = hypermetro.HuaweiHyperMetro( + self.local_cli, self.hypermetro_rmt_cli, + self.configuration) + hypermetro_mgr.delete_consistencygroup(group.id, volumes) + + replication_group = any(opt for opt in opts + if opt.get('replication_enabled')) + if replication_group: + replication_mgr = replication.ReplicationManager( + self.local_cli, self.replication_rmt_cli, + self.configuration) + replication_mgr.delete_group(group.id, volumes) + + model_update = {'status': fields.GroupStatus.DELETED} + + volumes_model_update = [] + for volume in volumes: + update = {'id': volume.id} + try: + self.delete_volume(volume) + update['status'] = 'deleted' + except Exception: + update['status'] = 'error_deleting' + finally: + volumes_model_update.append(update) + + return model_update, volumes_model_update + + def update_group(self, context, group, + add_volumes=None, remove_volumes=None): + opts = 
huawei_utils.get_group_type_params( + group, self.configuration.is_dorado_v6) + + hypermetro_group = any(opt for opt in opts if opt.get('hypermetro')) + if hypermetro_group: + hypermetro_mgr = hypermetro.HuaweiHyperMetro( + self.local_cli, self.hypermetro_rmt_cli, + self.configuration) + hypermetro_mgr.update_consistencygroup( + group.id, add_volumes, remove_volumes) + + replication_group = any(opt for opt in opts + if opt.get('replication_enabled')) + if replication_group: + replication_mgr = replication.ReplicationManager( + self.local_cli, self.replication_rmt_cli, + self.configuration) + replication_mgr.update_group( + group.id, add_volumes, remove_volumes) + + model_update = {'status': fields.GroupStatus.AVAILABLE} + + return model_update, None, None + + def create_group_snapshot(self, context, group_snapshot, snapshots): + try: + snapshots_model_update = self._create_group_snapshot(snapshots) + except Exception: + LOG.exception("Failed to create snapshots for group %s.", + group_snapshot.id) + raise + + model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} + return model_update, snapshots_model_update + + def _create_group_snapshot(self, snapshots): + snapshots_model_update = [] + created_snapshots = [] + + for snapshot in snapshots: + try: + snapshot_id, snapshot_wwn = huawei_flow.create_snapshot( + snapshot, self.local_cli, self.support_capability) + except Exception: + LOG.exception("Failed to create snapshot %s of group.", + snapshot.id) + for snap_id in created_snapshots: + self.local_cli.delete_snapshot(snap_id) + raise + + location = huawei_utils.to_string( + huawei_snapshot_id=snapshot_id, + huawei_snapshot_wwn=snapshot_wwn) + snap_model_update = { + 'id': snapshot.id, + 'status': fields.SnapshotStatus.AVAILABLE, + 'provider_location': location, + } + snapshots_model_update.append(snap_model_update) + created_snapshots.append(snapshot_id) + + try: + self.local_cli.activate_snapshot(created_snapshots) + except Exception: + LOG.exception("Failed to activate group snapshots %s.", + created_snapshots) + for snap_id in created_snapshots: + self.local_cli.delete_snapshot(snap_id) + raise + + return snapshots_model_update + + def delete_group_snapshot(self, context, group_snapshot, snapshots): + try: + snapshots_model_update = self._delete_group_snapshot(snapshots) + except Exception: + LOG.exception("Failed to delete snapshots for group %s.", + group_snapshot.id) + raise + + model_update = {'status': fields.GroupSnapshotStatus.DELETED} + return model_update, snapshots_model_update + + def _delete_group_snapshot(self, snapshots): + snapshots_model_update = [] + for snapshot in snapshots: + try: + self.delete_snapshot(snapshot) + snapshot_model = {'id': snapshot.id, + 'status': fields.SnapshotStatus.DELETED} + snapshots_model_update.append(snapshot_model) + except Exception: + LOG.exception("Failed to delete snapshot %s of group.", + snapshot.id) + raise + + return snapshots_model_update + + def failover_host(self, context, volumes, secondary_id=None, groups=None): + if secondary_id == 'default': + if not self.active_backend_id: + return None, [], [] + + volumes_update = huawei_flow.failback( + volumes, self.local_cli, self.replication_rmt_cli, + self.configuration) + secondary_id = '' + elif secondary_id in ( + None, self.configuration.replication['backend_id']): + if (self.active_backend_id == + self.configuration.replication['backend_id']): + # Already failover, return success + return self.active_backend_id, [], [] + + volumes_update = huawei_flow.failover( + volumes, 
self.local_cli, self.replication_rmt_cli, + self.configuration) + secondary_id = self.configuration.replication['backend_id'] + else: + msg = "Invalid secondary id %s." % secondary_id + raise exception.InvalidReplicationTarget(reason=msg) + + self.active_backend_id = secondary_id + self._switch_replication_clients() + + return secondary_id, volumes_update, [] + + def _switch_replication_clients(self): + self.local_cli, self.replication_rmt_cli = ( + self.replication_rmt_cli, self.local_cli) + (self.configuration.iscsi_info, + self.configuration.replication['iscsi_info']) = ( + self.configuration.replication['iscsi_info'], + self.configuration.iscsi_info + ) + + def _change_same_host_lun_id(self, local_mapping, remote_mapping): + loc_aval_host_lun_ids = local_mapping.get('aval_host_lun_ids', []) + rmt_aval_host_lun_ids = remote_mapping.get('aval_host_lun_ids', []) + + if local_mapping['hostlun_id'] == remote_mapping['hostlun_id']: + return local_mapping['hostlun_id'] + + for i in range(1, 512): + if i in loc_aval_host_lun_ids and i in rmt_aval_host_lun_ids: + same_host_lun_id = i + break + else: + same_host_lun_id = None + + if not same_host_lun_id: + msg = _("Can't find common host lun id for hypermetro volume.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.local_cli.change_hostlun_id( + local_mapping['mappingview_id'], local_mapping['lun_id'], + same_host_lun_id) + self.hypermetro_rmt_cli.change_hostlun_id( + remote_mapping['mappingview_id'], remote_mapping['lun_id'], + same_host_lun_id) + return same_host_lun_id + + def _merge_iscsi_mapping(self, local_mapping, remote_mapping, + same_host_lun_id): + local_mapping['target_iqns'].extend(remote_mapping['target_iqns']) + local_mapping['target_portals'].extend( + remote_mapping['target_portals']) + local_mapping['target_luns'] = [same_host_lun_id] * len( + local_mapping['target_portals']) + return local_mapping + + def _merge_fc_mapping(self, local_mapping, remote_mapping, + same_host_lun_id): + self._merge_ini_tgt_map(local_mapping['initiator_target_map'], + remote_mapping['initiator_target_map']) + local_mapping['target_lun'] = same_host_lun_id + local_mapping['target_wwn'] += remote_mapping['target_wwn'] + + return local_mapping + + def _merge_ini_tgt_map(self, loc, rmt): + for k in rmt: + loc[k] = loc.get(k, []) + rmt[k] + + def _is_volume_multi_attach_to_same_host(self, volume, connector): + attachments = volume.volume_attachment + if volume.multiattach and len(attachments) > 1 and sum( + 1 for a in attachments if a.connector == connector) > 1: + LOG.info("Volume is multi-attach and attached to the same host" + " multiple times") + return + + def revert_to_snapshot(self, context, volume, snapshot): + huawei_flow.revert_to_snapshot(snapshot, self.local_cli, + self.configuration.rollback_speed) diff --git a/Cinder/Bobcat/huawei_conf.py b/Cinder/Bobcat/huawei_conf.py new file mode 100644 index 0000000..d93775c --- /dev/null +++ b/Cinder/Bobcat/huawei_conf.py @@ -0,0 +1,655 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Set Huawei private configuration into Configuration object. + +For conveniently get private configuration. We parse Huawei config file +and set every property into Configuration object as an attribute. +""" + +import base64 +import os +import re + +from lxml import etree as ET +from oslo_log import log as logging +import six + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.huawei import constants + +LOG = logging.getLogger(__name__) + + +class HuaweiConf(object): + def __init__(self, conf): + self.conf = conf + self.last_modify_time = None + + def get_xml_info(self): + tree = ET.parse(self.conf.cinder_huawei_conf_file, + ET.XMLParser(resolve_entities=False)) + xml_root = tree.getroot() + return tree, xml_root + + def update_config_value(self): + file_time = os.stat(self.conf.cinder_huawei_conf_file).st_mtime + if self.last_modify_time == file_time: + return + + self.last_modify_time = file_time + tree, xml_root = self.get_xml_info() + self._encode_authentication(tree, xml_root) + + attr_funcs = ( + self._san_address, + self._san_user, + self._san_password, + self._san_vstore, + self._san_product, + self._ssl_cert_path, + self._ssl_cert_verify, + self._iscsi_info, + self._fc_info, + self._hyper_pair_sync_speed, + self._replication_pair_sync_speed, + self._hypermetro_devices, + self._replication_devices, + self._lun_type, + self._lun_write_type, + self._lun_prefetch, + self._storage_pools, + self._force_delete_volume, + self._lun_copy_speed, + self._lun_copy_mode, + self._lun_copy_wait_interval, + self._lun_timeout, + self._get_minimum_fc_initiator, + self._hyper_enforce_multipath, + self._rollback_speed, + self._get_local_in_band_or_not, + self._get_local_storage_sn, + self._set_qos_ignored_param, + self._get_rest_client_semaphore, + ) + + for f in attr_funcs: + f(xml_root) + + def _encode_authentication(self, tree, xml_root): + name_node = xml_root.find('Storage/UserName') + pwd_node = xml_root.find('Storage/UserPassword') + vstore_node = xml_root.find('Storage/vStoreName') + + need_encode = False + if name_node is not None and not name_node.text.startswith('!$$$'): + encoded = base64.b64encode(six.b(name_node.text)).decode() + name_node.text = '!$$$' + encoded + need_encode = True + + if pwd_node is not None and not pwd_node.text.startswith('!$$$'): + encoded = base64.b64encode(six.b(pwd_node.text)).decode() + pwd_node.text = '!$$$' + encoded + need_encode = True + + if vstore_node is not None and not vstore_node.text.startswith('!$$$'): + encoded = base64.b64encode(six.b(vstore_node.text)).decode() + vstore_node.text = '!$$$' + encoded + need_encode = True + + if need_encode: + tree.write(self.conf.cinder_huawei_conf_file, encoding='UTF-8') + + def _san_address(self, xml_root): + text = xml_root.findtext('Storage/RestURL') + if not text: + msg = _("RestURL is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + addrs = list(set([x.strip() for x in text.split(';') if x.strip()])) + setattr(self.conf, 'san_address', addrs) + + def _san_user(self, xml_root): + text = xml_root.findtext('Storage/UserName') + if not text: + msg = _("UserName is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + user = base64.b64decode(six.b(text[4:])).decode() + setattr(self.conf, 'san_user', user) + + def _san_password(self, xml_root): + text = xml_root.findtext('Storage/UserPassword') + if not text: + 
msg = _("UserPassword is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + pwd = base64.b64decode(six.b(text[4:])).decode() + setattr(self.conf, 'san_password', pwd) + + def _san_vstore(self, xml_root): + vstore = None + text = xml_root.findtext('Storage/vStoreName') + if text: + vstore = base64.b64decode(six.b(text[4:])).decode() + setattr(self.conf, 'vstore_name', vstore) + + @staticmethod + def _parser_ssl_value(ssl_value): + if ssl_value.lower() in ('true', 'false'): + return ssl_value.lower() == 'true' + else: + msg = _("SSLCertVerify configured error.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + def _get_ssl_verify(self, xml_root): + value = False + text = xml_root.findtext('Storage/SSLCertVerify') + if text: + value = self._parser_ssl_value(text) + return value + + def _ssl_cert_path(self, xml_root): + text = xml_root.findtext('Storage/SSLCertPath') + ssl_value = self._get_ssl_verify(xml_root) + if text and ssl_value: + setattr(self.conf, 'ssl_cert_path', text) + elif not text and ssl_value: + msg = _("Cert path is necessary if SSLCertVerify is True.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + else: + setattr(self.conf, 'ssl_cert_path', None) + + def _ssl_cert_verify(self, xml_root): + value = self._get_ssl_verify(xml_root) + setattr(self.conf, 'ssl_cert_verify', value) + + def _set_extra_constants_by_product(self, product): + extra_constants = {} + if product in constants.DORADO_V6_AND_V6_PRODUCT: + extra_constants['QOS_SPEC_KEYS'] = ( + 'maxIOPS', 'minIOPS', + 'maxBandWidth', 'minBandWidth', + 'burstIOPS', 'burstBandWidth', 'burstTime', + 'IOType') + extra_constants['QOS_IOTYPES'] = ('2',) + extra_constants['SUPPORT_LUN_TYPES'] = ('Thin',) + extra_constants['DEFAULT_LUN_TYPE'] = 'Thin' + extra_constants['SUPPORT_CLONE_MODE'] = ('fastclone', 'luncopy') + else: + extra_constants['QOS_SPEC_KEYS'] = ( + 'maxIOPS', 'minIOPS', 'minBandWidth', + 'maxBandWidth', 'latency', 'IOType') + extra_constants['QOS_IOTYPES'] = ('0', '1', '2') + extra_constants['SUPPORT_LUN_TYPES'] = ('Thick', 'Thin') + extra_constants['DEFAULT_LUN_TYPE'] = 'Thick' + extra_constants['SUPPORT_CLONE_MODE'] = ('luncopy',) + + for k in extra_constants: + setattr(constants, k, extra_constants[k]) + + def _san_product(self, xml_root): + text = xml_root.findtext('Storage/Product') + if not text: + msg = _("SAN product is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + product = text.strip() + if product not in constants.VALID_PRODUCT: + msg = _("Invalid SAN product %(text)s, SAN product must be " + "in %(valid)s.") % {'text': product, + 'valid': constants.VALID_PRODUCT} + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + self._set_extra_constants_by_product(product) + setattr(self.conf, 'san_product', product) + + def _lun_type(self, xml_root): + lun_type = constants.DEFAULT_LUN_TYPE + text = xml_root.findtext('LUN/LUNType') + if text: + lun_type = text.strip() + if lun_type not in constants.LUN_TYPE_MAP: + msg = _("Invalid lun type %s is configured.") % lun_type + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if lun_type not in constants.SUPPORT_LUN_TYPES: + msg = _("%(array)s array requires %(valid)s lun type, " + "but %(conf)s is specified." 
+ ) % {'array': self.conf.san_product, + 'valid': constants.SUPPORT_LUN_TYPES, + 'conf': lun_type} + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + setattr(self.conf, 'lun_type', constants.LUN_TYPE_MAP[lun_type]) + + def _lun_write_type(self, xml_root): + text = xml_root.findtext('LUN/WriteType') + if text: + write_type = text.strip() + if write_type: + setattr(self.conf, 'write_type', write_type) + + def _lun_prefetch(self, xml_root): + node = xml_root.find('LUN/Prefetch') + if node is not None: + if 'Type' in node.attrib: + prefetch_type = node.attrib['Type'].strip() + setattr(self.conf, 'prefetch_type', prefetch_type) + + if 'Value' in node.attrib: + prefetch_value = node.attrib['Value'].strip() + setattr(self.conf, 'prefetch_value', prefetch_value) + + def _storage_pools(self, xml_root): + text = xml_root.findtext('LUN/StoragePool') + if not text: + msg = _('Storage pool is not configured.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + pools = set(x.strip() for x in text.split(';') if x.strip()) + if not pools: + msg = _('No valid storage pool configured.') + LOG.error(msg) + raise exception.InvalidInput(msg) + + setattr(self.conf, 'storage_pools', list(pools)) + + def _force_delete_volume(self, xml_root): + force_delete_volume = False + text = xml_root.findtext('LUN/ForceDeleteVolume') + if text: + if text.lower().strip() in ('true', 'false'): + if text.lower().strip() == 'true': + force_delete_volume = True + else: + msg = _("ForceDeleteVolume configured error, " + "ForceDeleteVolume is %s.") % text + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + setattr(self.conf, 'force_delete_volume', force_delete_volume) + + def _iscsi_info(self, xml_root): + iscsi_info = {} + text = xml_root.findtext('iSCSI/DefaultTargetIP') + if text: + iscsi_info['default_target_ips'] = [ + ip.strip() for ip in text.split() if ip.strip()] + + initiators = {} + nodes = xml_root.findall('iSCSI/Initiator') + for node in nodes or []: + if 'Name' in node.attrib: + initiators[node.attrib['Name']] = node.attrib + if 'HostName' in node.attrib: + initiators[node.attrib['HostName']] = node.attrib + + if nodes and not initiators: + msg = _("Name or HostName must be set one") + LOG.error(msg) + raise exception.InvalidInput(msg) + + iscsi_info['initiators'] = initiators + self._check_hostname_regex_config(iscsi_info) + setattr(self.conf, 'iscsi_info', iscsi_info) + + def _fc_info(self, xml_root): + fc_info = {} + initiators = {} + nodes = xml_root.findall('FC/Initiator') + for node in nodes or []: + if 'Name' in node.attrib: + initiators[node.attrib['Name']] = node.attrib + if 'HostName' in node.attrib: + initiators[node.attrib['HostName']] = node.attrib + + if nodes and not initiators: + msg = _("Name or HostName must be set one") + LOG.error(msg) + raise exception.InvalidInput(msg) + + fc_info['initiators'] = initiators + self._check_hostname_regex_config(fc_info) + setattr(self.conf, 'fc_info', fc_info) + + def _check_hostname_regex_config(self, info): + for item in info['initiators'].keys(): + ini = info['initiators'][item] + if ini.get("HostName"): + try: + if ini.get("HostName") == '*': + continue + re.compile(ini['HostName']) + except Exception as err: + msg = _('Invalid initiator configuration. 
' + 'Reason: %s.') % err + LOG.error(msg) + raise exception.InvalidInput(msg) + + def _convert_one_iscsi_info(self, ini_text): + # get initiator configure attr list + attr_list = re.split('[{;}]', ini_text) + + # get initiator configures + ini = {} + for attr in attr_list: + if not attr: + continue + + pair = attr.split(':', 1) + if pair[0] == 'CHAPinfo': + value = pair[1].replace('#', ';', 1) + else: + value = pair[1] + ini[pair[0]] = value + if 'Name' not in ini and 'HostName' not in ini: + msg = _('Name or HostName must be specified for' + ' initiator.') + LOG.error(msg) + raise exception.InvalidInput(msg) + + return ini + + def _parse_remote_initiator_info(self, dev, ini_type): + ini_info = {'default_target_ips': []} + + if dev.get('iscsi_default_target_ip'): + ini_info['default_target_ips'] = dev[ + 'iscsi_default_target_ip'].split(';') + + initiators = {} + if ini_type in dev: + # Analyze initiators configure text, convert to: + # [{'Name':'xxx'}, {'Name':'xxx','CHAPinfo':'mm-usr#mm-pwd'}] + ini_list = re.split('\n', dev[ini_type]) + + for text in ini_list: + ini = self._convert_one_iscsi_info(text.strip()) + if 'Name' in ini: + initiators[ini['Name']] = ini + if 'HostName' in ini: + initiators[ini['HostName']] = ini + + if ini_list and not initiators: + msg = _("Name or HostName must be set one") + LOG.error(msg) + raise exception.InvalidInput(msg) + + ini_info['initiators'] = initiators + self._check_hostname_regex_config(ini_info) + return ini_info + + def _check_ssl_valid(self, dev): + ssl_cert_verify = dev.get('ssl_cert_verify', 'false') + ssl_verify = self._parser_ssl_value(ssl_cert_verify) + ssl_cert_path = dev.get('ssl_cert_path') + if not ssl_cert_path and ssl_verify: + msg = _("Cert path is necessary if SSLCertVerify is True.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + return ssl_verify + + def _hypermetro_devices(self, xml_root): + dev = self.conf.safe_get('hypermetro_device') + config = {} + + if dev: + ssl_verify = self._check_ssl_valid(dev) + config = { + 'san_address': dev['san_address'].split(';'), + 'san_user': dev['san_user'], + 'san_password': dev['san_password'], + 'vstore_name': dev.get('vstore_name'), + 'ssl_cert_verify': ssl_verify, + 'ssl_cert_path': dev.get('ssl_cert_path'), + 'metro_domain': dev['metro_domain'], + 'storage_pools': dev['storage_pool'].split(';')[:1], + 'iscsi_info': self._parse_remote_initiator_info( + dev, 'iscsi_info'), + 'fc_info': self._parse_remote_initiator_info( + dev, 'fc_info'), + 'sync_speed': self.conf.hyper_sync_speed, + 'metro_sync_completed': dev['metro_sync_completed'] + if 'metro_sync_completed' in dev else "True", + 'in_band_or_not': dev['in_band_or_not'].lower() == 'true' + if 'in_band_or_not' in dev else False, + 'storage_sn': dev.get('storage_sn') + } + + setattr(self.conf, 'hypermetro', config) + + def _replication_devices(self, xml_root): + replication_devs = self.conf.safe_get('replication_device') + config = {} + + if replication_devs: + dev = replication_devs[0] + ssl_verify = self._check_ssl_valid(dev) + config = { + 'backend_id': dev['backend_id'], + 'san_address': dev['san_address'].split(';'), + 'san_user': dev['san_user'], + 'san_password': dev['san_password'], + 'vstore_name': dev.get('vstore_name'), + 'ssl_cert_verify': ssl_verify, + 'ssl_cert_path': dev.get('ssl_cert_path'), + 'storage_pools': dev['storage_pool'].split(';')[:1], + 'iscsi_info': self._parse_remote_initiator_info( + dev, 'iscsi_info'), + 'fc_info': self._parse_remote_initiator_info( + dev, 'fc_info'), + 'sync_speed': 
self.conf.replica_sync_speed, + 'in_band_or_not': dev['in_band_or_not'].lower() == 'true' + if 'in_band_or_not' in dev else False, + 'storage_sn': dev.get('storage_sn') + } + + setattr(self.conf, 'replication', config) + + def _lun_copy_speed(self, xml_root): + text = xml_root.findtext('LUN/LUNCopySpeed') + if text and text.strip() not in constants.LUN_COPY_SPEED_TYPES: + msg = (_("Invalid LUNCopySpeed '%(text)s', LUNCopySpeed must " + "be between %(low)s and %(high)s.") + % {"text": text, "low": constants.LUN_COPY_SPEED_LOW, + "high": constants.LUN_COPY_SPEED_HIGHEST}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if not text: + speed = constants.LUN_COPY_SPEED_MEDIUM + else: + speed = text.strip() + setattr(self.conf, 'lun_copy_speed', int(speed)) + + def _lun_copy_mode(self, xml_root): + clone_mode = constants.DEFAULT_CLONE_MODE + text = xml_root.findtext('LUN/LUNCloneMode') + if text: + clone_mode = text.strip() + if clone_mode not in constants.SUPPORT_CLONE_MODE: + msg = _("%(array)s array requires %(valid)s lun type, " + "but %(conf)s is specified." + ) % {'array': self.conf.san_product, + 'valid': constants.SUPPORT_CLONE_MODE, + 'conf': clone_mode} + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + setattr(self.conf, 'clone_mode', clone_mode) + + def _hyper_pair_sync_speed(self, xml_root): + text = xml_root.findtext('LUN/HyperSyncSpeed') + if text and text.strip() not in constants.HYPER_SYNC_SPEED_TYPES: + msg = (_("Invalid HyperSyncSpeed '%(text)s', HyperSyncSpeed must " + "be between %(low)s and %(high)s.") + % {"text": text, "low": constants.HYPER_SYNC_SPEED_LOW, + "high": constants.HYPER_SYNC_SPEED_HIGHEST}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if not text: + speed = constants.HYPER_SYNC_SPEED_MEDIUM + else: + speed = text.strip() + setattr(self.conf, 'hyper_sync_speed', int(speed)) + + def _hyper_enforce_multipath(self, xml_root): + enforce_multipath_for_hypermetro = True + text = xml_root.findtext('LUN/HyperEnforceMultipath') + if text: + if text.lower().strip() in ('true', 'false'): + if text.lower().strip() == 'false': + enforce_multipath_for_hypermetro = False + else: + msg = _("HyperEnforceMultipath configured error, " + "HyperEnforceMultipath is %s.") % text + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + setattr(self.conf, 'enforce_multipath_for_hypermetro', + enforce_multipath_for_hypermetro) + + def _replication_pair_sync_speed(self, xml_root): + text = xml_root.findtext('LUN/ReplicaSyncSpeed') + if text and text.strip() not in constants.HYPER_SYNC_SPEED_TYPES: + msg = (_("Invalid ReplicaSyncSpeed '%(text)s', ReplicaSyncSpeed " + "must be between %(low)s and %(high)s.") + % {"text": text, "low": constants.REPLICA_SYNC_SPEED_LOW, + "high": constants.REPLICA_SYNC_SPEED_HIGHEST}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if not text: + speed = constants.REPLICA_SYNC_SPEED_MEDIUM + else: + speed = text.strip() + setattr(self.conf, 'replica_sync_speed', int(speed)) + + def _lun_copy_wait_interval(self, xml_root): + text = xml_root.findtext('LUN/LUNcopyWaitInterval') + + if text and not text.isdigit(): + msg = (_("Invalid LUN_Copy_Wait_Interval '%s', " + "LUN_Copy_Wait_Interval must be a digit.") + % text) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL + setattr(self.conf, 'lun_copy_wait_interval', int(interval)) + + def _lun_timeout(self, xml_root): + text = xml_root.findtext('LUN/Timeout') + + if 
text and not text.isdigit():
+            msg = (_("Invalid LUN timeout '%s', LUN timeout must be a digit.")
+                   % text)
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
+
+        interval = text.strip() if text else constants.DEFAULT_WAIT_TIMEOUT
+        setattr(self.conf, 'lun_timeout', int(interval))
+
+    def _get_minimum_fc_initiator(self, xml_root):
+        text = xml_root.findtext('FC/MinOnlineFCInitiator')
+        minimum_fc_initiator = constants.DEFAULT_MINIMUM_FC_INITIATOR_ONLINE
+
+        if text and not text.isdigit():
+            msg = (_("Invalid FC MinOnlineFCInitiator '%s', "
+                     "MinOnlineFCInitiator must be a digit.") % text)
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
+
+        if text and text.strip() and text.strip().isdigit():
+            try:
+                minimum_fc_initiator = int(text.strip())
+            except Exception as err:
+                msg = (_("Minimum FC initiator number %(num)s is set"
+                         " too large, reason is %(err)s")
+                       % {"num": text.strip(), "err": err})
+                LOG.error(msg)
+                raise exception.InvalidInput(reason=msg)
+        setattr(self.conf, 'min_fc_ini_online',
+                minimum_fc_initiator)
+
+    def _rollback_speed(self, xml_root):
+        text = xml_root.findtext('LUN/SnapshotRollbackSpeed')
+        if text and text.strip() not in constants.SNAPSHOT_ROLLBACK_SPEED_TYPES:
+            msg = (_("Invalid SnapshotRollbackSpeed '%(text)s', "
+                     "SnapshotRollbackSpeed must "
+                     "be between %(low)s and %(high)s.")
+                   % {"text": text,
+                      "low": constants.SNAPSHOT_ROLLBACK_SPEED_LOW,
+                      "high": constants.SNAPSHOT_ROLLBACK_SPEED_HIGHEST})
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
+
+        if not text:
+            speed = constants.SNAPSHOT_ROLLBACK_SPEED_HIGH
+        else:
+            speed = text.strip()
+        setattr(self.conf, 'rollback_speed', int(speed))
+
+    def _get_local_in_band_or_not(self, xml_root):
+        in_band_or_not = False
+        text = xml_root.findtext('Storage/InBandOrNot')
+        if text:
+            if text.lower() in ('true', 'false'):
+                in_band_or_not = text.lower() == 'true'
+            else:
+                msg = _("InBandOrNot configured error.")
+                LOG.error(msg)
+                raise exception.InvalidInput(reason=msg)
+
+        setattr(self.conf, 'in_band_or_not', in_band_or_not)
+
+    def _get_local_storage_sn(self, xml_root):
+        text = xml_root.findtext('Storage/Storagesn')
+        storage_sn = text.strip() if text else None
+
+        setattr(self.conf, 'storage_sn', storage_sn)
+
+    @staticmethod
+    def _set_qos_ignored_param(xml_root):
+        text = xml_root.findtext('LUN/QosIgnoredParam')
+        qos_ignored_params = []
+        if text:
+            qos_ignored_params = text.split(';')
+            qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip()))
+        setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params)
+
+    def _get_rest_client_semaphore(self, xml_root):
+        semaphore = xml_root.findtext('Storage/Semaphore')
+        if not semaphore or not semaphore.strip():
+            setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE)
+        elif semaphore.isdigit() and int(semaphore) > 0:
+            setattr(self.conf, 'semaphore', int(semaphore))
+        else:
+            msg = _("Semaphore configured error. The semaphore must be an "
+                    "integer and must be greater than zero")
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
diff --git a/Cinder/Bobcat/huawei_driver.py b/Cinder/Bobcat/huawei_driver.py
new file mode 100644
index 0000000..8c09a2e
--- /dev/null
+++ b/Cinder/Bobcat/huawei_driver.py
@@ -0,0 +1,300 @@
+# Copyright (c) 2016 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+
+from cinder import coordination
+from cinder import exception
+from cinder.i18n import _
+from cinder import interface
+
+from cinder.volume import driver
+from cinder.volume.drivers.huawei import constants
+from cinder.volume.drivers.huawei import huawei_base_driver
+from cinder.volume.drivers.huawei import huawei_flow
+from cinder.volume.drivers.huawei import huawei_utils
+from cinder.zonemanager import utils as zm_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+@interface.volumedriver
+class HuaweiISCSIDriver(huawei_base_driver.HuaweiBaseDriver,
+                        driver.ISCSIDriver):
+    def __init__(self, *args, **kwargs):
+        super(HuaweiISCSIDriver, self).__init__(*args, **kwargs)
+
+    def get_volume_stats(self, refresh=False):
+        if not self._stats or refresh:
+            super(HuaweiISCSIDriver, self).get_volume_stats()
+            self._stats['storage_protocol'] = 'iSCSI'
+
+        return self._stats
+
+    @coordination.synchronized('huawei-mapping-{connector[host]}')
+    def initialize_connection(self, volume, connector):
+        LOG.info('Initialize iscsi connection for volume %(id)s, '
+                 'connector info %(conn)s.',
+                 {'id': volume.id, 'conn': connector})
+        metadata = huawei_utils.get_volume_private_data(volume)
+        if metadata.get('hypermetro'):
+            if (not connector.get('multipath') and
+                    self.configuration.enforce_multipath_for_hypermetro):
+                msg = _("Mapping hypermetro volume must use multipath.")
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+            elif (not connector.get('multipath') and
+                    not self.configuration.enforce_multipath_for_hypermetro):
+                LOG.warning("Mapping hypermetro volume without multipath, "
+                            "so only the local lun will be mapped.")
+            if not self.hypermetro_rmt_cli:
+                msg = _("Mapping hypermetro volume requires a remote device.")
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+        local_mapping = huawei_flow.initialize_iscsi_connection(
+            volume, constants.LUN_TYPE, connector, self.local_cli,
+            self.configuration)
+        if metadata.get('hypermetro') and connector.get('multipath'):
+            hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume)
+            if not hypermetro:
+                msg = _("Mapping hypermetro remote volume error.")
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            remote_mapping = huawei_flow.initialize_remote_iscsi_connection(
+                hypermetro['ID'], connector, self.hypermetro_rmt_cli,
+                self.configuration)
+
+            same_host_lun_id = self._change_same_host_lun_id(
+                local_mapping, remote_mapping)
+            mapping_info = self._merge_iscsi_mapping(
+                local_mapping, remote_mapping, same_host_lun_id)
+        else:
+            mapping_info = local_mapping
+
+        mapping_info.pop('aval_host_lun_ids', None)
+        conn = {'driver_volume_type': 'iscsi',
+                'data': mapping_info}
+        LOG.info('Initialize iscsi connection successfully, '
+                 'return data is: %s.',
+                 huawei_utils.mask_dict_sensitive_info(conn))
+        return conn
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        if connector is None or 'host' not in connector:
+            host = ""
+        else:
+            host = connector.get('host', "")
+
+        return self._terminate_connection_locked(host, volume, connector)
+
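+    # Mapping changes for the same host are serialized via the
+    # 'huawei-mapping-{host}' lock so concurrent attach/detach requests
+    # cannot race on the host's array-side mapping objects.
+    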
@coordination.synchronized('huawei-mapping-{host}')
+    def _terminate_connection_locked(self, host, volume, connector):
+        LOG.info('Terminate iscsi connection for volume %(id)s, '
+                 'connector info %(conn)s.',
+                 {'id': volume.id, 'conn': connector})
+        if self._is_volume_multi_attach_to_same_host(volume, connector):
+            return
+
+        metadata = huawei_utils.get_volume_private_data(volume)
+        if metadata.get('hypermetro'):
+            hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume)
+            if hypermetro:
+                huawei_flow.terminate_remote_iscsi_connection(
+                    hypermetro['ID'], connector, self.hypermetro_rmt_cli,
+                    self.configuration)
+
+        huawei_flow.terminate_iscsi_connection(
+            volume, constants.LUN_TYPE, connector, self.local_cli,
+            self.configuration)
+        LOG.info('Terminate iscsi connection successfully.')
+
+    @coordination.synchronized('huawei-mapping-{connector[host]}')
+    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
+        LOG.info('Initialize iscsi connection for snapshot %(id)s, '
+                 'connector info %(conn)s.',
+                 {'id': snapshot.id, 'conn': connector})
+        mapping_info = huawei_flow.initialize_iscsi_connection(
+            snapshot, constants.SNAPSHOT_TYPE, connector, self.local_cli,
+            self.configuration)
+
+        mapping_info.pop('aval_host_lun_ids', None)
+        conn = {'driver_volume_type': 'iscsi',
+                'data': mapping_info}
+        LOG.info('Initialize iscsi connection successfully: %s.', conn)
+        return conn
+
+    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
+        if connector is None or 'host' not in connector:
+            host = ""
+        else:
+            host = connector.get('host', "")
+
+        return self._terminate_connection_snapshot_locked(host, snapshot,
+                                                          connector)
+
+    @coordination.synchronized('huawei-mapping-{host}')
+    def _terminate_connection_snapshot_locked(self, host, snapshot, connector):
+        LOG.info('Terminate iscsi connection for snapshot %(id)s, '
+                 'connector info %(conn)s.',
+                 {'id': snapshot.id, 'conn': connector})
+        huawei_flow.terminate_iscsi_connection(
+            snapshot, constants.SNAPSHOT_TYPE, connector, self.local_cli,
+            self.configuration)
+        LOG.info('Terminate iscsi connection successfully.')
+
+
+@interface.volumedriver
+class HuaweiFCDriver(huawei_base_driver.HuaweiBaseDriver,
+                     driver.FibreChannelDriver):
+    def __init__(self, *args, **kwargs):
+        super(HuaweiFCDriver, self).__init__(*args, **kwargs)
+        self.fc_san = zm_utils.create_lookup_service()
+
+    def get_volume_stats(self, refresh=False):
+        if not self._stats or refresh:
+            super(HuaweiFCDriver, self).get_volume_stats()
+            self._stats['storage_protocol'] = 'FC'
+
+        return self._stats
+
+    @coordination.synchronized('huawei-mapping-{connector[host]}')
+    def initialize_connection(self, volume, connector):
+        LOG.info('Initialize FC connection for volume %(id)s, '
+                 'connector info %(conn)s.',
+                 {'id': volume.id, 'conn': connector})
+
+        metadata = huawei_utils.get_volume_private_data(volume)
+        if metadata.get('hypermetro'):
+            if (not connector.get('multipath') and
+                    self.configuration.enforce_multipath_for_hypermetro):
+                msg = _("Mapping hypermetro volume must use multipath.")
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+            elif (not connector.get('multipath') and
+                    not self.configuration.enforce_multipath_for_hypermetro):
+                LOG.warning("Mapping hypermetro volume without multipath, "
+                            "so only the local lun will be mapped.")
+            if not self.hypermetro_rmt_cli:
+                msg = _("Mapping hypermetro volume requires a remote device.")
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
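+        # Map on the local array first; the hypermetro remote mapping, if
+        # any, is merged with it below.
+        local_mapping = 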
huawei_flow.initialize_fc_connection(
+            volume, constants.LUN_TYPE, connector, self.fc_san, self.local_cli,
+            self.configuration)
+        if metadata.get('hypermetro') and connector.get('multipath'):
+            hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume)
+            if not hypermetro:
+                msg = _("Mapping hypermetro remote volume error.")
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            remote_mapping = huawei_flow.initialize_remote_fc_connection(
+                hypermetro['ID'], connector, self.fc_san,
+                self.hypermetro_rmt_cli, self.configuration)
+            same_host_lun_id = self._change_same_host_lun_id(
+                local_mapping, remote_mapping)
+            mapping_info = self._merge_fc_mapping(
+                local_mapping, remote_mapping, same_host_lun_id)
+        else:
+            mapping_info = local_mapping
+
+        mapping_info.pop('aval_host_lun_ids', None)
+        conn = {'driver_volume_type': 'fibre_channel',
+                'data': mapping_info}
+        LOG.info('Initialize FC connection successfully: %s.', conn)
+        zm_utils.add_fc_zone(conn)
+        return conn
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        if connector is None or 'host' not in connector:
+            host = ""
+        else:
+            host = connector.get('host', "")
+
+        return self._terminate_connection_locked(host, volume, connector)
+
+    @coordination.synchronized('huawei-mapping-{host}')
+    def _terminate_connection_locked(self, host, volume, connector):
+        LOG.info('Terminate FC connection for volume %(id)s, '
+                 'connector info %(conn)s.',
+                 {'id': volume.id, 'conn': connector})
+        if self._is_volume_multi_attach_to_same_host(volume, connector):
+            return
+
+        metadata = huawei_utils.get_volume_private_data(volume)
+        rmt_ini_tgt_map = {}
+        if metadata.get('hypermetro'):
+            hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume)
+            if hypermetro:
+                rmt_ini_tgt_map = huawei_flow.terminate_remote_fc_connection(
+                    hypermetro['ID'], connector, self.fc_san,
+                    self.hypermetro_rmt_cli, self.configuration)
+
+        loc_ini_tgt_map = huawei_flow.terminate_fc_connection(
+            volume, constants.LUN_TYPE, connector, self.fc_san, self.local_cli,
+            self.configuration)
+        if rmt_ini_tgt_map:
+            self._merge_ini_tgt_map(loc_ini_tgt_map, rmt_ini_tgt_map)
+
+        conn = {'driver_volume_type': 'fibre_channel',
+                'data': {'initiator_target_map': loc_ini_tgt_map},
+                }
+        LOG.info('Terminate FC connection successfully: %s.', conn)
+        zm_utils.remove_fc_zone(conn)
+        return conn
+
+    @coordination.synchronized('huawei-mapping-{connector[host]}')
+    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
+        LOG.info('Initialize FC connection for snapshot %(id)s, '
+                 'connector info %(conn)s.',
+                 {'id': snapshot.id, 'conn': connector})
+        mapping_info = huawei_flow.initialize_fc_connection(
+            snapshot, constants.SNAPSHOT_TYPE, connector, self.fc_san,
+            self.local_cli, self.configuration)
+
+        mapping_info.pop('aval_host_lun_ids', None)
+        conn = {'driver_volume_type': 'fibre_channel',
+                'data': mapping_info}
+        LOG.info('Initialize FC connection successfully: %s.', conn)
+        zm_utils.add_fc_zone(conn)
+        return conn
+
+    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
+        if connector is None or 'host' not in connector:
+            host = ""
+        else:
+            host = connector.get('host', "")
+
+        return self._terminate_connection_snapshot_locked(host, snapshot,
+                                                          connector)
+
+    @coordination.synchronized('huawei-mapping-{host}')
+    def _terminate_connection_snapshot_locked(self, host, snapshot, connector):
+        LOG.info('Terminate FC connection for snapshot %(id)s, '
+                 'connector info %(conn)s.',
+                 {'id': snapshot.id, 'conn': connector})
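+        # Tear down the snapshot's FC mapping on the local array; the
+        # returned initiator/target map drives the zone removal below.
+        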
ini_tgt_map = huawei_flow.terminate_fc_connection( + snapshot, constants.SNAPSHOT_TYPE, connector, self.fc_san, + self.local_cli, self.configuration) + + conn = {'driver_volume_type': 'fibre_channel', + 'data': {'initiator_target_map': ini_tgt_map}, + } + LOG.info('Terminate FC connection successfully: %s.', conn) + zm_utils.remove_fc_zone(conn) + return conn diff --git a/Cinder/Bobcat/huawei_flow.py b/Cinder/Bobcat/huawei_flow.py new file mode 100644 index 0000000..15e3fc1 --- /dev/null +++ b/Cinder/Bobcat/huawei_flow.py @@ -0,0 +1,2770 @@ +# Copyright (c) 2017 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ipaddress +import json +import six +import uuid + +from oslo_log import log as logging +from oslo_utils import strutils + +import taskflow.engines +from taskflow.patterns import linear_flow +from taskflow import task +from taskflow.types import failure + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.huawei import constants +from cinder.volume.drivers.huawei import huawei_utils +from cinder.volume.drivers.huawei import hypermetro +from cinder.volume.drivers.huawei import replication +from cinder.volume.drivers.huawei import smartx +from cinder.volume import volume_utils + +LOG = logging.getLogger(__name__) + + +class LunOptsCheckTask(task.Task): + default_provides = 'opts' + + def __init__(self, client, feature_support, configuration, new_opts=None, + *args, **kwargs): + super(LunOptsCheckTask, self).__init__(*args, **kwargs) + self.client = client + self.feature_support = feature_support + self.configuration = configuration + self.new_opts = new_opts + + def execute(self, volume): + if self.new_opts: + opts = self.new_opts + else: + is_dorado_v6 = self.configuration.is_dorado_v6 + opts = huawei_utils.get_volume_params(volume, is_dorado_v6) + + huawei_utils.check_volume_type_valid(opts) + + feature_pairs = ( + ('qos', 'SmartQoS'), + ('smartcache', 'SmartCache'), + ('smartpartition', 'SmartPartition'), + ('hypermetro', 'HyperMetro'), + ('replication_enabled', 'HyperReplication'), + ('policy', 'SmartTier'), + ('dedup', 'SmartDedupe[\s\S]*LUN'), + ('compression', 'SmartCompression[\s\S]*LUN'), + ) + + for feature in feature_pairs: + if opts.get(feature[0]) and not self.feature_support[feature[1]]: + msg = _("Huawei storage doesn't support %s.") % feature[1] + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if opts.get('smartcache'): + smartcache = smartx.SmartCache(self.client) + smartcache.check_cache_valid(opts['cachename']) + + if opts.get('smartpartition'): + smartpartition = smartx.SmartPartition(self.client) + smartpartition.check_partition_valid(opts['partitionname']) + + return opts + + +class CreateLunTask(task.Task): + default_provides = ('lun_id', 'lun_info') + + def __init__(self, client, configuration, feature_support, + *args, **kwargs): + super(CreateLunTask, self).__init__(*args, **kwargs) + self.client = client + self.configuration = configuration + 
self.feature_support = feature_support
+
+    def _get_lun_application_name(self, opts, lun_params):
+        if opts.get('applicationname') is not None:
+            workload_type_id = self.client.get_workload_type_id(
+                opts['applicationname'])
+            if workload_type_id:
+                lun_params['WORKLOADTYPEID'] = workload_type_id
+            else:
+                msg = _("The workload type %s does not exist. Please create "
+                        "it on the array.") % opts['applicationname']
+                LOG.error(msg)
+                raise exception.InvalidInput(reason=msg)
+        return lun_params
+
+    def execute(self, volume, opts, src_size=None):
+        pool_name = volume_utils.extract_host(volume.host, level='pool')
+        pool_id = self.client.get_pool_id(pool_name)
+        if not pool_id:
+            msg = _("Pool %s doesn't exist in storage.") % pool_name
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        lun_params = {
+            'NAME': huawei_utils.encode_name(volume.id),
+            'PARENTID': pool_id,
+            'DESCRIPTION': volume.name,
+            'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type),
+            'CAPACITY': int(int(src_size) * constants.CAPACITY_UNIT if src_size
+                            else int(volume.size) * constants.CAPACITY_UNIT),
+        }
+
+        if opts.get('controllername'):
+            controller = self.client.get_controller_id(opts['controllername'])
+            if controller:
+                lun_params['OWNINGCONTROLLER'] = controller
+        if hasattr(self.configuration, 'write_type'):
+            lun_params['WRITEPOLICY'] = self.configuration.write_type
+        if hasattr(self.configuration, 'prefetch_type'):
+            lun_params['PREFETCHPOLICY'] = self.configuration.prefetch_type
+        if hasattr(self.configuration, 'prefetch_value'):
+            lun_params['PREFETCHVALUE'] = self.configuration.prefetch_value
+        if opts.get('policy'):
+            lun_params['DATATRANSFERPOLICY'] = opts['policy']
+
+        if opts.get('dedup') is not None:
+            lun_params['ENABLESMARTDEDUP'] = opts['dedup']
+        elif not self.feature_support['SmartDedupe[\s\S]*LUN']:
+            lun_params['ENABLESMARTDEDUP'] = False
+
+        if opts.get('compression') is not None:
+            lun_params['ENABLECOMPRESSION'] = opts['compression']
+        elif not self.feature_support['SmartCompression[\s\S]*LUN']:
+            lun_params['ENABLECOMPRESSION'] = False
+
+        lun_params = self._get_lun_application_name(opts, lun_params)
+
+        lun = self.client.create_lun(lun_params)
+        return lun['ID'], lun
+
+    def revert(self, result, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        self.client.delete_lun(result[0])
+
+
+class WaitLunOnlineTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(WaitLunOnlineTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, lun_id):
+        huawei_utils.wait_lun_online(self.client, lun_id)
+
+
+class AddQoSTask(task.Task):
+    default_provides = 'qos_id'
+
+    def __init__(self, client, configuration, *args, **kwargs):
+        super(AddQoSTask, self).__init__(*args, **kwargs)
+        self.smartqos = smartx.SmartQos(client, configuration.is_dorado_v6)
+
+    def execute(self, lun_id, opts):
+        if opts.get('qos'):
+            qos_id = self.smartqos.add(opts['qos'], lun_id)
+            return qos_id
+
+    def revert(self, result, lun_id, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        if result:
+            self.smartqos.remove(result, lun_id)
+
+
+class AddCacheTask(task.Task):
+    default_provides = 'cache_id'
+
+    def __init__(self, client, *args, **kwargs):
+        super(AddCacheTask, self).__init__(*args, **kwargs)
+        self.smartcache = smartx.SmartCache(client)
+
+    def execute(self, lun_id, opts):
+        if opts.get('smartcache'):
+            cache_id = self.smartcache.add(opts['cachename'], lun_id)
+            return cache_id
+
+    def revert(self, result, lun_id, **kwargs):
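+        # A failed execute leaves nothing to undo; only remove a cache
+        # binding that was actually created.
+        if 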
isinstance(result, failure.Failure):
+            return
+        if result:
+            self.smartcache.remove(result, lun_id)
+
+
+class AddPartitionTask(task.Task):
+    default_provides = 'partition_id'
+
+    def __init__(self, client, *args, **kwargs):
+        super(AddPartitionTask, self).__init__(*args, **kwargs)
+        self.smartpartition = smartx.SmartPartition(client)
+
+    def execute(self, lun_id, opts):
+        if opts.get('smartpartition'):
+            partition_id = self.smartpartition.add(
+                opts['partitionname'], lun_id)
+            return partition_id
+
+    def revert(self, result, lun_id, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        if result:
+            self.smartpartition.remove(result, lun_id)
+
+
+class CreateHyperMetroTask(task.Task):
+    default_provides = 'hypermetro_id'
+
+    def __init__(self, local_cli, remote_cli, config, is_sync=True,
+                 *args, **kwargs):
+        super(CreateHyperMetroTask, self).__init__(*args, **kwargs)
+        self.hypermetro = hypermetro.HuaweiHyperMetro(
+            local_cli, remote_cli, config)
+        self.loc_client = local_cli
+        self.rmt_client = remote_cli
+        self.sync = is_sync
+
+    def execute(self, volume, lun_id, lun_info, opts):
+        metadata = huawei_utils.get_volume_private_data(volume)
+        hypermetro_id = None
+
+        if not opts.get('hypermetro'):
+            return hypermetro_id
+
+        if metadata.get('hypermetro'):
+            hypermetro = huawei_utils.get_hypermetro(self.loc_client, volume)
+            hypermetro_id = hypermetro.get('ID') if hypermetro else None
+
+        if not hypermetro_id:
+            lun_keys = ('CAPACITY', 'ALLOCTYPE', 'PREFETCHPOLICY',
+                        'PREFETCHVALUE', 'WRITEPOLICY', 'DATATRANSFERPOLICY')
+            lun_params = {k: lun_info[k] for k in lun_keys if k in lun_info}
+            lun_params['NAME'] = huawei_utils.encode_name(volume.id)
+            lun_params['DESCRIPTION'] = volume.name
+            if (lun_info.get("WORKLOADTYPENAME") and
+                    lun_info.get("WORKLOADTYPEID")):
+                workload_type_name = self.loc_client.get_workload_type_name(
+                    lun_info['WORKLOADTYPEID'])
+                rmt_workload_type_id = self.rmt_client.get_workload_type_id(
+                    workload_type_name)
+                if rmt_workload_type_id:
+                    lun_params['WORKLOADTYPEID'] = rmt_workload_type_id
+                else:
+                    msg = _("The workload type %s does not exist. 
Please create "
+                            "it on the array.") % workload_type_name
+                    LOG.error(msg)
+                    raise exception.InvalidInput(reason=msg)
+
+            hypermetro_id = self.hypermetro.create_hypermetro(
+                lun_id, lun_params, self.sync)
+
+        return hypermetro_id
+
+    def revert(self, result, volume, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        if result:
+            self.hypermetro.delete_hypermetro(volume)
+
+
+class AddHyperMetroGroupTask(task.Task):
+    def __init__(self, local_cli, remote_cli, config, *args, **kwargs):
+        super(AddHyperMetroGroupTask, self).__init__(*args, **kwargs)
+        self.hypermetro = hypermetro.HuaweiHyperMetro(
+            local_cli, remote_cli, config)
+
+    def execute(self, volume, hypermetro_id):
+        if volume.group_id and hypermetro_id:
+            self.hypermetro.add_hypermetro_to_group(
+                volume.group_id, hypermetro_id)
+
+
+class CreateReplicationTask(task.Task):
+    default_provides = 'replication_id'
+
+    def __init__(self, local_cli, remote_cli, config, *args, **kwargs):
+        super(CreateReplicationTask, self).__init__(*args, **kwargs)
+        self.replication = replication.ReplicationManager(
+            local_cli, remote_cli, config)
+        self.loc_client = local_cli
+        self.rmt_client = remote_cli
+
+    def execute(self, volume, lun_id, lun_info, opts):
+        data = huawei_utils.get_replication_data(volume)
+        pair_id = data.get('pair_id')
+
+        if opts.get('replication_enabled') and not pair_id:
+            lun_keys = ('CAPACITY', 'ALLOCTYPE', 'PREFETCHPOLICY',
+                        'PREFETCHVALUE', 'WRITEPOLICY', 'DATATRANSFERPOLICY')
+            lun_params = {k: lun_info[k] for k in lun_keys if k in lun_info}
+            lun_params['NAME'] = huawei_utils.encode_name(volume.id)
+            lun_params['DESCRIPTION'] = volume.name
+            if (lun_info.get("WORKLOADTYPENAME") and
+                    lun_info.get("WORKLOADTYPEID")):
+                workload_type_name = self.loc_client.get_workload_type_name(
+                    lun_info['WORKLOADTYPEID'])
+                rmt_workload_type_id = self.rmt_client.get_workload_type_id(
+                    workload_type_name)
+                if rmt_workload_type_id:
+                    lun_params['WORKLOADTYPEID'] = rmt_workload_type_id
+                else:
+                    msg = _("The workload type %s does not exist. 
Please create "
+                            "it on the array.") % workload_type_name
+                    LOG.error(msg)
+                    raise exception.InvalidInput(reason=msg)
+
+            pair_id = self.replication.create_replica(
+                lun_id, lun_params, opts['replication_type'])
+        elif not opts.get('replication_enabled') and pair_id:
+            pair_id = None
+
+        return pair_id
+
+    def revert(self, result, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        if result:
+            self.replication.delete_replica(result)
+
+
+class AddReplicationGroupTask(task.Task):
+    def __init__(self, local_cli, remote_cli, config, *args, **kwargs):
+        super(AddReplicationGroupTask, self).__init__(*args, **kwargs)
+        self.replication = replication.ReplicationManager(
+            local_cli, remote_cli, config)
+
+    def execute(self, volume, replication_id):
+        if volume.group_id and replication_id:
+            self.replication.add_replication_to_group(
+                volume.group_id, replication_id)
+
+
+class CheckLunExistTask(task.Task):
+    default_provides = ('lun_info', 'lun_id')
+
+    def __init__(self, client, *args, **kwargs):
+        super(CheckLunExistTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, volume):
+        lun_info = huawei_utils.get_lun_info(self.client, volume)
+        if not lun_info:
+            msg = _("Volume %s does not exist.") % volume.id
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        return lun_info, lun_info['ID']
+
+
+class CheckLunIsInUse(task.Task):
+    def __init__(self, *args, **kwargs):
+        super(CheckLunIsInUse, self).__init__(*args, **kwargs)
+
+    def execute(self, opts, volume, lun_info):
+        """
+        opts: comes from LunOptsCheckTask
+        lun_info: comes from CheckLunExistTask
+        """
+        add_hypermetro = False
+        delete_hypermetro = False
+        metadata = huawei_utils.get_volume_private_data(volume)
+        in_use_lun = lun_info.get('EXPOSEDTOINITIATOR') == 'true'
+        if opts.get('hypermetro'):
+            if not metadata.get('hypermetro'):
+                add_hypermetro = True
+        else:
+            if metadata.get('hypermetro'):
+                delete_hypermetro = True
+
+        if (add_hypermetro or delete_hypermetro) and in_use_lun:
+            msg = _("Can't add or delete hypermetro for a volume in use.")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+
+class GetLunIDTask(task.Task):
+    default_provides = 'lun_id'
+
+    def __init__(self, client, *args, **kwargs):
+        super(GetLunIDTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, volume):
+        lun_info = huawei_utils.get_lun_info(self.client, volume)
+        if not lun_info:
+            LOG.error("Volume %s does not exist.", volume.id)
+            return None
+
+        return lun_info['ID']
+
+
+class CheckLunMappedTask(task.Task):
+    def __init__(self, client, configuration, *args, **kwargs):
+        super(CheckLunMappedTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.configuration = configuration
+
+    def execute(self, lun_info):
+        if lun_info.get('EXPOSEDTOINITIATOR') == 'true':
+            msg = _("LUN %s has been mapped to a host. 
Forcing "
+                    "deletion now.") % lun_info['ID']
+            LOG.warning(msg)
+            huawei_utils.remove_lun_from_lungroup(
+                self.client, lun_info["ID"],
+                self.configuration.force_delete_volume)
+
+
+class DeleteHyperMetroTask(task.Task):
+    def __init__(self, local_cli, remote_cli, config, *args, **kwargs):
+        super(DeleteHyperMetroTask, self).__init__(*args, **kwargs)
+        self.hypermetro = hypermetro.HuaweiHyperMetro(
+            local_cli, remote_cli, config)
+
+    def execute(self, volume, opts=None):
+        metadata = huawei_utils.get_volume_private_data(volume)
+
+        if ((not opts or not opts.get('hypermetro'))
+                and metadata.get('hypermetro')):
+            self.hypermetro.delete_hypermetro(volume)
+
+
+class DeleteReplicationTask(task.Task):
+    def __init__(self, local_cli, remote_cli, config, *args, **kwargs):
+        super(DeleteReplicationTask, self).__init__(*args, **kwargs)
+        self.replication = replication.ReplicationManager(
+            local_cli, remote_cli, config)
+
+    def execute(self, volume, opts=None):
+        data = huawei_utils.get_replication_data(volume)
+        pair_id = data.get('pair_id')
+        if (not opts or not opts.get('replication_enabled')) and pair_id:
+            self.replication.delete_replica(pair_id)
+
+
+class DeleteQoSTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(DeleteQoSTask, self).__init__(*args, **kwargs)
+        self.smartqos = smartx.SmartQos(client)
+
+    def execute(self, lun_info):
+        qos_id = lun_info.get('IOCLASSID')
+        if qos_id:
+            self.smartqos.remove(qos_id, lun_info['ID'])
+
+
+class DeleteCacheTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(DeleteCacheTask, self).__init__(*args, **kwargs)
+        self.smartcache = smartx.SmartCache(client)
+
+    def execute(self, lun_info):
+        cache_id = lun_info.get('SMARTCACHEPARTITIONID')
+        if cache_id:
+            self.smartcache.remove(cache_id, lun_info['ID'])
+
+
+class DeletePartitionTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(DeletePartitionTask, self).__init__(*args, **kwargs)
+        self.smartpartition = smartx.SmartPartition(client)
+
+    def execute(self, lun_info):
+        partition_id = lun_info.get('CACHEPARTITIONID')
+        if partition_id:
+            self.smartpartition.remove(partition_id, lun_info['ID'])
+
+
+class DeleteLunTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(DeleteLunTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, lun_id):
+        self.client.delete_lun(lun_id)
+
+
+class CreateMigratedLunTask(task.Task):
+    default_provides = ('tgt_lun_id', 'tgt_lun_info')
+
+    def __init__(self, client, host, feature_support, *args, **kwargs):
+        super(CreateMigratedLunTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.host = host
+        self.feature_support = feature_support
+
+    def execute(self, lun_info, opts=None):
+        if not self.feature_support['SmartMigration']:
+            msg = _("Huawei storage doesn't support SmartMigration.")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        target_device = self.host['capabilities']['location_info']
+        if target_device != self.client.device_id:
+            msg = _("Migrate target %(tgt)s is not the same storage as "
+                    "%(org)s.") % {'tgt': target_device,
+                                   'org': self.client.device_id}
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        pool_name = self.host['capabilities']['pool_name']
+        pool_id = self.client.get_pool_id(pool_name)
+        if not pool_id:
+            msg = _("Pool %s doesn't exist in storage.") % pool_name
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        if opts:
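+            # Retype options, when present, override the source LUN's
+            # allocation type and SmartTier policy on the target LUN.
+            new_lun_type = 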
opts.get('LUNType')
+            tier_policy = opts.get('policy')
+        else:
+            new_lun_type = None
+            tier_policy = None
+
+        lun_keys = ('DESCRIPTION', 'ALLOCTYPE', 'CAPACITY', 'WRITEPOLICY',
+                    'PREFETCHPOLICY', 'PREFETCHVALUE', 'DATATRANSFERPOLICY',
+                    'OWNINGCONTROLLER')
+        lun_params = {k: lun_info[k] for k in lun_keys if k in lun_info}
+        lun_params['NAME'] = lun_info['NAME'][:-4] + '-mig'
+        lun_params['PARENTID'] = pool_id
+        if new_lun_type:
+            lun_params['ALLOCTYPE'] = new_lun_type
+        if tier_policy:
+            lun_params['DATATRANSFERPOLICY'] = tier_policy
+        if lun_info.get("WORKLOADTYPENAME") and lun_info.get(
+                "WORKLOADTYPEID"):
+            lun_params["WORKLOADTYPEID"] = lun_info["WORKLOADTYPEID"]
+
+        lun = self.client.create_lun(lun_params)
+        return lun['ID'], lun
+
+    def revert(self, result, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        self.client.delete_lun(result[0])
+
+
+class CreateMigrateTask(task.Task):
+    default_provides = 'migration_id'
+
+    def __init__(self, client, *args, **kwargs):
+        super(CreateMigrateTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, src_lun_id, tgt_lun_id):
+        migration = self.client.create_lun_migration(src_lun_id, tgt_lun_id)
+        return migration['ID']
+
+    def revert(self, result, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        self.client.delete_lun_migration(result)
+
+
+class WaitMigrateDoneTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(WaitMigrateDoneTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, migration_id, tgt_lun_id):
+        def _migrate_done():
+            migration = self.client.get_lun_migration(migration_id)
+            if (migration['RUNNINGSTATUS'] in
+                    constants.MIGRATION_STATUS_IN_PROCESS):
+                return False
+            elif (migration['RUNNINGSTATUS'] in
+                    constants.MIGRATION_STATUS_COMPLETE):
+                return True
+            else:
+                msg = _("Migration %s failed.") % migration_id
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+        huawei_utils.wait_for_condition(_migrate_done,
+                                        constants.DEFAULT_WAIT_INTERVAL,
+                                        constants.DEFAULT_WAIT_TIMEOUT)
+        self.client.delete_lun_migration(migration_id)
+        self.client.delete_lun(tgt_lun_id)
+
+
+class CheckSnapshotExistTask(task.Task):
+    default_provides = ('snapshot_info', 'snapshot_id')
+
+    def __init__(self, client, *args, **kwargs):
+        super(CheckSnapshotExistTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, snapshot):
+        snapshot_info = huawei_utils.get_snapshot_info(self.client, snapshot)
+        if not snapshot_info:
+            msg = _("Snapshot %s does not exist.") % snapshot.id
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        return snapshot_info, snapshot_info['ID']
+
+
+class GetSnapshotIDTask(task.Task):
+    default_provides = 'snapshot_id'
+
+    def __init__(self, client, *args, **kwargs):
+        super(GetSnapshotIDTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, snapshot):
+        snapshot_info = huawei_utils.get_snapshot_info(self.client, snapshot)
+        if not snapshot_info:
+            LOG.error("Snapshot %s does not exist.", snapshot.id)
+            return None
+
+        return snapshot_info['ID']
+
+
+class CreateLunCopyTask(task.Task):
+    default_provides = 'luncopy_id'
+
+    def __init__(self, client, feature_support, configuration,
+                 *args, **kwargs):
+        super(CreateLunCopyTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.feature_support = feature_support
+        self.configuration = configuration
+
+    def execute(self, volume, snapshot_id, lun_id):
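+        # LUNcopy relies on the array's HyperCopy feature, so bail out
+        # early when the array does not support it.
+        if not 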
self.feature_support['HyperCopy']: + msg = _("Huawei storage doesn't support HyperCopy.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + copy_name = huawei_utils.encode_name(volume.id) + metadata = huawei_utils.get_volume_private_data(volume) + copyspeed = metadata.get('copyspeed') + if not copyspeed: + copyspeed = self.configuration.lun_copy_speed + elif copyspeed not in constants.LUN_COPY_SPEED_TYPES: + msg = (_("LUN copy speed is: %(speed)s. It should be between " + "%(low)s and %(high)s.") + % {"speed": copyspeed, + "low": constants.LUN_COPY_SPEED_LOW, + "high": constants.LUN_COPY_SPEED_HIGH}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + luncopy_id = self.client.create_luncopy( + copy_name, snapshot_id, lun_id, copyspeed) + return luncopy_id + + def revert(self, result, **kwargs): + if isinstance(result, failure.Failure): + return + self.client.delete_luncopy(result) + + +class WaitLunCopyDoneTask(task.Task): + def __init__(self, client, configuration, *args, **kwargs): + super(WaitLunCopyDoneTask, self).__init__(*args, **kwargs) + self.client = client + self.configuration = configuration + + def execute(self, luncopy_id): + self.client.start_luncopy(luncopy_id) + + def _luncopy_done(): + luncopy = self.client.get_luncopy_info(luncopy_id) + if luncopy['HEALTHSTATUS'] != constants.STATUS_HEALTH: + msg = _("Luncopy %s is abnormal.") % luncopy_id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return (luncopy['RUNNINGSTATUS'] in + constants.LUNCOPY_STATUS_COMPLETE) + huawei_utils.wait_for_condition( + _luncopy_done, self.configuration.lun_copy_wait_interval, + self.configuration.lun_timeout) + + self.client.delete_luncopy(luncopy_id) + + +class CreateClonePairTask(task.Task): + default_provides = 'clone_pair_id' + + def __init__(self, client, feature_support, configuration, + *args, **kwargs): + super(CreateClonePairTask, self).__init__(*args, **kwargs) + self.client = client + self.feature_support = feature_support + self.configuration = configuration + + def execute(self, source_id, target_id): + clone_speed = self.configuration.lun_copy_speed + clone_pair_id = self.client.create_clone_pair( + source_id, target_id, clone_speed) + return clone_pair_id + + def revert(self, result, **kwargs): + if isinstance(result, failure.Failure): + return + self.client.delete_clone_pair(result) + + +class WaitClonePairDoneTask(task.Task): + def __init__(self, client, configuration, *args, **kwargs): + super(WaitClonePairDoneTask, self).__init__(*args, **kwargs) + self.client = client + self.configuration = configuration + + def execute(self, clone_pair_id): + def _clone_pair_done(): + clone_pair_info = self.client.get_clone_pair_info(clone_pair_id) + if clone_pair_info['copyStatus'] != constants.CLONE_STATUS_HEALTH: + msg = _("ClonePair %s is abnormal.") % clone_pair_id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return (clone_pair_info['syncStatus'] in + constants.CLONE_STATUS_COMPLETE) + + self.client.sync_clone_pair(clone_pair_id) + huawei_utils.wait_for_condition( + _clone_pair_done, self.configuration.lun_copy_wait_interval, + self.configuration.lun_timeout) + self.client.delete_clone_pair(clone_pair_id) + + +class CreateLunCloneTask(task.Task): + default_provides = 'lun_id', 'lun_info' + + def __init__(self, client, *args, **kwargs): + super(CreateLunCloneTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, volume, src_id): + name = 
huawei_utils.encode_name(volume.id)
+        lun_info = self.client.create_lunclone(src_id, name)
+        lun_id = lun_info["ID"]
+        expected_size = int(volume.size) * constants.CAPACITY_UNIT
+        try:
+            if int(lun_info['CAPACITY']) < expected_size:
+                self.client.extend_lun(lun_id, expected_size)
+            self.client.split_lunclone(lun_id)
+        except Exception:
+            LOG.exception('Split clone lun %s error.', lun_id)
+            self.client.delete_lun(lun_id)
+            raise
+
+        lun_info = self.client.get_lun_info_by_id(lun_id)
+        return lun_info['ID'], lun_info
+
+
+class LunClonePreCheckTask(task.Task):
+    def __init__(self, *args, **kwargs):
+        super(LunClonePreCheckTask, self).__init__(*args, **kwargs)
+
+    @staticmethod
+    def execute(volume, src_volume):
+        if volume.volume_type_id != src_volume.volume_type_id:
+            msg = _("Volume type must be the same as source "
+                    "for fast clone.")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+
+class CreateSnapshotTask(task.Task):
+    default_provides = 'snapshot_id'
+
+    def __init__(self, client, feature_support, *args, **kwargs):
+        super(CreateSnapshotTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.feature_support = feature_support
+
+    def execute(self, snapshot):
+        if not self.feature_support['HyperSnap']:
+            msg = _("Huawei storage doesn't support snapshot.")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        lun_info = huawei_utils.get_lun_info(self.client, snapshot.volume)
+        if not lun_info:
+            msg = _("Source volume %s to create snapshot does not exist."
+                    ) % snapshot.volume.id
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        name = huawei_utils.encode_name(snapshot.id)
+        snapshot_info = self.client.create_snapshot(
+            lun_info['ID'], name, snapshot.id)
+        return snapshot_info['ID']
+
+    def revert(self, result, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        self.client.delete_snapshot(result)
+
+
+class CreateTempSnapshotTask(task.Task):
+    default_provides = 'snapshot_id'
+
+    def __init__(self, client, feature_support, *args, **kwargs):
+        super(CreateTempSnapshotTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.feature_support = feature_support
+
+    def execute(self, src_id):
+        if not self.feature_support['HyperSnap']:
+            msg = _("Huawei storage doesn't support snapshot.")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        snap_id = six.text_type(uuid.uuid4())
+        name = huawei_utils.encode_name(snap_id)
+        snapshot_info = self.client.create_snapshot(src_id, name, snap_id)
+        return snapshot_info['ID']
+
+    def revert(self, result, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        self.client.delete_snapshot(result)
+
+
+class ActiveSnapshotTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(ActiveSnapshotTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, snapshot_id):
+        self.client.activate_snapshot(snapshot_id)
+
+    def revert(self, snapshot_id):
+        self.client.stop_snapshot(snapshot_id)
+
+
+class WaitSnapshotReadyTask(task.Task):
+    default_provides = 'snapshot_wwn'
+
+    def __init__(self, client, *args, **kwargs):
+        super(WaitSnapshotReadyTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, snapshot_id):
+        def _snapshot_ready():
+            self.snapshot = self.client.get_snapshot_info_by_id(snapshot_id)
+            if self.snapshot['HEALTHSTATUS'] != constants.STATUS_HEALTH:
+                msg = _("Snapshot %s is faulty.") % snapshot_id
+                LOG.error(msg)
+                raise 
exception.VolumeBackendAPIException(data=msg)
+
+            return not (self.snapshot['RUNNINGSTATUS'] ==
+                        constants.SNAPSHOT_INITIALIZING)
+
+        huawei_utils.wait_for_condition(_snapshot_ready,
+                                        constants.DEFAULT_WAIT_INTERVAL,
+                                        constants.DEFAULT_WAIT_TIMEOUT)
+        return self.snapshot['WWN']
+
+
+class DeleteSnapshotTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(DeleteSnapshotTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, snapshot_info):
+        if snapshot_info['RUNNINGSTATUS'] == constants.SNAPSHOT_ACTIVATED:
+            self.client.stop_snapshot(snapshot_info['ID'])
+        self.client.delete_snapshot(snapshot_info['ID'])
+
+
+class DeleteTempSnapshotTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(DeleteTempSnapshotTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, snapshot_id):
+        self.client.stop_snapshot(snapshot_id)
+        self.client.delete_snapshot(snapshot_id)
+
+
+class RevertToSnapshotTask(task.Task):
+    def __init__(self, client, rollback_speed, *args, **kwargs):
+        super(RevertToSnapshotTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.rollback_speed = rollback_speed
+
+    def execute(self, snapshot_info, snapshot_id):
+        running_status = snapshot_info.get("RUNNINGSTATUS")
+        health_status = snapshot_info.get("HEALTHSTATUS")
+
+        if running_status not in (
+                constants.SNAPSHOT_RUNNING_STATUS_ACTIVATED,
+                constants.SNAPSHOT_RUNNING_STATUS_ROLLINGBACK):
+            err_msg = (_("The running status %(status)s of snapshot %(name)s "
+                         "does not allow rollback.")
+                       % {"status": running_status, "name": snapshot_id})
+            LOG.error(err_msg)
+            raise exception.InvalidSnapshot(reason=err_msg)
+
+        if health_status not in (constants.SNAPSHOT_HEALTH_STATUS_NORMAL,):
+            err_msg = (_("The health status %(status)s of snapshot %(name)s "
+                         "does not allow rollback.")
+                       % {"status": health_status, "name": snapshot_id})
+            LOG.error(err_msg)
+            raise exception.InvalidSnapshot(reason=err_msg)
+
+        if running_status == constants.SNAPSHOT_RUNNING_STATUS_ACTIVATED:
+            self.client.rollback_snapshot(snapshot_id, self.rollback_speed)
+
+    def revert(self, result, snapshot_id, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        self.client.cancel_rollback_snapshot(snapshot_id)
+
+
+class WaitSnapshotRollbackDoneTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(WaitSnapshotRollbackDoneTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, snapshot_id):
+        def _snapshot_rollback_finish():
+            snapshot_info = self.client.get_snapshot_info_by_id(snapshot_id)
+
+            if snapshot_info.get('HEALTHSTATUS') not in (
+                    constants.SNAPSHOT_HEALTH_STATUS_NORMAL,):
+                msg = _("The snapshot %s is abnormal.") % snapshot_id
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            if (snapshot_info.get('ROLLBACKRATE') ==
+                    constants.SNAPSHOT_ROLLBACK_PROGRESS_FINISH or
+                    snapshot_info.get('ROLLBACKENDTIME') != '-1'):
+                LOG.info("Snapshot %s rollback successful.", snapshot_id)
+                return True
+            return False
+
+        huawei_utils.wait_for_condition(_snapshot_rollback_finish,
+                                        constants.DEFAULT_WAIT_INTERVAL,
+                                        constants.DEFAULT_WAIT_TIMEOUT)
+
+
+class ExtendVolumeTask(task.Task):
+    default_provides = 'lun_info'
+
+    def __init__(self, client, *args, **kwargs):
+        super(ExtendVolumeTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, lun_id, new_size):
+        lun_info = self.client.get_lun_info_by_id(lun_id)
+        if int(lun_info['CAPACITY']) < new_size:
+            self.client.extend_lun(lun_id, new_size)
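+            # extend_lun is only issued when the LUN is smaller than the
+            # requested size; the info is re-read below so callers see the
+            # new capacity.
+            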
LOG.info('Extend LUN %(id)s to size %(new_size)s.',
+                     {'id': lun_id,
+                      'new_size': new_size})
+        lun_info = self.client.get_lun_info_by_id(lun_id)
+        return lun_info
+
+
+class ExtendHyperMetroTask(task.Task):
+    def __init__(self, local_cli, remote_cli, config, *args, **kwargs):
+        super(ExtendHyperMetroTask, self).__init__(*args, **kwargs)
+        self.hypermetro = hypermetro.HuaweiHyperMetro(
+            local_cli, remote_cli, config)
+        self.local_cli = local_cli
+
+    def execute(self, volume, new_size):
+        metadata = huawei_utils.get_volume_private_data(volume)
+        if not metadata.get('hypermetro'):
+            return
+
+        hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume)
+        if not hypermetro:
+            msg = _('Volume %s is not in a hypermetro pair.') % volume.id
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        self.hypermetro.extend_hypermetro(hypermetro['ID'], new_size)
+
+
+class ExtendReplicationTask(task.Task):
+    def __init__(self, local_cli, remote_cli, config, *args, **kwargs):
+        super(ExtendReplicationTask, self).__init__(*args, **kwargs)
+        self.replication = replication.ReplicationManager(
+            local_cli, remote_cli, config)
+
+    def execute(self, volume, new_size):
+        data = huawei_utils.get_replication_data(volume)
+        pair_id = data.get('pair_id')
+        if pair_id:
+            self.replication.extend_replica(pair_id, new_size)
+
+
+class UpdateLunTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(UpdateLunTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, lun_info, opts):
+        data = {}
+        compression_check = lun_info.get('ENABLECOMPRESSION') == 'true'
+        if not opts['compression'] and compression_check:
+            data["ENABLECOMPRESSION"] = 'false'
+
+        dedup_check = lun_info.get('ENABLESMARTDEDUP') == 'true'
+        if not opts['dedup'] and dedup_check:
+            data["ENABLESMARTDEDUP"] = 'false'
+
+        if (opts.get('policy') and
+                opts['policy'] != lun_info.get('DATATRANSFERPOLICY')):
+            data["DATATRANSFERPOLICY"] = opts['policy']
+
+        if data:
+            self.client.update_lun(lun_info['ID'], data)
+
+
+class UpdateQoSTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(UpdateQoSTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.smartqos = smartx.SmartQos(client)
+
+    def execute(self, lun_info, opts):
+        qos_id = lun_info.get('IOCLASSID')
+        if opts.get('qos'):
+            if qos_id:
+                self.smartqos.update(qos_id, opts['qos'], lun_info['ID'])
+            else:
+                self.smartqos.add(opts['qos'], lun_info['ID'])
+        elif qos_id:
+            self.smartqos.remove(qos_id, lun_info['ID'])
+
+
+class UpdateCacheTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(UpdateCacheTask, self).__init__(*args, **kwargs)
+        self.smartcache = smartx.SmartCache(client)
+
+    def execute(self, lun_info, opts):
+        cache_id = lun_info.get('SMARTCACHEPARTITIONID')
+        if opts.get('smartcache'):
+            if cache_id:
+                self.smartcache.update(
+                    cache_id, opts['cachename'], lun_info['ID'])
+            else:
+                self.smartcache.add(opts['cachename'], lun_info['ID'])
+        elif cache_id:
+            self.smartcache.remove(cache_id, lun_info['ID'])
+
+
+class UpdatePartitionTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(UpdatePartitionTask, self).__init__(*args, **kwargs)
+        self.smartpartition = smartx.SmartPartition(client)
+
+    def execute(self, lun_info, opts):
+        partition_id = lun_info.get('CACHEPARTITIONID')
+        if opts.get('smartpartition'):
+            if partition_id:
+                self.smartpartition.update(
+                    partition_id, opts['partitionname'], lun_info['ID'])
+            else:
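+                # The LUN has no SmartPartition bound yet; bind it to the
+                # partition named in the volume type.
+                self.smartpartition.add(opts['partitionname'], 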
lun_info['ID'])
+        elif partition_id:
+            self.smartpartition.remove(partition_id, lun_info['ID'])
+
+
+class ManageVolumePreCheckTask(task.Task):
+    default_provides = ('lun_info', 'lun_id')
+
+    def __init__(self, client, volume, existing_ref, configuration,
+                 *args, **kwargs):
+        super(ManageVolumePreCheckTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.volume = volume
+        self.existing_ref = existing_ref
+        self.configuration = configuration
+
+    def _get_external_lun(self):
+        lun_info = huawei_utils.get_external_lun_info(
+            self.client, self.existing_ref)
+        if not lun_info:
+            msg = _('External lun %s does not exist.') % self.existing_ref
+            LOG.error(msg)
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=self.existing_ref, reason=msg)
+
+        return lun_info
+
+    def _check_lun_abnormal(self, lun_info, *args):
+        return lun_info['HEALTHSTATUS'] != constants.STATUS_HEALTH
+
+    def _check_pool_inconsistency(self, lun_info, *args):
+        pool = volume_utils.extract_host(self.volume.host, 'pool')
+        return pool != lun_info['PARENTNAME']
+
+    def _check_lun_in_use(self, lun_info, *args):
+        return (lun_info.get('ISADD2LUNGROUP') == 'true' or
+                lun_info.get('EXPOSEDTOINITIATOR') == 'true')
+
+    def _check_lun_in_hypermetro(self, lun_info, *args):
+        rss = {}
+        if 'HASRSSOBJECT' in lun_info:
+            rss = json.loads(lun_info['HASRSSOBJECT'])
+        return rss.get('HyperMetro') == 'TRUE'
+
+    def _check_lun_in_replication(self, lun_info, *args):
+        rss = {}
+        if 'HASRSSOBJECT' in lun_info:
+            rss = json.loads(lun_info['HASRSSOBJECT'])
+        return rss.get('RemoteReplication') == 'TRUE'
+
+    def _check_lun_in_splitmirror(self, lun_info, *args):
+        rss = {}
+        if 'HASRSSOBJECT' in lun_info:
+            rss = json.loads(lun_info['HASRSSOBJECT'])
+        return rss.get('SplitMirror') == 'TRUE'
+
+    def _check_lun_in_hypermirror(self, lun_info, *args):
+        rss = {}
+        if 'HASRSSOBJECT' in lun_info:
+            rss = json.loads(lun_info['HASRSSOBJECT'])
+        return rss.get('LUNMirror') == 'TRUE'
+
+    def _check_lun_in_luncopy(self, lun_info, *args):
+        rss = {}
+        if 'HASRSSOBJECT' in lun_info:
+            rss = json.loads(lun_info['HASRSSOBJECT'])
+        return rss.get('LunCopy') == 'TRUE'
+
+    def _check_lun_in_migration(self, lun_info, *args):
+        rss = {}
+        if 'HASRSSOBJECT' in lun_info:
+            rss = json.loads(lun_info['HASRSSOBJECT'])
+        return rss.get('LunMigration') == 'TRUE'
+
+    def _check_lun_not_common(self, lun_info, *args):
+        return (lun_info.get('MIRRORTYPE') != '0' or
+                lun_info.get('SUBTYPE') != '0')
+
+    def _check_lun_consistency(self, lun_info, opts):
+        return ('LUNType' in opts and
+                opts['LUNType'] != lun_info['ALLOCTYPE'])
+
+    def _check_lun_dedup_consistency(self, lun_info, opts):
+        dedup_flag = False
+        if opts.get('dedup') is not None:
+            dedup_enabled = lun_info['ENABLESMARTDEDUP'] == 'true'
+            if opts['dedup'] != dedup_enabled:
+                dedup_flag = True
+        return dedup_flag
+
+    def _check_lun_compression_consistency(self, lun_info, opts):
+        compression_flag = False
+        if opts.get('compression') is not None:
+            compression_enabled = lun_info['ENABLECOMPRESSION'] == 'true'
+            if opts['compression'] != compression_enabled:
+                compression_flag = True
+        return compression_flag
+
+    def execute(self, opts):
+        lun_info = self._get_external_lun()
+
+        for i in dir(self):
+            if callable(getattr(self, i)) and i.startswith('_check_'):
+                func = getattr(self, i)
+                if func(lun_info, opts):
+                    msg = _("Volume managing pre-check %s failed."
+                            ) % func.__name__
+                    LOG.error(msg)
+                    raise exception.ManageExistingInvalidReference(
+                        existing_ref=self.existing_ref, reason=msg)
+
+        return lun_info, lun_info['ID']
+
+
+class ManageLunTask(task.Task):
+    def __init__(self, client, *args, **kwargs):
+        super(ManageLunTask, self).__init__(*args, **kwargs)
+        self.client = client
+
+    def execute(self, volume, lun_info):
+        new_name = huawei_utils.encode_name(volume.id)
+        self.client.rename_lun(lun_info['ID'], new_name, volume.name)
+
+    def revert(self, result, lun_info, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        self.client.rename_lun(lun_info['ID'], lun_info['NAME'],
+                               lun_info['DESCRIPTION'])
+
+
+class ManageSnapshotPreCheckTask(task.Task):
+    default_provides = 'snapshot_info'
+
+    def __init__(self, client, snapshot, existing_ref, *args, **kwargs):
+        super(ManageSnapshotPreCheckTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.snapshot = snapshot
+        self.existing_ref = existing_ref
+
+    def _get_external_snapshot(self):
+        snapshot_info = huawei_utils.get_external_snapshot_info(
+            self.client, self.existing_ref)
+        if not snapshot_info:
+            msg = _('External snapshot %s does not exist.'
+                    ) % self.existing_ref
+            LOG.error(msg)
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=self.existing_ref, reason=msg)
+
+        return snapshot_info
+
+    def _check_snapshot_abnormal(self, snapshot_info):
+        return snapshot_info['HEALTHSTATUS'] != constants.STATUS_HEALTH
+
+    def _check_snapshot_in_use(self, snapshot_info):
+        return snapshot_info.get('EXPOSEDTOINITIATOR') == 'true'
+
+    def _check_parent_volume_inconsistency(self, snapshot_info):
+        parent_info = huawei_utils.get_lun_info(
+            self.client, self.snapshot.volume)
+        return (not parent_info or
+                snapshot_info.get('PARENTID') != parent_info['ID'])
+
+    def execute(self):
+        snapshot_info = self._get_external_snapshot()
+        for i in dir(self):
+            if callable(getattr(self, i)) and i.startswith('_check_'):
+                func = getattr(self, i)
+                if func(snapshot_info):
+                    msg = _("Snapshot managing pre-check %s failed."
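+                            # func.__name__ pinpoints which _check_* helper
+                            # rejected the external snapshot.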
+ ) % func.__name__ + LOG.error(msg) + raise exception.ManageExistingInvalidReference( + existing_ref=self.existing_ref, reason=msg) + + return snapshot_info + + +class ManageSnapshotTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(ManageSnapshotTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, snapshot, snapshot_info): + new_name = huawei_utils.encode_name(snapshot.id) + data = {'NAME': new_name} + self.client.update_snapshot(snapshot_info['ID'], data) + + if (snapshot_info.get('RUNNINGSTATUS') == + constants.SNAPSHOT_UNACTIVATED): + self.client.activate_snapshot(snapshot_info['ID']) + + +class GroupOptsCheckTask(task.Task): + default_provides = 'opts' + + def __init__(self, *args, **kwargs): + super(GroupOptsCheckTask, self).__init__(*args, **kwargs) + + def execute(self, opts): + for opt in opts: + huawei_utils.check_volume_type_valid(opt) + return opts + + +class CreateHyperMetroGroupTask(task.Task): + def __init__(self, local_cli, remote_cli, config, feature_support, + *args, **kwargs): + super(CreateHyperMetroGroupTask, self).__init__(*args, **kwargs) + self.hypermetro = hypermetro.HuaweiHyperMetro( + local_cli, remote_cli, config) + self.feature_support = feature_support + + def execute(self, group, opts): + if any(opt for opt in opts if opt['hypermetro']): + if not self.feature_support['HyperMetro']: + msg = _("Huawei storage doesn't support HyperMetro.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.hypermetro.create_consistencygroup(group.id) + + def revert(self, result, group, **kwargs): + if isinstance(result, failure.Failure): + return + self.hypermetro.delete_consistencygroup(group.id, []) + + +class CreateReplicationGroupTask(task.Task): + def __init__(self, local_cli, remote_cli, config, feature_support, + *args, **kwargs): + super(CreateReplicationGroupTask, self).__init__(*args, **kwargs) + self.replication = replication.ReplicationManager( + local_cli, remote_cli, config) + self.feature_support = feature_support + + def execute(self, group, opts): + create_group = False + replication_type = set() + for opt in opts: + if opt['replication_enabled']: + create_group = True + replication_type.add(opt['replication_type']) + + if create_group: + if not self.feature_support['HyperReplication']: + msg = _("Huawei storage doesn't support HyperReplication.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if len(replication_type) != 1: + msg = _("Multiple replication types exist in group.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.replication.create_group(group.id, replication_type.pop()) + + def revert(self, result, group, **kwargs): + if isinstance(result, failure.Failure): + return + self.replication.delete_group(group.id, []) + + +class GetISCSIConnectionTask(task.Task): + default_provides = ('target_ips', 'target_iqns', 'target_eths', + 'config_info') + + def __init__(self, client, iscsi_info, *args, **kwargs): + super(GetISCSIConnectionTask, self).__init__(*args, **kwargs) + self.client = client + self.iscsi_info = iscsi_info + + def _get_config_target_ips(self, ini): + if ini and ini.get('TargetIP'): + target_ips = [ip.strip() for ip in ini['TargetIP'].split() + if ip.strip()] + else: + target_ips = self.iscsi_info['default_target_ips'] + return target_ips + + def _get_port_ip(self, port_id): + iqn_info = port_id.split(',', 1)[0] + return iqn_info.split(':', 5)[5] + + def _get_port_iqn(self, port_id): + iqn_info = 
port_id.split(',', 1)[0]
+        return iqn_info.split('+')[1]
+
+    def execute(self, connector, initiator):
+        ip_iqn_map = {}
+        target_ports = self.client.get_iscsi_tgt_ports()
+        for port in target_ports:
+            ip = self._get_port_ip(port['ID'])
+            normalized_ip = ipaddress.ip_address(six.text_type(ip)).exploded
+            ip_iqn_map[normalized_ip] = (port['ID'], port['ETHPORTID'])
+
+        config_info = huawei_utils.find_config_info(
+            self.iscsi_info, connector=connector, initiator=initiator)
+
+        config_ips = self._get_config_target_ips(config_info)
+        LOG.info('Configured iscsi ips %s.', config_ips)
+
+        target_ips = []
+        target_iqns = []
+        target_eths = []
+
+        for ip in config_ips:
+            ip_addr = ipaddress.ip_address(six.text_type(ip))
+            normalized_ip = ip_addr.exploded
+            if normalized_ip in ip_iqn_map:
+                iqn = self._get_port_iqn(ip_iqn_map[normalized_ip][0])
+                target_iqns.append(iqn)
+                target_eths.append(ip_iqn_map[normalized_ip][1])
+
+        for iqn in target_iqns:
+            ip = iqn.split(':', 5)[5]
+            if ipaddress.ip_address(six.text_type(ip)).version == 6:
+                ip = '[' + ip + ']'
+            target_ips.append(ip)
+
+        if not target_ips or not target_iqns or not target_eths:
+            msg = _('Get iSCSI target ip&iqn&eth error.')
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        LOG.info('Get iscsi target_ips: %s, target_iqns: %s, target_eths: %s.',
+                 target_ips, target_iqns, target_eths)
+
+        return target_ips, target_iqns, target_eths, config_info
+
+
+class CreateHostTask(task.Task):
+    default_provides = 'host_id'
+
+    def __init__(self, client, protocol_info, configuration, *args, **kwargs):
+        super(CreateHostTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.protocol_info = protocol_info
+        self.configuration = configuration
+
+    def _get_new_alua_info(self, config):
+        info = {'accessMode': '0'}
+        if config.get('ACCESSMODE') and config.get('HYPERMETROPATHOPTIMIZED'):
+            info.update({
+                'accessMode': config['ACCESSMODE'],
+                'hyperMetroPathOptimized': config['HYPERMETROPATHOPTIMIZED']
+            })
+
+        return info
+
+    def execute(self, connector, initiator):
+        orig_host_name = connector['host']
+        host_id = huawei_utils.get_host_id(self.client, orig_host_name)
+        info = {}
+        if self.configuration.is_dorado_v6:
+            config_info = huawei_utils.find_config_info(
+                self.protocol_info, connector=connector, initiator=initiator)
+            info = self._get_new_alua_info(config_info)
+        if host_id:
+            self.client.update_host(host_id, info)
+        if not host_id:
+            host_name = huawei_utils.encode_host_name(orig_host_name)
+            host_id = self.client.create_host(host_name, orig_host_name, info)
+        return host_id
+
+
+class AddISCSIInitiatorTask(task.Task):
+    default_provides = 'chap_info'
+
+    def __init__(self, client, iscsi_info, configuration, *args, **kwargs):
+        super(AddISCSIInitiatorTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.iscsi_info = iscsi_info
+        self.configuration = configuration
+
+    def _get_chap_info(self, config):
+        chap_config = config.get('CHAPinfo')
+        if not chap_config:
+            return {}
+
+        chap_name, chap_password = chap_config.split(';')
+        return {'CHAPNAME': chap_name,
+                'CHAPPASSWORD': chap_password}
+
+    def _get_alua_info(self, config):
+        alua_info = {'MULTIPATHTYPE': '0'}
+        if config.get('ACCESSMODE') and self.configuration.is_dorado_v6:
+            return alua_info
+
+        if config.get('ALUA'):
+            alua_info['MULTIPATHTYPE'] = config['ALUA']
+
+        if alua_info['MULTIPATHTYPE'] == '1':
+            for k in ('FAILOVERMODE', 'SPECIALMODETYPE', 'PATHTYPE'):
+                if config.get(k):
+                    alua_info[k] = config[k]
+
+        return alua_info
+
+    def
execute(self, connector, host_id, config_info): + initiator = connector['initiator'] + self.client.add_iscsi_initiator(initiator) + + alua_info = self._get_alua_info(config_info) + self.client.associate_iscsi_initiator_to_host( + initiator, host_id, alua_info) + + chap_info = self._get_chap_info(config_info) + ini_info = self.client.get_iscsi_initiator(initiator) + if (ini_info['USECHAP'] == 'true' and not chap_info) or ( + ini_info['USECHAP'] == 'false' and chap_info): + self.client.update_iscsi_initiator_chap(initiator, chap_info) + + return chap_info + + +class CreateHostGroupTask(task.Task): + default_provides = 'hostgroup_id' + + def __init__(self, client, *args, **kwargs): + super(CreateHostGroupTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, host_id): + hostgroup_name = constants.HOSTGROUP_PREFIX + host_id + hostgroup_id = self.client.create_hostgroup(hostgroup_name) + self.client.associate_host_to_hostgroup(hostgroup_id, host_id) + return hostgroup_id + + +class CreateLunGroupTask(task.Task): + default_provides = 'lungroup_id' + + def __init__(self, client, configuration, *args, **kwargs): + super(CreateLunGroupTask, self).__init__(*args, **kwargs) + self.client = client + self.configuration = configuration + + def execute(self, host_id, lun_id, lun_type): + lungroup_name = constants.LUNGROUP_PREFIX + host_id + lungroup_id = self.client.create_lungroup(lungroup_name) + mapping_view = self.client.get_mappingview_by_lungroup_id(lungroup_id) + is_associated_host = True if mapping_view else False + self.client.associate_lun_to_lungroup( + lungroup_id, lun_id, lun_type, + self.configuration.is_dorado_v6, is_associated_host) + return lungroup_id + + def revert(self, result, lun_id, lun_type, **kwargs): + if isinstance(result, failure.Failure): + return + self.client.remove_lun_from_lungroup(result, lun_id, lun_type) + + +class CreateMappingViewTask(task.Task): + default_provides = ('mappingview_id', 'hostlun_id', 'aval_host_lun_ids') + + def __init__(self, client, *args, **kwargs): + super(CreateMappingViewTask, self).__init__(*args, **kwargs) + self.client = client + + def _get_hostlun_id(self, func, host_id, lun_id): + hostlun_id = func(host_id, lun_id) + if hostlun_id is None: + import time + time.sleep(3) + hostlun_id = func(host_id, lun_id) + + if hostlun_id is None: + msg = _("Can not get hostlun id. 
Maybe the storage is busy, " + "Please try it later") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return hostlun_id + + def execute(self, lun_id, lun_type, host_id, hostgroup_id, lungroup_id, + lun_info, portgroup_id=None): + mappingview_name = constants.MAPPING_VIEW_PREFIX + host_id + mappingview_id = self.client.create_mappingview(mappingview_name) + self.client.associate_hostgroup_to_mappingview( + mappingview_id, hostgroup_id) + self.client.associate_lungroup_to_mappingview( + mappingview_id, lungroup_id) + if portgroup_id: + self.client.associate_portgroup_to_mappingview( + mappingview_id, portgroup_id) + + if lun_type == constants.LUN_TYPE: + hostlun_id = self._get_hostlun_id( + self.client.get_lun_host_lun_id, host_id, lun_info) + else: + hostlun_id = self._get_hostlun_id( + self.client.get_snapshot_host_lun_id, host_id, lun_id) + + mappingview_info = self.client.get_mappingview_by_id(mappingview_id) + aval_host_lun_ids = json.loads( + mappingview_info['AVAILABLEHOSTLUNIDLIST']) + return mappingview_id, hostlun_id, aval_host_lun_ids + + +class GetISCSIPropertiesTask(task.Task): + default_provides = 'mapping_info' + + def execute(self, connector, hostlun_id, target_iqns, target_ips, + chap_info, mappingview_id, aval_host_lun_ids, lun_id, + lun_info): + hostlun_id = int(hostlun_id) + mapping_info = { + 'target_discovered': False, + 'hostlun_id': hostlun_id, + 'mappingview_id': mappingview_id, + 'aval_host_lun_ids': aval_host_lun_ids, + 'lun_id': lun_id, + } + + if connector.get('multipath'): + mapping_info.update({ + 'target_iqns': target_iqns, + 'target_portals': ['%s:3260' % ip for ip in target_ips], + 'target_luns': [hostlun_id] * len(target_ips), + }) + else: + mapping_info.update({ + 'target_iqn': target_iqns[0], + 'target_portal': '%s:3260' % target_ips[0], + 'target_lun': hostlun_id, + }) + + if chap_info: + mapping_info['auth_method'] = 'CHAP' + mapping_info['auth_username'] = chap_info['CHAPNAME'] + mapping_info['auth_password'] = chap_info['CHAPPASSWORD'] + + if lun_info.get('ALLOCTYPE') == constants.THIN_LUNTYPE: + mapping_info['discard'] = True + + return mapping_info + + +class GetHyperMetroRemoteLunTask(task.Task): + default_provides = ('lun_id', 'lun_info') + + def __init__(self, client, hypermetro_id, *args, **kwargs): + super(GetHyperMetroRemoteLunTask, self).__init__(*args, **kwargs) + self.client = client + self.hypermetro_id = hypermetro_id + + def execute(self): + hypermetro_info = self.client.get_hypermetro_by_id(self.hypermetro_id) + remote_lun_id = hypermetro_info['LOCALOBJID'] + remote_lun_info = self.client.get_lun_info_by_id(remote_lun_id) + return remote_lun_id, remote_lun_info + + +class GetLunMappingTask(task.Task): + default_provides = ('mappingview_id', 'lungroup_id', 'hostgroup_id', + 'portgroup_id', 'host_id') + + def __init__(self, client, *args, **kwargs): + super(GetLunMappingTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, connector, lun_id): + if connector is None or 'host' not in connector: + mappingview_id, lungroup_id, hostgroup_id, portgroup_id, host_id = ( + huawei_utils.get_mapping_info(self.client, lun_id)) + return (mappingview_id, lungroup_id, hostgroup_id, portgroup_id, + host_id) + host_name = connector['host'] + host_id = huawei_utils.get_host_id(self.client, host_name) + if not host_id: + LOG.warning('Host %s not exist, return success for ' + 'connection termination.', host_name) + return None, None, None, None, None + + mappingview_name = constants.MAPPING_VIEW_PREFIX + 
host_id + mappingview = self.client.get_mappingview_by_name(mappingview_name) + if not mappingview: + LOG.warning('Mappingview %s not exist, return success for ' + 'connection termination.', mappingview_name) + return None, None, None, None, host_id + + lungroup_id = self.client.get_lungroup_in_mappingview( + mappingview['ID']) + portgroup_id = self.client.get_portgroup_in_mappingview( + mappingview['ID']) + hostgroup_id = self.client.get_hostgroup_in_mappingview( + mappingview['ID']) + + return (mappingview['ID'], lungroup_id, hostgroup_id, portgroup_id, + host_id) + + +class ClearLunMappingTask(task.Task): + default_provides = 'ini_tgt_map' + + def __init__(self, client, configuration, fc_san=None, is_fc=False, *args, **kwargs): + super(ClearLunMappingTask, self).__init__(*args, **kwargs) + self.client = client + self.fc_san = fc_san + self.is_fc = is_fc + self.configuration = configuration + + def _get_obj_count_of_lungroup(self, lungroup_id): + lun_count = self.client.get_lun_count_of_lungroup(lungroup_id) + snap_count = self.client.get_snapshot_count_of_lungroup(lungroup_id) + return lun_count + snap_count + + def _delete_portgroup(self, mappingview_id, portgroup_id): + self.client.remove_portgroup_from_mappingview( + mappingview_id, portgroup_id) + + eth_ports = self.client.get_eth_ports_in_portgroup(portgroup_id) + fc_ports = self.client.get_fc_ports_in_portgroup(portgroup_id) + for p in [p['ID'] for p in eth_ports] + [p['ID'] for p in fc_ports]: + self.client.remove_port_from_portgroup(portgroup_id, p) + self.client.delete_portgroup(portgroup_id) + + def _delete_lungroup(self, mappingview_id, lungroup_id): + self.client.remove_lungroup_from_mappingview( + mappingview_id, lungroup_id) + self.client.delete_lungroup(lungroup_id) + + def _delete_hostgroup(self, mappingview_id, hostgroup_id, host_id): + self.client.remove_hostgroup_from_mappingview( + mappingview_id, hostgroup_id) + self.client.remove_host_from_hostgroup(hostgroup_id, host_id) + self.client.delete_hostgroup(hostgroup_id) + + def _delete_host(self, host_id): + iscsi_initiators = self.client.get_host_iscsi_initiators(host_id) + for ini in iscsi_initiators: + self.client.remove_iscsi_initiator_from_host(ini) + + fc_initiators = self.client.get_host_fc_initiators(host_id) + for ini in fc_initiators: + self.client.remove_fc_initiator_from_host(ini) + + self.client.delete_host(host_id) + + def _get_ini_tgt_map(self, connector, host_id): + ini_tgt_map = {} + portgroup = self.client.get_portgroup_by_name( + constants.PORTGROUP_PREFIX + host_id) + if portgroup: + ports = self.client.get_fc_ports_in_portgroup(portgroup['ID']) + port_wwns = [p['WWN'] for p in ports] + wwns = map(lambda x: x.lower(), connector['wwpns']) + for wwn in wwns: + ini_tgt_map[wwn] = port_wwns + + return ini_tgt_map + + def execute(self, connector, lun_id, lun_type, host_id, mappingview_id, + lungroup_id, hostgroup_id, portgroup_id): + obj_count = 0 + if lun_id and lungroup_id: + self.client.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) + + if lungroup_id: + obj_count = self._get_obj_count_of_lungroup(lungroup_id) + + # If lungroup still has member objects, don't clear mapping relation. 
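+        # Note: the teardown below is deliberately ordered: the mapping
+        # view is emptied first (portgroup, then lungroup, then hostgroup),
+        # the view itself is deleted next, and the host is removed last,
+        # only if it no longer has any in-band LUN associated.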
+ if obj_count > 0: + LOG.info('Lungroup %(lg)s still has %(count)s members.', + {'lg': lungroup_id, 'count': obj_count}) + return {} + if self.configuration.retain_storage_mapping: + return {} + + ini_tgt_map = {} + if self.fc_san and host_id: + ini_tgt_map = self._get_ini_tgt_map(connector, host_id) + + if mappingview_id and portgroup_id: + self._delete_portgroup(mappingview_id, portgroup_id) + if mappingview_id and not self.is_fc: + self.client.update_iscsi_initiator_chap( + connector.get('initiator'), chap_info=None) + if mappingview_id and lungroup_id: + self._delete_lungroup(mappingview_id, lungroup_id) + if mappingview_id and hostgroup_id: + self._delete_hostgroup(mappingview_id, hostgroup_id, host_id) + if mappingview_id: + self.client.delete_mapping_view(mappingview_id) + if host_id and not self.client.is_host_associate_inband_lun(host_id): + self._delete_host(host_id) + + return ini_tgt_map + + +class GetFCConnectionTask(task.Task): + default_provides = ('ini_tgt_map', 'tgt_port_wwns') + + def __init__(self, client, fc_san, configuration, *args, **kwargs): + super(GetFCConnectionTask, self).__init__(*args, **kwargs) + self.client = client + self.fc_san = fc_san + self.configuration = configuration + + def _get_fc_ports(self, wwns): + contr_map = {} + slot_map = {} + port_map = {} + + fc_ports = self.client.get_fc_ports() + for port in fc_ports: + if port['RUNNINGSTATUS'] == constants.FC_PORT_CONNECTED: + contr = port['PARENTID'].split('.')[0] + slot = port['PARENTID'] + port_wwn = port['WWN'] + + if contr not in contr_map: + contr_map[contr] = [slot] + elif slot not in contr_map[contr]: + contr_map[contr].append(slot) + + if slot not in slot_map: + slot_map[slot] = [port_wwn] + elif port_wwn not in slot_map[slot]: + slot_map[slot].append(port_wwn) + + port_map[port_wwn] = { + 'id': port['ID'], + 'runspeed': int(port['RUNSPEED']), + 'slot': slot, + } + + fabrics = self._get_fabric(wwns, list(port_map.keys())) + if not fabrics: + msg = _("No valid fabric connection..") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return contr_map, slot_map, port_map, fabrics + + def _get_fabric(self, ini_port_wwns, tgt_port_wwns): + ini_tgt_map = self.fc_san.get_device_mapping_from_network( + ini_port_wwns, tgt_port_wwns) + + def _filter_not_connected_fabric(fabric_name, fabric): + ini_port_wwn_list = fabric.get('initiator_port_wwn_list') + tgt_port_wwn_list = fabric.get('target_port_wwn_list') + + if not ini_port_wwn_list or not tgt_port_wwn_list: + LOG.warning("Fabric %(fabric_name)s doesn't really " + "connect host and array: %(fabric)s.", + {'fabric_name': fabric_name, + 'fabric': fabric}) + return None + + return set(ini_port_wwn_list), set(tgt_port_wwn_list) + + valid_fabrics = [] + for fabric in ini_tgt_map: + pair = _filter_not_connected_fabric(fabric, ini_tgt_map[fabric]) + if pair: + valid_fabrics.append(pair) + + LOG.info("Got fabric: %s.", valid_fabrics) + return valid_fabrics + + def _count_port_weight(self, port): + port_bandwidth = port['runspeed'] + portgroup_ids = self.client.get_portgroup_by_port_id(port['id'], 212) + weight = 1.0 / port_bandwidth if port_bandwidth > 0 else 1.0 + + return len(portgroup_ids), weight + + def _select_port_per_fabric(self, port_map, candid_ports, used_slots): + used_slot_pairs = [] + other_slot_pairs = [] + for p in candid_ports: + weight = self._count_port_weight(port_map[p]) + + if port_map[p]['slot'] in used_slots: + used_slot_pairs.append((weight, p)) + else: + other_slot_pairs.append((weight, p)) + + new_port = None 
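+        # Pick the least-loaded candidate (fewest portgroups, then highest
+        # run speed, since the weight is 1/speed) on a slot not yet used
+        # for this host; fall back to an already-used slot only when no
+        # other slot can serve this fabric.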
+ if other_slot_pairs: + sorted_pairs = sorted(other_slot_pairs, key=lambda a: a[0]) + new_port = sorted_pairs[0][1] + if not new_port and used_slot_pairs: + sorted_pairs = sorted(used_slot_pairs, key=lambda a: a[0]) + new_port = sorted_pairs[0][1] + + return new_port + + def _select_ports_per_contr(self, fabrics, slots, slot_map, port_map): + contr_ports = set() + for slot in slots: + contr_ports.update(slot_map[slot]) + + if len(fabrics) == 1: + select_fabrics = fabrics * 2 + else: + select_fabrics = fabrics + + used_slots = set() + selected_ports = set() + for fabric in select_fabrics: + new_port = self._select_port_per_fabric( + port_map, fabric[1] & contr_ports, used_slots) + if new_port: + selected_ports.add(new_port) + used_slots.add(port_map[new_port]['slot']) + + return selected_ports + + def _get_ports_in_use(self, host_id): + portgroup = self.client.get_portgroup_by_name( + constants.PORTGROUP_PREFIX + host_id) + if not portgroup: + return [] + ports = self.client.get_fc_ports_in_portgroup(portgroup['ID']) + return [p['WWN'] for p in ports] + + def _get_fc_zone(self, wwns, host_id): + selected_ports = set() + ini_tgt_map = {} + + used_ports = self._get_ports_in_use(host_id) + if not used_ports: + contr_map, slot_map, port_map, fabrics = self._get_fc_ports(wwns) + for contr in contr_map: + ports = self._select_ports_per_contr( + fabrics, contr_map[contr], slot_map, port_map) + selected_ports.update(ports) + + for fabric in fabrics: + for ini in fabric[0]: + ini_tgt_map[ini] = list(selected_ports & fabric[1]) + + return ini_tgt_map, list(selected_ports) + used_ports + + def _get_divided_wwns(self, wwns, host_id): + invalid_wwns, effective_wwns = [], [] + for wwn in wwns: + wwn_info = self.client.get_fc_init_info(wwn) + if not wwn_info: + LOG.info("%s is not found in device, ignore it.", wwn) + continue + + if wwn_info.get('RUNNINGSTATUS') == constants.FC_INIT_ONLINE: + if wwn_info.get('ISFREE') == 'true': + effective_wwns.append(wwn) + continue + + if wwn_info.get('PARENTTYPE') == constants.PARENT_TYPE_HOST \ + and wwn_info.get('PARENTID') == host_id: + effective_wwns.append(wwn) + continue + + invalid_wwns.append(wwn) + + return invalid_wwns, effective_wwns + + def _get_fc_link(self, wwns, host_id): + invalid_wwns, effective_wwns = self._get_divided_wwns(wwns, host_id) + + if invalid_wwns: + if (self.configuration.min_fc_ini_online == + constants.DEFAULT_MINIMUM_FC_INITIATOR_ONLINE): + msg = _("There are invalid initiators %s. 
If you want to "
+                        "continue to attach volume to host, configure "
+                        "MinFCIniOnline in the XML file.") % invalid_wwns
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+        if len(effective_wwns) < self.configuration.min_fc_ini_online:
+            msg = (("The number of online fc initiators %(wwns)s is less "
+                    "than the set number: %(set)s.")
+                   % {"wwns": effective_wwns,
+                      "set": self.configuration.min_fc_ini_online})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        ini_tgt_map = {}
+        tgt_port_wwns = set()
+
+        for ini in effective_wwns:
+            tgts = self.client.get_fc_target_wwpns(ini)
+            ini_tgt_map[ini] = tgts
+            tgt_port_wwns.update(tgts)
+
+        return ini_tgt_map, list(tgt_port_wwns)
+
+    def execute(self, connector, host_id):
+        # Use a list rather than a lazy map object: the wwns are iterated
+        # more than once and may be interpolated into error messages.
+        wwns = [x.lower() for x in connector['wwpns']]
+
+        if self.fc_san:
+            ini_tgt_map, tgt_port_wwns = self._get_fc_zone(wwns, host_id)
+        else:
+            ini_tgt_map, tgt_port_wwns = self._get_fc_link(wwns, host_id)
+
+        if not tgt_port_wwns:
+            msg = _('No fc connection for wwns %s.') % wwns
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        return ini_tgt_map, tgt_port_wwns
+
+
+class AddFCInitiatorTask(task.Task):
+    def __init__(self, client, fc_info, configuration, *args, **kwargs):
+        super(AddFCInitiatorTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.fc_info = fc_info
+        self.configuration = configuration
+
+    def _get_alua_info(self, config):
+        alua_info = {'MULTIPATHTYPE': '0'}
+        if config.get('ACCESSMODE') and self.configuration.is_dorado_v6:
+            return alua_info
+
+        if config.get('ALUA'):
+            alua_info['MULTIPATHTYPE'] = config['ALUA']
+
+        if alua_info['MULTIPATHTYPE'] == '1':
+            for k in ('FAILOVERMODE', 'SPECIALMODETYPE', 'PATHTYPE'):
+                if config.get(k):
+                    alua_info[k] = config[k]
+
+        return alua_info
+
+    def execute(self, host_id, ini_tgt_map, connector):
+        for ini in ini_tgt_map:
+            self.client.add_fc_initiator(ini)
+
+            config_info = huawei_utils.find_config_info(
+                self.fc_info, connector, initiator=ini)
+            alua_info = self._get_alua_info(config_info)
+            self.client.associate_fc_initiator_to_host(host_id, ini,
+                                                       alua_info)
+
+
+class CreateFCPortGroupTask(task.Task):
+    default_provides = 'portgroup_id'
+
+    def __init__(self, client, fc_san, *args, **kwargs):
+        super(CreateFCPortGroupTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.fc_san = fc_san
+
+    def _get_fc_ports(self):
+        port_map = {}
+        fc_ports = self.client.get_fc_ports()
+        for port in fc_ports:
+            port_map[port['WWN']] = port['ID']
+        return port_map
+
+    def _get_ports_to_add(self, ini_tgt_map):
+        ports = set()
+        for tgts in six.itervalues(ini_tgt_map):
+            ports |= set(tgts)
+        return ports
+
+    def execute(self, host_id, ini_tgt_map):
+        if not self.fc_san:
+            return None
+
+        portgroup_name = constants.PORTGROUP_PREFIX + host_id
+        portgroup_id = self.client.create_portgroup(portgroup_name)
+        port_map = self._get_fc_ports()
+        ports = self._get_ports_to_add(ini_tgt_map)
+        for port in ports:
+            self.client.add_port_to_portgroup(portgroup_id, port_map[port])
+        return portgroup_id
+
+    def revert(self, result, ini_tgt_map, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        if result:
+            port_map = self._get_fc_ports()
+            ports = self._get_ports_to_add(ini_tgt_map)
+            for port in ports:
+                self.client.remove_port_from_portgroup(result,
+                                                       port_map[port])
+
+
+class GetFCPropertiesTask(task.Task):
+    default_provides = 'mapping_info'
+
+    def execute(self, ini_tgt_map, tgt_port_wwns, hostlun_id, mappingview_id,
+                aval_host_lun_ids,
lun_id, lun_info): + hostlun_id = int(hostlun_id) + mapping_info = { + 'hostlun_id': hostlun_id, + 'mappingview_id': mappingview_id, + 'aval_host_lun_ids': aval_host_lun_ids, + 'target_discovered': True, + 'target_wwn': tgt_port_wwns, + 'target_lun': hostlun_id, + 'initiator_target_map': ini_tgt_map, + 'lun_id': lun_id, + } + + if lun_info.get('ALLOCTYPE') == constants.THIN_LUNTYPE: + mapping_info['discard'] = True + + return mapping_info + + +class ClassifyVolumeTask(task.Task): + default_provides = ('normal_volumes', 'replication_volumes') + + def execute(self, volumes): + normal_volumes = [] + replication_volumes = [] + + for v in volumes: + data = huawei_utils.to_dict(v.replication_driver_data) + if 'pair_id' in data: + replication_volumes.append(v) + else: + normal_volumes.append(v) + + return normal_volumes, replication_volumes + + +class FailoverVolumeTask(task.Task): + default_provides = 'volumes_update' + + def __init__(self, local_cli, remote_cli, config, *args, **kwargs): + super(FailoverVolumeTask, self).__init__(*args, **kwargs) + self.replication = replication.ReplicationManager( + local_cli, remote_cli, config) + + def _failover_normal_volumes(self, volumes): + volumes_update = [] + for v in volumes: + volume_update = {'volume_id': v.id, + 'updates': {'status': 'error'}} + volumes_update.append(volume_update) + + return volumes_update + + def execute(self, replication_volumes, normal_volumes): + volumes_update = self.replication.failover(replication_volumes) + volumes_update += self._failover_normal_volumes(normal_volumes) + return volumes_update + + +class FailbackVolumeTask(task.Task): + default_provides = 'volumes_update' + + def __init__(self, local_cli, remote_cli, config, *args, **kwargs): + super(FailbackVolumeTask, self).__init__(*args, **kwargs) + self.replication = replication.ReplicationManager( + local_cli, remote_cli, config) + + def _failback_normal_volumes(self, volumes): + volumes_update = [] + for v in volumes: + volume_update = {'volume_id': v.id, + 'updates': {'status': 'available'}} + volumes_update.append(volume_update) + + return volumes_update + + def execute(self, replication_volumes, normal_volumes): + volumes_update = self.replication.failback(replication_volumes) + volumes_update += self._failback_normal_volumes(normal_volumes) + return volumes_update + + +def create_volume(volume, local_cli, hypermetro_rmt_cli, replication_rmt_cli, + configuration, feature_support): + store_spec = {'volume': volume} + + work_flow = linear_flow.Flow('create_volume') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support, configuration), + CreateLunTask(local_cli, configuration, feature_support), + WaitLunOnlineTask(local_cli), + AddQoSTask(local_cli, configuration), + AddCacheTask(local_cli), + AddPartitionTask(local_cli), + CreateHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration, + is_sync=False), + AddHyperMetroGroupTask( + local_cli, hypermetro_rmt_cli, configuration), + CreateReplicationTask( + local_cli, replication_rmt_cli, configuration), + AddReplicationGroupTask( + local_cli, replication_rmt_cli, configuration), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + lun_id = engine.storage.fetch('lun_id') + lun_info = engine.storage.fetch('lun_info') + hypermetro_id = engine.storage.fetch('hypermetro_id') + replication_id = engine.storage.fetch('replication_id') + return lun_id, lun_info['WWN'], hypermetro_id, replication_id + + +def delete_volume(volume, local_cli, hypermetro_rmt_cli, 
replication_rmt_cli, + configuration): + store_spec = {'volume': volume} + work_flow = linear_flow.Flow('delete_volume') + work_flow.add( + CheckLunExistTask(local_cli), + CheckLunMappedTask(local_cli, + configuration), + DeleteReplicationTask(local_cli, replication_rmt_cli, + configuration), + DeleteHyperMetroTask(local_cli, hypermetro_rmt_cli, + configuration), + DeletePartitionTask(local_cli), + DeleteCacheTask(local_cli), + DeleteQoSTask(local_cli), + DeleteLunTask(local_cli), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + +def migrate_volume(volume, host, local_cli, feature_support, configuration): + store_spec = {'volume': volume} + + work_flow = linear_flow.Flow('migrate_volume') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support, configuration), + CheckLunExistTask(local_cli), + CreateMigratedLunTask(local_cli, host, feature_support), + WaitLunOnlineTask(local_cli, rebind={'lun_id': 'tgt_lun_id'}), + CreateMigrateTask(local_cli, rebind={'src_lun_id': 'lun_id'}), + WaitMigrateDoneTask(local_cli), + AddCacheTask(local_cli), + AddPartitionTask(local_cli), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + +def create_volume_from_snapshot( + volume, src_obj, local_cli, hypermetro_rmt_cli, replication_rmt_cli, + configuration, feature_support): + store_spec = {'volume': volume} + metadata = huawei_utils.get_volume_metadata(volume) + work_flow = linear_flow.Flow('create_volume_from_snapshot') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support, configuration), + CheckSnapshotExistTask(local_cli, inject={'snapshot': src_obj})) + + if (strutils.bool_from_string(metadata.get('fastclone', False)) or + (metadata.get('fastclone') is None and + configuration.clone_mode == "fastclone")): + work_flow.add( + LunClonePreCheckTask(inject={'src_volume': src_obj}), + CreateLunCloneTask(local_cli, + rebind={'src_id': 'snapshot_id'}) + ) + elif configuration.is_dorado_v6: + work_flow.add( + CreateLunTask(local_cli, configuration, feature_support, + inject={"src_size": src_obj.volume_size}), + WaitLunOnlineTask(local_cli), + CreateClonePairTask(local_cli, feature_support, configuration, + rebind={'source_id': 'snapshot_id', + 'target_id': 'lun_id'}), + WaitClonePairDoneTask(local_cli, configuration),) + else: + work_flow.add( + CreateLunTask(local_cli, configuration, feature_support), + WaitLunOnlineTask(local_cli), + CreateLunCopyTask(local_cli, feature_support, configuration), + WaitLunCopyDoneTask(local_cli, configuration),) + + general_params = {'local_cli': local_cli, + 'hypermetro_rmt_cli': hypermetro_rmt_cli, + 'replication_rmt_cli': replication_rmt_cli, + 'configuration': configuration} + return _create_volume_from_src( + work_flow, volume, store_spec, general_params) + + +def create_volume_from_volume( + volume, src_obj, local_cli, hypermetro_rmt_cli, replication_rmt_cli, + configuration, feature_support): + store_spec = {'volume': volume} + metadata = huawei_utils.get_volume_metadata(volume) + work_flow = linear_flow.Flow('create_volume_from_volume') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support, configuration), + CheckLunExistTask(local_cli, provides=('src_lun_info', 'src_id'), + inject={'volume': src_obj}), + ) + + if (strutils.bool_from_string(metadata.get('fastclone', False)) or + (metadata.get('fastclone') is None and + configuration.clone_mode == "fastclone")): + work_flow.add( + LunClonePreCheckTask(inject={'src_volume': src_obj}), + CreateLunCloneTask(local_cli) + ) + elif 
configuration.is_dorado_v6: + work_flow.add( + CreateLunTask(local_cli, configuration, feature_support, + inject={"src_size": src_obj.size}), + WaitLunOnlineTask(local_cli), + CreateClonePairTask(local_cli, feature_support, configuration, + rebind={'source_id': 'src_id', + 'target_id': 'lun_id'}), + WaitClonePairDoneTask(local_cli, configuration),) + else: + work_flow.add( + CreateTempSnapshotTask(local_cli, feature_support), + WaitSnapshotReadyTask(local_cli), + ActiveSnapshotTask(local_cli), + CreateLunTask(local_cli, configuration, feature_support), + WaitLunOnlineTask(local_cli), + CreateLunCopyTask(local_cli, feature_support, configuration), + WaitLunCopyDoneTask(local_cli, configuration), + DeleteTempSnapshotTask(local_cli), + ) + + general_params = {'local_cli': local_cli, + 'hypermetro_rmt_cli': hypermetro_rmt_cli, + 'replication_rmt_cli': replication_rmt_cli, + 'configuration': configuration} + return _create_volume_from_src( + work_flow, volume, store_spec, general_params) + + +def _create_volume_from_src( + work_flow, volume, store_spec, general_params): + """ + Extracting Common Methods for create_volume_from_volume + and create_volume_from_snapshot + """ + local_cli = general_params.get('local_cli') + configuration = general_params.get('configuration') + replication_rmt_cli = general_params.get('replication_rmt_cli') + hypermetro_rmt_cli = general_params.get('hypermetro_rmt_cli') + work_flow.add( + ExtendVolumeTask(local_cli, inject={ + "new_size": int(volume.size) * constants.CAPACITY_UNIT}), + AddQoSTask(local_cli, configuration), + AddCacheTask(local_cli), + AddPartitionTask(local_cli), + CreateHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration), + AddHyperMetroGroupTask( + local_cli, hypermetro_rmt_cli, configuration), + CreateReplicationTask( + local_cli, replication_rmt_cli, configuration), + AddReplicationGroupTask( + local_cli, replication_rmt_cli, configuration),) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + lun_id = engine.storage.fetch('lun_id') + lun_info = engine.storage.fetch('lun_info') + hypermetro_id = engine.storage.fetch('hypermetro_id') + replication_id = engine.storage.fetch('replication_id') + return lun_id, lun_info['WWN'], hypermetro_id, replication_id + + +def create_snapshot(snapshot, local_cli, feature_support): + store_spec = {'snapshot': snapshot} + + work_flow = linear_flow.Flow('create_snapshot') + work_flow.add( + CreateSnapshotTask(local_cli, feature_support), + WaitSnapshotReadyTask(local_cli), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + snapshot_id = engine.storage.fetch('snapshot_id') + snapshot_wwn = engine.storage.fetch('snapshot_wwn') + + return snapshot_id, snapshot_wwn + + +def delete_snapshot(snapshot, local_cli): + store_spec = {'snapshot': snapshot} + work_flow = linear_flow.Flow('delete_snapshot') + work_flow.add( + CheckSnapshotExistTask(local_cli), + DeleteSnapshotTask(local_cli), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + +def extend_volume(volume, new_size, local_cli, hypermetro_rmt_cli, + replication_rmt_cli, configuration): + store_spec = {'volume': volume, + 'new_size': int(new_size) * constants.CAPACITY_UNIT} + work_flow = linear_flow.Flow('extend_volume') + work_flow.add( + CheckLunExistTask(local_cli), + ExtendHyperMetroTask(local_cli, hypermetro_rmt_cli, configuration), + ExtendReplicationTask(local_cli, replication_rmt_cli, configuration), + ExtendVolumeTask(local_cli) + ) + + engine = 
taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + +def retype(volume, new_opts, local_cli, hypermetro_rmt_cli, + replication_rmt_cli, configuration, feature_support): + store_spec = {'volume': volume} + + work_flow = linear_flow.Flow('retype_volume') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support, configuration, new_opts), + CheckLunExistTask(local_cli), + CheckLunIsInUse(), + UpdateLunTask(local_cli), + UpdateQoSTask(local_cli), + UpdateCacheTask(local_cli), + UpdatePartitionTask(local_cli), + DeleteHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration), + DeleteReplicationTask( + local_cli, replication_rmt_cli, configuration), + CreateHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration), + CreateReplicationTask( + local_cli, replication_rmt_cli, configuration), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + hypermetro_id = engine.storage.fetch('hypermetro_id') + replication_id = engine.storage.fetch('replication_id') + return hypermetro_id, replication_id + + +def retype_by_migrate(volume, new_opts, host, local_cli, hypermetro_rmt_cli, + replication_rmt_cli, configuration, feature_support): + store_spec = {'volume': volume} + + work_flow = linear_flow.Flow('retype_volume_by_migrate') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support, configuration, new_opts), + CheckLunExistTask(local_cli), + CheckLunIsInUse(), + CreateMigratedLunTask(local_cli, host, feature_support), + WaitLunOnlineTask(local_cli, rebind={'lun_id': 'tgt_lun_id'}), + CreateMigrateTask(local_cli, rebind={'src_lun_id': 'lun_id'}), + WaitMigrateDoneTask(local_cli), + UpdateQoSTask(local_cli), + AddCacheTask(local_cli), + AddPartitionTask(local_cli), + CreateHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration, + rebind={'lun_info': 'tgt_lun_info'}), + CreateReplicationTask( + local_cli, replication_rmt_cli, configuration, + rebind={'lun_info': 'tgt_lun_info'}), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + hypermetro_id = engine.storage.fetch('hypermetro_id') + replication_id = engine.storage.fetch('replication_id') + return hypermetro_id, replication_id + + +def manage_existing(volume, existing_ref, local_cli, hypermetro_rmt_cli, + replication_rmt_cli, configuration, feature_support): + store_spec = {'volume': volume} + + work_flow = linear_flow.Flow('manage_volume') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support, configuration), + ManageVolumePreCheckTask( + local_cli, volume, existing_ref, configuration), + ManageLunTask(local_cli), + UpdateQoSTask(local_cli), + UpdateLunTask(local_cli), + UpdateCacheTask(local_cli), + UpdatePartitionTask(local_cli), + DeleteHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration), + DeleteReplicationTask( + local_cli, replication_rmt_cli, configuration), + CreateHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration), + CreateReplicationTask( + local_cli, replication_rmt_cli, configuration), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + lun_info = engine.storage.fetch('lun_info') + hypermetro_id = engine.storage.fetch('hypermetro_id') + replication_id = engine.storage.fetch('replication_id') + return lun_info['ID'], lun_info['WWN'], hypermetro_id, replication_id + + +def manage_existing_snapshot(snapshot, existing_ref, local_cli): + store_spec = {'snapshot': snapshot} + + work_flow = linear_flow.Flow('manage_snapshot') + work_flow.add( + 
ManageSnapshotPreCheckTask(local_cli, snapshot, existing_ref),
+        ManageSnapshotTask(local_cli),
+    )
+
+    engine = taskflow.engines.load(work_flow, store=store_spec)
+    engine.run()
+
+    snapshot_info = engine.storage.fetch('snapshot_info')
+    return snapshot_info['ID'], snapshot_info['WWN']
+
+
+def create_group(group, local_cli, hypermetro_rmt_cli, replication_rmt_cli,
+                 configuration, feature_support):
+    opts = huawei_utils.get_group_type_params(
+        group, configuration.is_dorado_v6)
+    store_spec = {'group': group,
+                  'opts': opts}
+
+    work_flow = linear_flow.Flow('create_group')
+    work_flow.add(
+        GroupOptsCheckTask(),
+        CreateHyperMetroGroupTask(
+            local_cli, hypermetro_rmt_cli, configuration,
+            feature_support),
+        CreateReplicationGroupTask(
+            local_cli, replication_rmt_cli, configuration,
+            feature_support),
+    )
+
+    engine = taskflow.engines.load(work_flow, store=store_spec)
+    engine.run()
+
+
+def initialize_iscsi_connection(lun, lun_type, connector, client,
+                                configuration):
+    store_spec = {'connector': connector,
+                  'lun': lun,
+                  'lun_type': lun_type,
+                  'initiator': connector.get('initiator', '')}
+    work_flow = linear_flow.Flow('initialize_iscsi_connection')
+
+    if lun_type == constants.LUN_TYPE:
+        work_flow.add(CheckLunExistTask(client, rebind={'volume': 'lun'}))
+    else:
+        work_flow.add(
+            CheckSnapshotExistTask(
+                client, provides=('lun_info', 'lun_id'),
+                rebind={'snapshot': 'lun'}))
+
+    work_flow.add(
+        CreateHostTask(client, configuration.iscsi_info, configuration),
+        GetISCSIConnectionTask(client, configuration.iscsi_info),
+        AddISCSIInitiatorTask(client, configuration.iscsi_info, configuration),
+        CreateHostGroupTask(client),
+        CreateLunGroupTask(client, configuration),
+        CreateMappingViewTask(client),
+        GetISCSIPropertiesTask(),
+    )
+
+    engine = taskflow.engines.load(work_flow, store=store_spec)
+    engine.run()
+    return engine.storage.fetch('mapping_info')
+
+
+def initialize_remote_iscsi_connection(hypermetro_id, connector,
+                                       client, configuration):
+    store_spec = {'connector': connector,
+                  'lun_type': constants.LUN_TYPE,
+                  'initiator': connector.get('initiator', '')}
+    work_flow = linear_flow.Flow('initialize_remote_iscsi_connection')
+
+    work_flow.add(
+        GetHyperMetroRemoteLunTask(client, hypermetro_id),
+        CreateHostTask(client, configuration.hypermetro['iscsi_info'],
+                       configuration),
+        GetISCSIConnectionTask(client, configuration.hypermetro['iscsi_info']),
+        AddISCSIInitiatorTask(client, configuration.hypermetro['iscsi_info'],
+                              configuration),
+        CreateHostGroupTask(client),
+        CreateLunGroupTask(client, configuration),
+        CreateMappingViewTask(client),
+        # GetISCSIPropertiesTask takes no client (it only repacks results),
+        # matching the local-connection flow above.
+        GetISCSIPropertiesTask(),
+    )
+
+    engine = taskflow.engines.load(work_flow, store=store_spec)
+    engine.run()
+    return engine.storage.fetch('mapping_info')
+
+
+def terminate_iscsi_connection(lun, lun_type, connector, client,
+                               configuration):
+    store_spec = {'connector': connector,
+                  'lun': lun,
+                  'lun_type': lun_type}
+    work_flow = linear_flow.Flow('terminate_iscsi_connection')
+
+    if lun_type == constants.LUN_TYPE:
+        work_flow.add(
+            GetLunIDTask(client, rebind={'volume': 'lun'}),
+        )
+    else:
+        work_flow.add(
+            GetSnapshotIDTask(
+                client, provides='lun_id', rebind={'snapshot': 'lun'}),
+        )
+
+    work_flow.add(
+        GetLunMappingTask(client),
+        ClearLunMappingTask(client, configuration),
+    )
+
+    engine = taskflow.engines.load(work_flow, store=store_spec)
+    engine.run()
+
+
+def terminate_remote_iscsi_connection(hypermetro_id, connector, client,
+                                      configuration):
+    store_spec = {'connector': connector}
+    work_flow =
linear_flow.Flow('terminate_remote_iscsi_connection') + + work_flow.add( + GetHyperMetroRemoteLunTask(client, hypermetro_id), + GetLunMappingTask(client), + ClearLunMappingTask(client, configuration, + inject={'lun_type': constants.LUN_TYPE}), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + +def initialize_fc_connection(lun, lun_type, connector, fc_san, client, + configuration): + store_spec = {'connector': connector, + 'lun': lun, + 'lun_type': lun_type, + 'initiator': huawei_utils.convert_connector_wwns( + connector.get('wwpns', []) + )} + work_flow = linear_flow.Flow('initialize_fc_connection') + + if lun_type == constants.LUN_TYPE: + work_flow.add(CheckLunExistTask(client, rebind={'volume': 'lun'})) + else: + work_flow.add( + CheckSnapshotExistTask( + client, provides=('lun_info', 'lun_id'), + rebind={'snapshot': 'lun'})) + + work_flow.add( + CreateHostTask(client, configuration.fc_info, configuration), + GetFCConnectionTask(client, fc_san, configuration), + AddFCInitiatorTask(client, configuration.fc_info, configuration), + CreateHostGroupTask(client), + CreateLunGroupTask(client, configuration), + CreateFCPortGroupTask(client, fc_san), + CreateMappingViewTask(client), + GetFCPropertiesTask(), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + return engine.storage.fetch('mapping_info') + + +def initialize_remote_fc_connection(hypermetro_id, connector, fc_san, client, + configuration): + store_spec = {'connector': connector, + 'lun_type': constants.LUN_TYPE, + 'initiator': huawei_utils.convert_connector_wwns( + connector.get('wwpns', []) + )} + work_flow = linear_flow.Flow('initialize_remote_fc_connection') + + work_flow.add( + GetHyperMetroRemoteLunTask(client, hypermetro_id), + CreateHostTask(client, configuration.hypermetro['fc_info'], + configuration), + GetFCConnectionTask(client, fc_san, configuration), + AddFCInitiatorTask(client, configuration.hypermetro['fc_info'], + configuration), + CreateHostGroupTask(client), + CreateLunGroupTask(client, configuration), + CreateFCPortGroupTask(client, fc_san), + CreateMappingViewTask(client), + GetFCPropertiesTask(), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + return engine.storage.fetch('mapping_info') + + +def terminate_fc_connection(lun, lun_type, connector, fc_san, client, + configuration): + store_spec = {'connector': connector, + 'lun': lun, + 'lun_type': lun_type} + work_flow = linear_flow.Flow('terminate_fc_connection') + + if lun_type == constants.LUN_TYPE: + work_flow.add( + GetLunIDTask(client, rebind={'volume': 'lun'}), + ) + else: + work_flow.add( + GetSnapshotIDTask( + client, provides='lun_id', rebind={'snapshot': 'lun'}), + ) + + work_flow.add( + GetLunMappingTask(client), + ClearLunMappingTask(client, configuration, fc_san, is_fc=True), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + return engine.storage.fetch('ini_tgt_map') + + +def terminate_remote_fc_connection(hypermetro_id, connector, fc_san, client, + configuration): + store_spec = {'connector': connector} + work_flow = linear_flow.Flow('terminate_remote_fc_connection') + + work_flow.add( + GetHyperMetroRemoteLunTask(client, hypermetro_id), + GetLunMappingTask(client), + ClearLunMappingTask(client, configuration, fc_san, is_fc=True, + inject={'lun_type': constants.LUN_TYPE}), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + return engine.storage.fetch('ini_tgt_map') + + +def 
failover(volumes, local_cli, replication_rmt_cli, configuration): + store_spec = {'volumes': volumes} + work_flow = linear_flow.Flow('failover') + work_flow.add( + ClassifyVolumeTask(), + FailoverVolumeTask(local_cli, replication_rmt_cli, + configuration), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + volumes_update = engine.storage.fetch('volumes_update') + return volumes_update + + +def failback(volumes, local_cli, replication_rmt_cli, configuration): + store_spec = {'volumes': volumes} + work_flow = linear_flow.Flow('failback') + work_flow.add( + ClassifyVolumeTask(), + FailbackVolumeTask(local_cli, replication_rmt_cli, + configuration), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + volumes_update = engine.storage.fetch('volumes_update') + return volumes_update + + +def revert_to_snapshot(snapshot, local_cli, rollback_speed): + store_spec = {'snapshot': snapshot} + work_flow = linear_flow.Flow('revert_to_snapshot') + work_flow.add( + CheckSnapshotExistTask(local_cli), + RevertToSnapshotTask(local_cli, rollback_speed), + WaitSnapshotRollbackDoneTask(local_cli), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() diff --git a/Cinder/Bobcat/huawei_utils.py b/Cinder/Bobcat/huawei_utils.py new file mode 100644 index 0000000..3cc9dc3 --- /dev/null +++ b/Cinder/Bobcat/huawei_utils.py @@ -0,0 +1,703 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
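+
+"""Utility helpers for the Huawei driver: name encoding, volume-type and
+QoS option parsing, and lookup helpers for LUNs, snapshots and mappings."""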
+ +import hashlib +import json +import re + +from oslo_log import log as logging +from oslo_utils import strutils +import tenacity as retry_module +import six + +from cinder import context +from cinder import exception +from cinder.i18n import _ +from cinder import objects +from cinder.objects import fields +from cinder.volume.drivers.huawei import constants +from cinder.volume import qos_specs +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + + +def encode_name(name): + encoded_name = hashlib.md5(name.encode('utf-8')).hexdigest() + prefix = name.split('-')[0] + '-' + postfix = encoded_name[:constants.MAX_NAME_LENGTH - len(prefix)] + return prefix + postfix + + +def old_encode_name(name): + pre_name = name.split("-")[0] + vol_encoded = six.text_type(hash(name)) + if vol_encoded.startswith('-'): + newuuid = pre_name + vol_encoded + else: + newuuid = pre_name + '-' + vol_encoded + return newuuid + + +def encode_host_name(name): + if name and len(name) > constants.MAX_NAME_LENGTH: + encoded_name = hashlib.md5(name.encode('utf-8')).hexdigest() + return encoded_name[:constants.MAX_NAME_LENGTH] + return name + + +def old_encode_host_name(name): + if name and len(name) > constants.MAX_NAME_LENGTH: + name = six.text_type(hash(name)) + return name + + +def wait_for_condition(func, interval, timeout): + def _retry_on_result(result): + return not result + + def _retry_on_exception(): + return False + + def _retry_use_retrying(): + ret = retry_module.Retrying(retry_on_result=_retry_on_result, + retry_on_exception=_retry_on_exception, + wait_fixed=interval * 1000, + stop_max_delay=timeout * 1000) + ret.call(func) + + def _retry_use_tenacity(): + ret = retry_module.Retrying( + wait=retry_module.wait_fixed(interval), + retry=retry_module.retry_if_result(_retry_on_result), + stop=retry_module.stop_after_delay(timeout) + ) + ret(func) + + _retry_use_tenacity() + + +def _get_volume_type(volume): + if volume.volume_type: + return volume.volume_type + if volume.volume_type_id: + return volume_types.get_volume_type(None, volume.volume_type_id) + + +def get_volume_params(volume, is_dorado_v6=False): + volume_type = _get_volume_type(volume) + return get_volume_type_params(volume_type, is_dorado_v6) + + +def get_volume_type_params(volume_type, is_dorado_v6=False): + specs = {} + if isinstance(volume_type, dict) and volume_type.get('extra_specs'): + specs = volume_type['extra_specs'] + elif isinstance(volume_type, objects.VolumeType + ) and volume_type.extra_specs: + specs = volume_type.extra_specs + + vol_params = get_volume_params_from_specs(specs) + vol_params['qos'] = None + + if isinstance(volume_type, dict) and volume_type.get('qos_specs_id'): + vol_params['qos'] = _get_qos_specs(volume_type['qos_specs_id'], + is_dorado_v6) + elif isinstance(volume_type, objects.VolumeType + ) and volume_type.qos_specs_id: + vol_params['qos'] = _get_qos_specs(volume_type.qos_specs_id, + is_dorado_v6) + + LOG.info('volume opts %s.', vol_params) + return vol_params + + +def get_volume_params_from_specs(specs): + opts = _get_opts_from_specs(specs) + + _verify_smartcache_opts(opts) + _verify_smartpartition_opts(opts) + _verify_smartthin_opts(opts) + _verify_controller_opts(opts) + _verify_application_type_opts(opts) + + return opts + + +def _get_bool_param(k, v): + words = v.split() + if len(words) == 2 and words[0] == '': + return strutils.bool_from_string(words[1], strict=True) + + msg = _("%(k)s spec must be specified as %(k)s=' True' " + "or ' False'.") % {'k': k} + LOG.error(msg) + raise 
exception.InvalidInput(reason=msg) + + +def _get_replication_type_param(k, v): + words = v.split() + if len(words) == 2 and words[0] == '': + REPLICA_SYNC_TYPES = {'sync': constants.REPLICA_SYNC_MODEL, + 'async': constants.REPLICA_ASYNC_MODEL} + sync_type = words[1].lower() + if sync_type in REPLICA_SYNC_TYPES: + return REPLICA_SYNC_TYPES[sync_type] + + msg = _("replication_type spec must be specified as " + "replication_type=' sync' or ' async'.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +def _get_string_param(k, v): + if not v: + msg = _("%s spec must be specified as a string.") % k + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + return v + + +def _get_opts_from_specs(specs): + """Get the well defined extra specs.""" + opts = {} + + opts_capability = { + 'capabilities:smarttier': (_get_bool_param, False), + 'capabilities:smartcache': (_get_bool_param, False), + 'capabilities:smartpartition': (_get_bool_param, False), + 'capabilities:thin_provisioning_support': (_get_bool_param, False), + 'capabilities:thick_provisioning_support': (_get_bool_param, False), + 'capabilities:hypermetro': (_get_bool_param, False), + 'capabilities:replication_enabled': (_get_bool_param, False), + 'replication_type': (_get_replication_type_param, + constants.REPLICA_ASYNC_MODEL), + 'smarttier:policy': (_get_string_param, None), + 'smartcache:cachename': (_get_string_param, None), + 'smartpartition:partitionname': (_get_string_param, None), + 'huawei_controller:controllername': (_get_string_param, None), + 'capabilities:dedup': (_get_bool_param, None), + 'capabilities:compression': (_get_bool_param, None), + 'capabilities:huawei_controller': (_get_bool_param, False), + 'capabilities:huawei_application_type': (_get_bool_param, False), + 'huawei_application_type:applicationname': (_get_string_param, None), + } + + def _get_opt_key(spec_key): + key_split = spec_key.split(':') + if len(key_split) == 1: + return key_split[0] + else: + return key_split[1] + + for spec_key in opts_capability: + opt_key = _get_opt_key(spec_key) + opts[opt_key] = opts_capability[spec_key][1] + + for key, value in six.iteritems(specs): + if key not in opts_capability: + continue + + func = opts_capability[key][0] + opt_key = _get_opt_key(key) + opts[opt_key] = func(key, value) + + return opts + + +def _check_and_set_qos_info(specs, qos): + for key, value in specs.items(): + if key in constants.QOS_IGNORED_PARAMS: + LOG.info("this qos spec param %s is front-end qos param, " + "backend driver will ignore it", key) + continue + + if key not in constants.QOS_SPEC_KEYS: + msg = _('Invalid QoS %s specification.') % key + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if key != 'IOType' and int(value) <= 0: + msg = _('QoS config is wrong. %s must > 0.') % key + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + qos[key.upper()] = value + + +def _get_qos_specs(qos_specs_id, is_dorado_v6): + ctxt = context.get_admin_context() + qos_specs_info = qos_specs.get_qos_specs(ctxt, qos_specs_id) + if qos_specs_info is None: + return {} + + if qos_specs_info.get('consumer') == 'front-end': + return {} + + specs = qos_specs_info.get('specs', {}) + LOG.info('The QoS specs is: %s.', specs) + + qos = {'IOTYPE': specs.pop('IOType', None)} + + if qos['IOTYPE'] not in constants.QOS_IOTYPES: + msg = _('IOType must be in %(types)s.' 
+ ) % {'types': constants.QOS_IOTYPES} + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + _check_and_set_qos_info(specs, qos) + + if len(qos) < 2: + msg = _('QoS policy must specify both IOType and one another ' + 'qos spec, got policy: %s.') % qos + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if is_dorado_v6: + return qos + + qos_keys = set(qos.keys()) + if (qos_keys & set(constants.UPPER_LIMIT_KEYS) and + qos_keys & set(constants.LOWER_LIMIT_KEYS)): + msg = _('QoS policy upper limit and lower limit ' + 'conflict, QoS policy: %s.') % qos + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + return qos + + +def _verify_smartthin_opts(opts): + if (opts['thin_provisioning_support'] and + opts['thick_provisioning_support']): + msg = _('Cannot set thin and thick at the same time.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + elif opts['thin_provisioning_support']: + opts['LUNType'] = constants.THIN_LUNTYPE + elif opts['thick_provisioning_support']: + opts['LUNType'] = constants.THICK_LUNTYPE + + +def _verify_smartcache_opts(opts): + if opts['smartcache'] and not opts['cachename']: + msg = _('Cache name is not specified, please set ' + 'smartcache:cachename in extra specs.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +def _verify_application_type_opts(opts): + if opts['huawei_application_type'] and not opts['applicationname']: + msg = _('WorkloadType name is None, please set ' + 'huawei_application_type:applicationname in extra specs.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +def _verify_controller_opts(opts): + if opts['huawei_controller'] and not opts['controllername']: + msg = _('Controller name is None, please set ' + 'huawei_controller:controllername in extra specs.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +def _verify_smartpartition_opts(opts): + if opts['smartpartition'] and not opts['partitionname']: + msg = _('Partition name is not specified, please set ' + 'smartpartition:partitionname in extra specs.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +def wait_lun_online(client, lun_id, wait_interval=None, wait_timeout=None): + def _lun_online(): + result = client.get_lun_info_by_id(lun_id) + if result['HEALTHSTATUS'] not in (constants.STATUS_HEALTH, + constants.STATUS_INITIALIZE): + err_msg = _('LUN %s is abnormal.') % lun_id + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + if result['RUNNINGSTATUS'] in (constants.LUN_INITIALIZING, + constants.STATUS_INITIALIZE): + return False + + return True + + if not wait_interval: + wait_interval = constants.DEFAULT_WAIT_INTERVAL + if not wait_timeout: + wait_timeout = wait_interval * 10 + + wait_for_condition(_lun_online, wait_interval, wait_timeout) + + +def is_not_exist_exc(exc): + msg = getattr(exc, 'msg', '') + return 'not exist' in msg + + +def to_string(**kwargs): + return json.dumps(kwargs) if kwargs else '' + + +def to_dict(text): + return json.loads(text) if text else {} + + +def get_volume_private_data(volume): + if not volume.provider_location: + return {} + + try: + info = json.loads(volume.provider_location) + except Exception: + LOG.exception("Decode provider_location error") + return {} + + if isinstance(info, dict): + if "huawei" in volume.provider_location: + info['hypermetro'] = (info.get('hypermetro_id') + or info.get('hypermetro')) + return info + else: + return {} + + # To keep compatible with old driver version + return {'huawei_lun_id': 
six.text_type(info), + 'huawei_lun_wwn': volume.admin_metadata.get('huawei_lun_wwn'), + 'huawei_sn': volume.metadata.get('huawei_sn'), + 'hypermetro': True if volume.metadata.get( + 'hypermetro_id') else False, + } + + +def get_volume_metadata(volume): + if isinstance(volume, objects.Volume): + return volume.metadata + if volume.get('volume_metadata'): + return {item['key']: item['value'] for item in + volume['volume_metadata']} + return {} + + +def get_replication_data(volume): + if not volume.replication_driver_data: + return {} + + return json.loads(volume.replication_driver_data) + + +def get_snapshot_private_data(snapshot): + if not snapshot.provider_location: + return {} + + info = json.loads(snapshot.provider_location) + if isinstance(info, dict): + return info + + # To keep compatible with old driver version + return {'huawei_snapshot_id': six.text_type(info), + 'huawei_snapshot_wwn': snapshot.metadata.get( + 'huawei_snapshot_wwn'), + } + + +def get_external_lun_info(client, external_ref): + lun_info = None + if 'source-id' in external_ref: + lun = client.get_lun_info_by_id(external_ref['source-id']) + lun_info = client.get_lun_info_by_name(lun['NAME']) + elif 'source-name' in external_ref: + lun_info = client.get_lun_info_by_name(external_ref['source-name']) + + return lun_info + + +def get_external_snapshot_info(client, external_ref): + snapshot_info = None + if 'source-id' in external_ref: + snapshot_info = client.get_snapshot_info_by_id( + external_ref['source-id']) + elif 'source-name' in external_ref: + snapshot_info = client.get_snapshot_info_by_name( + external_ref['source-name']) + + return snapshot_info + + +def get_lun_info(client, volume): + metadata = get_volume_private_data(volume) + + volume_name = encode_name(volume.id) + lun_info = client.get_lun_info_by_name(volume_name) + + # If new encoded way not found, try the old encoded way. + if not lun_info: + volume_name = old_encode_name(volume.id) + lun_info = client.get_lun_info_by_name(volume_name) + + if not lun_info and metadata.get('huawei_lun_id'): + lun_info = client.get_lun_info_filter_id(metadata['huawei_lun_id']) + + if lun_info and ('huawei_lun_wwn' in metadata and + lun_info.get('WWN') != metadata['huawei_lun_wwn']): + lun_info = None + + # Judge whether this volume has experienced data migration or not + if not lun_info: + volume_name = encode_name(volume.name_id) + lun_info = client.get_lun_info_by_name(volume_name) + + if not lun_info: + volume_name = old_encode_name(volume.name_id) + lun_info = client.get_lun_info_by_name(volume_name) + + return lun_info + + +def get_snapshot_info(client, snapshot): + name = encode_name(snapshot.id) + snapshot_info = client.get_snapshot_info_by_name(name) + + # If new encoded way not found, try the old encoded way. 
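+    # (Objects created by older driver versions were named with
+    # old_encode_name(), so both encodings have to be probed.)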
+    if not snapshot_info:
+        name = old_encode_name(snapshot.id)
+        snapshot_info = client.get_snapshot_info_by_name(name)
+
+    return snapshot_info
+
+
+def get_host_id(client, host_name):
+    encoded_name = encode_host_name(host_name)
+    host_id = client.get_host_id_by_name(encoded_name)
+    if encoded_name == host_name:
+        return host_id
+
+    if not host_id:
+        encoded_name = old_encode_host_name(host_name)
+        host_id = client.get_host_id_by_name(encoded_name)
+
+    return host_id
+
+
+def get_hypermetro_group(client, group_id):
+    encoded_name = encode_name(group_id)
+    group = client.get_metrogroup_by_name(encoded_name)
+    if not group:
+        encoded_name = old_encode_name(group_id)
+        group = client.get_metrogroup_by_name(encoded_name)
+    return group
+
+
+def get_replication_group(client, group_id):
+    encoded_name = encode_name(group_id)
+    group = client.get_replication_group_by_name(encoded_name)
+    if not group:
+        encoded_name = old_encode_name(group_id)
+        group = client.get_replication_group_by_name(encoded_name)
+    return group
+
+
+def get_volume_model_update(volume, **kwargs):
+    private_data = get_volume_private_data(volume)
+
+    if kwargs.get('hypermetro_id'):
+        private_data['hypermetro'] = True
+    else:
+        private_data['hypermetro'] = False
+    if 'hypermetro_id' in private_data:
+        private_data.pop('hypermetro_id')
+        private_data['hypermetro'] = False
+
+    if 'huawei_lun_id' in kwargs:
+        private_data['huawei_lun_id'] = kwargs['huawei_lun_id']
+    if 'huawei_lun_wwn' in kwargs:
+        private_data['huawei_lun_wwn'] = kwargs['huawei_lun_wwn']
+    if 'huawei_sn' in kwargs:
+        private_data['huawei_sn'] = kwargs['huawei_sn']
+
+    model_update = {'provider_location': to_string(**private_data)}
+
+    if kwargs.get('replication_id'):
+        model_update['replication_driver_data'] = to_string(
+            pair_id=kwargs.get('replication_id'))
+        model_update['replication_status'] = fields.ReplicationStatus.ENABLED
+    else:
+        model_update['replication_driver_data'] = None
+        model_update['replication_status'] = fields.ReplicationStatus.DISABLED
+
+    return model_update
+
+
+def get_group_type_params(group, is_dorado_v6=False):
+    opts = []
+    for volume_type in group.volume_types:
+        opt = get_volume_type_params(volume_type, is_dorado_v6)
+        opts.append(opt)
+    return opts
+
+
+def get_hypermetro(client, volume):
+    lun_name = encode_name(volume.id)
+    hypermetro = client.get_hypermetro_by_lun_name(lun_name)
+    return hypermetro
+
+
+def get_config_info_by_ini_name(ini_info, initiator):
+    """Get the config entry in ini_info that matches the initiator name."""
+    LOG.info("Begin to get config info by initiator name, "
+             "ini_info is %s, initiator is %s", ini_info, initiator)
+    for _, info in ini_info.items():
+        ini_name = info.get('Name')
+        if not ini_name:
+            continue
+        # When creating the host in initialize_fc_connection, just return
+        # the first matching entry in config_info; in that scenario the
+        # initiator is a list of initiator names. In other scenarios, the
+        # initiator is a single initiator name string.
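+        # e.g. initiator may be a WWPN list like ['21000024ff543b0e'] from
+        # an FC connector, or a single IQN string from an iSCSI connector
+        # (example values are illustrative only).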
+        if isinstance(initiator, list) and ini_name in initiator:
+            return info
+        if ini_name == initiator:
+            return info
+    return {}
+
+
+def get_config_info_by_host_name(ini_info, host_name):
+    """Get the config entry in ini_info that matches the host name."""
+    LOG.info("Begin to get config info by host name, "
+             "ini_info is %s, host_name is %s", ini_info, host_name)
+    for _, info in ini_info.items():
+        info_host_name = info.get('HostName')
+        if not info_host_name:
+            continue
+        if info_host_name == '*':
+            return info
+        if re.search(info_host_name, host_name):
+            return info
+    return {}
+
+
+def find_config_info(protocol_info, connector=None, initiator=None):
+    """
+    Find the initiator config parsed from the huawei config xml.
+    params:
+        protocol_info: a dict with protocol info, e.g.
+            {'initiators': {'iscsi_ini_01': {'Name': 'iscsi_ini_01', 'ALUA': '1'}}}
+        connector: host info such as host_name and os_type.
+        initiator: initiator name(s) of the host, a string or a list.
+    returns:
+        a dict with the final config info of the initiator; for the
+        protocol_info above it returns {'Name': 'iscsi_ini_01', 'ALUA': '1'}.
+    """
+    all_ini_info = protocol_info.get('initiators', {})
+
+    find_info = get_config_info_by_ini_name(all_ini_info, initiator)
+    if not find_info:
+        find_info = get_config_info_by_host_name(all_ini_info, connector.get('host'))
+
+    return find_info
+
+
+def is_support_clone_pair(client):
+    array_info = client.get_array_info()
+    version_info = array_info['PRODUCTVERSION']
+    return version_info >= constants.SUPPORT_CLONE_PAIR_VERSION
+
+
+def need_migrate(volume, host, new_opts, orig_lun_info):
+    if volume.host != host['host']:
+        return True
+    elif ('LUNType' in new_opts and
+            new_opts['LUNType'] != orig_lun_info['ALLOCTYPE']):
+        return True
+    elif (new_opts['compression'] and
+            not (orig_lun_info.get('ENABLECOMPRESSION') == 'true')):
+        return True
+    elif (new_opts['dedup'] and
+            not (orig_lun_info.get('ENABLESMARTDEDUP') == 'true')):
+        return True
+    return False
+
+
+def remove_lun_from_lungroup(client, lun_id, force_delete_volume):
+    lun_group_ids = client.get_lungroup_ids_by_lun_id(lun_id)
+    if lun_group_ids:
+        if force_delete_volume:
+            for lun_group_id in lun_group_ids:
+                client.remove_lun_from_lungroup(lun_group_id, lun_id,
+                                                constants.LUN_TYPE)
+        elif len(lun_group_ids) == 1:
+            client.remove_lun_from_lungroup(lun_group_ids[0], lun_id,
+                                            constants.LUN_TYPE)
+
+
+def get_mapping_info(client, lun_id):
+    mappingview_id, lungroup_id, hostgroup_id, portgroup_id, host_id = (
+        None, None, None, None, None)
+    lungroup_ids = client.get_lungroup_ids_by_lun_id(lun_id)
+    if len(lungroup_ids) == 1:
+        lungroup_id = lungroup_ids[0]
+        mapping_infos = client.get_mappingview_by_lungroup_id(lungroup_id)
+        if len(mapping_infos) == 1:
+            mapping_info = mapping_infos[0]
+            mappingview_id = mapping_info.get('ID')
+            hostgroup_id = mapping_info.get('hostGroupId')
+            portgroup_id = mapping_info.get('portGroupId')
+            host_ids = client.get_host_by_hostgroup_id(hostgroup_id)
+            if len(host_ids) == 1:
+                host_id = host_ids[0]
+    elif len(lungroup_ids) < 1:
+        LOG.warning('lun %s is not added to any lungroup', lun_id)
+    else:
+        LOG.warning('lun %s is added to multiple lungroups', lun_id)
+
+    return mappingview_id, lungroup_id, hostgroup_id, portgroup_id, host_id
+
+
+def check_volume_type_valid(opt):
+    if opt.get('hypermetro') and opt.get('replication_enabled'):
+        msg = _("Hypermetro and replication cannot be "
+                "specified in the same 
volume_type.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + +def mask_dict_sensitive_info(data, secret="***"): + # mask sensitive data in the dictionary + if not isinstance(data, dict): + return data + + out = {} + for key, value in data.items(): + if isinstance(value, dict): + value = mask_dict_sensitive_info(value, secret=secret) + elif key in constants.SENSITIVE_KEYS: + value = secret + out[key] = value + + return strutils.mask_dict_password(out) + + +def convert_connector_wwns(wwns): + return [x.lower() for x in wwns] diff --git a/Cinder/Bobcat/hypermetro.py b/Cinder/Bobcat/hypermetro.py new file mode 100644 index 0000000..52ed19d --- /dev/null +++ b/Cinder/Bobcat/hypermetro.py @@ -0,0 +1,345 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging +from oslo_utils import strutils + +import taskflow.engines +from taskflow.patterns import linear_flow +from taskflow import task +from taskflow.types import failure + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.huawei import constants +from cinder.volume.drivers.huawei import huawei_utils + + +LOG = logging.getLogger(__name__) + + +class _CheckCreateConditionTask(task.Task): + default_provides = set(('domain_id', 'remote_pool_id')) + + def __init__(self, client, hypermetro_configs, *args, **kwargs): + super(_CheckCreateConditionTask, self).__init__(*args, **kwargs) + self.client = client + self.hypermetro_configs = hypermetro_configs + + def execute(self): + domain_name = self.hypermetro_configs['metro_domain'] + domain_id = self.client.get_hypermetro_domain_id(domain_name) + if not domain_id: + msg = _("Hypermetro domain %s doesn't exist.") % domain_name + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # Get the remote pool info. + hypermetro_pool = self.hypermetro_configs['storage_pools'][0] + pool_id = self.client.get_pool_id(hypermetro_pool) + if not pool_id: + msg = _("Remote pool %s does not exist.") % hypermetro_pool + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return {'domain_id': domain_id, + 'remote_pool_id': pool_id} + + +class _CreateRemoteLunTask(task.Task): + default_provides = set(('remote_lun_id',)) + + def __init__(self, client, *args, **kwargs): + super(_CreateRemoteLunTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, lun_params, remote_pool_id): + # Create remote hypermetro lun. 
+        lun_params['PARENTID'] = remote_pool_id
+        remote_lun = self.client.create_lun(lun_params)
+        huawei_utils.wait_lun_online(self.client, remote_lun['ID'])
+        return {'remote_lun_id': remote_lun['ID']}
+
+    def revert(self, result, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        self.client.delete_lun(result['remote_lun_id'])
+
+
+class _CreateHyperMetroTask(task.Task):
+    default_provides = set(('hypermetro_id',))
+
+    def __init__(self, client, hypermetro_configs, is_sync, *args, **kwargs):
+        super(_CreateHyperMetroTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.hypermetro_configs = hypermetro_configs
+        self.sync = is_sync
+
+    def _is_sync_completed(self, metro_id):
+        metro_info = self.client.get_hypermetro_by_id(metro_id)
+        if ((metro_info['HEALTHSTATUS'] != constants.METRO_HEALTH_NORMAL) or
+                metro_info['RUNNINGSTATUS'] not in (
+                    constants.METRO_RUNNING_NORMAL,
+                    constants.METRO_RUNNING_SYNC,
+                    constants.RUNNING_TO_BE_SYNC)):
+            msg = _("HyperMetro pair %(id)s is not in an available status, "
+                    "RunningStatus is: %(run)s, HealthStatus is: %(health)s"
+                    ) % {"id": metro_id,
+                         "run": metro_info.get('RUNNINGSTATUS'),
+                         "health": metro_info.get("HEALTHSTATUS")}
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        if metro_info.get('RUNNINGSTATUS') == constants.METRO_RUNNING_NORMAL:
+            return True
+        return False
+
+    def execute(self, domain_id, local_lun_id, remote_lun_id):
+        hypermetro_param = {"DOMAINID": domain_id,
+                            "HCRESOURCETYPE": '1',
+                            "ISFIRSTSYNC": False,
+                            "LOCALOBJID": local_lun_id,
+                            "REMOTEOBJID": remote_lun_id,
+                            "SPEED": self.hypermetro_configs['sync_speed']}
+        if self.sync:
+            hypermetro_param.update({"ISFIRSTSYNC": True})
+
+        hypermetro_pair = self.client.create_hypermetro(
+            hypermetro_param)
+        if self.sync:
+            self.client.sync_hypermetro(hypermetro_pair['ID'])
+            if strutils.bool_from_string(
+                    self.hypermetro_configs['metro_sync_completed']):
+                huawei_utils.wait_for_condition(
+                    lambda: self._is_sync_completed(hypermetro_pair['ID']),
+                    constants.DEFAULT_WAIT_INTERVAL,
+                    constants.DEFAULT_WAIT_TIMEOUT)
+
+        return {'hypermetro_id': hypermetro_pair['ID']}
+
+
+class HuaweiHyperMetro(object):
+    def __init__(self, local_cli, remote_cli, configs):
+        self.local_cli = local_cli
+        self.remote_cli = remote_cli
+        self.configs = configs.hypermetro
+        self.configuration = configs
+
+    def create_hypermetro(self, local_lun_id, lun_params, is_sync):
+        LOG.info('To create hypermetro for local lun %s', local_lun_id)
+
+        store_spec = {'local_lun_id': local_lun_id,
+                      'lun_params': lun_params}
+        work_flow = linear_flow.Flow('create_hypermetro')
+        work_flow.add(_CheckCreateConditionTask(self.remote_cli, self.configs),
+                      _CreateRemoteLunTask(self.remote_cli),
+                      _CreateHyperMetroTask(self.local_cli, self.configs,
+                                            is_sync))
+
+        engine = taskflow.engines.load(work_flow, store=store_spec)
+        engine.run()
+
+        return engine.storage.fetch('hypermetro_id')
+
+    def delete_hypermetro(self, volume):
+        lun_name = huawei_utils.encode_name(volume.id)
+        hypermetro = self.local_cli.get_hypermetro_by_lun_name(lun_name)
+
+        # Judge whether this volume has experienced data migration or not
+        if not hypermetro:
+            lun_name = huawei_utils.encode_name(volume.name_id)
+            hypermetro = self.local_cli.get_hypermetro_by_lun_name(lun_name)
+
+        if hypermetro:
+            huawei_utils.remove_lun_from_lungroup(
+                self.remote_cli, hypermetro['REMOTEOBJID'],
+                self.configuration.force_delete_volume)
+            if (hypermetro['RUNNINGSTATUS'] in (
+                    constants.METRO_RUNNING_NORMAL,
+
constants.METRO_RUNNING_SYNC)):
+                self.local_cli.stop_hypermetro(hypermetro['ID'])
+
+            self.local_cli.delete_hypermetro(hypermetro['ID'])
+            self.remote_cli.delete_lun(hypermetro['REMOTEOBJID'])
+        else:
+            remote_lun_info = self.remote_cli.get_lun_info_by_name(lun_name)
+            if remote_lun_info:
+                self.remote_cli.delete_lun(remote_lun_info['ID'])
+
+    def extend_hypermetro(self, hypermetro_id, new_size):
+        LOG.info('Extend hypermetro pair %s', hypermetro_id)
+        metro_info = self.remote_cli.get_hypermetro_by_id(hypermetro_id)
+        metrogroup = None
+        if metro_info['ISINCG'] == 'true':
+            cg_id = metro_info['CGID']
+            metrogroup = huawei_utils.get_hypermetro_group(
+                self.local_cli, cg_id)
+
+        if metrogroup:
+            self._stop_consistencygroup_if_need(metrogroup)
+        elif ((metro_info['HEALTHSTATUS'] == constants.METRO_HEALTH_NORMAL)
+                and metro_info['RUNNINGSTATUS'] in (
+                    constants.METRO_RUNNING_NORMAL,
+                    constants.METRO_RUNNING_SYNC)):
+            self.local_cli.stop_hypermetro(hypermetro_id)
+
+        try:
+            self.remote_cli.extend_lun(metro_info['LOCALOBJID'], new_size)
+            self.local_cli.extend_lun(metro_info['REMOTEOBJID'], new_size)
+        finally:
+            if metrogroup:
+                self.local_cli.sync_metrogroup(metrogroup['ID'])
+            else:
+                self.local_cli.sync_hypermetro(hypermetro_id)
+
+    def sync_hypermetro(self, hypermetro_id):
+        self.local_cli.sync_hypermetro(hypermetro_id)
+
+    def create_consistencygroup(self, group_id):
+        LOG.info("Create hypermetro consistency group %s.", group_id)
+
+        domain_name = self.configs['metro_domain']
+        domain_id = self.local_cli.get_hypermetro_domain_id(domain_name)
+        if not domain_id:
+            msg = _("Hypermetro domain %s doesn't exist.") % domain_name
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        params = {"NAME": huawei_utils.encode_name(group_id),
+                  "DESCRIPTION": group_id,
+                  "RECOVERYPOLICY": "1",
+                  "SPEED": self.configs['sync_speed'],
+                  "DOMAINID": domain_id}
+        self.local_cli.create_metrogroup(params)
+
+    def delete_consistencygroup(self, group_id, volumes):
+        LOG.info("Delete hypermetro consistency group %s.", group_id)
+
+        metrogroup = huawei_utils.get_hypermetro_group(self.local_cli,
+                                                       group_id)
+        if not metrogroup:
+            LOG.warning('Hypermetro group %s to delete does not exist.',
+                        group_id)
+            return
+
+        self._stop_consistencygroup_if_need(metrogroup)
+        self._remove_volume_from_metrogroup(volumes, metrogroup['ID'])
+        self.local_cli.delete_metrogroup(metrogroup['ID'])
+
+    def _check_metro_in_group(self, metrogroup_id, metro_id):
+        metro_info = self.local_cli.get_hypermetro_by_id(metro_id)
+        return (metro_info and metro_info.get('ISINCG') == 'true' and
+                metro_info.get('CGID') == metrogroup_id)
+
+    def _ensure_hypermetro_in_group(self, metrogroup_id, metro_ids):
+        for metro_id in metro_ids:
+            huawei_utils.wait_for_condition(
+                lambda: self._check_metro_in_group(metrogroup_id, metro_id),
+                constants.DEFAULT_WAIT_INTERVAL,
+                constants.DEFAULT_WAIT_INTERVAL * 10)
+
+    def _ensure_hypermetro_not_in_group(self, metrogroup_id, metro_ids):
+        for metro_id in metro_ids:
+            huawei_utils.wait_for_condition(
+                lambda: not self._check_metro_in_group(metrogroup_id,
+                                                       metro_id),
+                constants.DEFAULT_WAIT_INTERVAL,
+                constants.DEFAULT_WAIT_INTERVAL * 10)
+            self.local_cli.sync_hypermetro(metro_id)
+
+    def _add_volume_to_metrogroup(self, volumes, metrogroup_id):
+        metro_ids = []
+        for volume in volumes:
+            metadata = huawei_utils.get_volume_private_data(volume)
+            if not metadata.get('hypermetro'):
+                LOG.warning("Volume %s doesn't have hypermetro.", volume.id)
+                continue
+
+            hypermetro = 
huawei_utils.get_hypermetro(self.local_cli, volume)
+            if not hypermetro:
+                LOG.warning("Volume %s doesn't have hypermetro on the array.",
+                            volume.id)
+                continue
+
+            metro_id = hypermetro['ID']
+            self._stop_hypermetro_if_need(metro_id)
+            self.local_cli.add_metro_to_metrogroup(metrogroup_id, metro_id)
+            metro_ids.append(metro_id)
+
+        self._ensure_hypermetro_in_group(metrogroup_id, metro_ids)
+
+    def _remove_volume_from_metrogroup(self, volumes, metrogroup_id):
+        metro_ids = []
+        for volume in volumes:
+            metadata = huawei_utils.get_volume_private_data(volume)
+            if not metadata.get('hypermetro'):
+                LOG.warning("Volume %s doesn't have hypermetro.", volume.id)
+                continue
+
+            hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume)
+            if not hypermetro:
+                LOG.warning("Volume %s doesn't have hypermetro on the array.",
+                            volume.id)
+                continue
+
+            metro_id = hypermetro['ID']
+            self.local_cli.remove_metro_from_metrogroup(
+                metrogroup_id, metro_id)
+            metro_ids.append(metro_id)
+
+        self._ensure_hypermetro_not_in_group(metrogroup_id, metro_ids)
+
+    def update_consistencygroup(self, group_id, add_volumes, remove_volumes):
+        LOG.info("Update hypermetro consistency group %s.", group_id)
+
+        metrogroup = huawei_utils.get_hypermetro_group(
+            self.local_cli, group_id)
+        if not metrogroup:
+            msg = _('Hypermetro group %s to update does not exist.') % group_id
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        self._stop_consistencygroup_if_need(metrogroup)
+        self._add_volume_to_metrogroup(add_volumes, metrogroup['ID'])
+        self._remove_volume_from_metrogroup(remove_volumes, metrogroup['ID'])
+        self.local_cli.sync_metrogroup(metrogroup['ID'])
+
+    def _stop_consistencygroup_if_need(self, metrogroup):
+        if (metrogroup['HEALTHSTATUS'] == constants.METRO_HEALTH_NORMAL and
+                metrogroup['RUNNINGSTATUS'] in
+                (constants.METRO_RUNNING_NORMAL,
+                 constants.METRO_RUNNING_SYNC)):
+            self.local_cli.stop_metrogroup(metrogroup['ID'])
+
+    def _stop_hypermetro_if_need(self, metro_id):
+        metro_info = self.local_cli.get_hypermetro_by_id(metro_id)
+        if metro_info and (
+                (metro_info['HEALTHSTATUS'] == constants.METRO_HEALTH_NORMAL)
+                and metro_info['RUNNINGSTATUS'] in (
+                    constants.METRO_RUNNING_NORMAL,
+                    constants.METRO_RUNNING_SYNC)):
+            self.local_cli.stop_hypermetro(metro_id)
+
+    def add_hypermetro_to_group(self, group_id, metro_id):
+        metrogroup = huawei_utils.get_hypermetro_group(
+            self.local_cli, group_id)
+        if not metrogroup:
+            msg = _('Hypermetro group %s does not exist.') % group_id
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        self._stop_consistencygroup_if_need(metrogroup)
+        self._stop_hypermetro_if_need(metro_id)
+        self.local_cli.add_metro_to_metrogroup(metrogroup['ID'], metro_id)
+        self.local_cli.sync_metrogroup(metrogroup['ID'])
diff --git a/Cinder/Bobcat/replication.py b/Cinder/Bobcat/replication.py
new file mode 100644
index 0000000..c2ee727
--- /dev/null
+++ b/Cinder/Bobcat/replication.py
@@ -0,0 +1,535 @@
+# Copyright (c) 2016 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import six + +from oslo_log import log as logging +import taskflow.engines +from taskflow.patterns import linear_flow +from taskflow import task +from taskflow.types import failure + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.huawei import constants +from cinder.volume.drivers.huawei import huawei_utils + +LOG = logging.getLogger(__name__) + + +class BaseReplicationOp(object): + def __init__(self, loc_client, rmt_client): + self.loc_client = loc_client + self.rmt_client = rmt_client + + def _wait_until_status(self, rep_id, expect_statuses): + def _status_check(): + info = self.get_info(rep_id) + if info['HEALTHSTATUS'] != constants.REPLICA_HEALTH_STATUS_NORMAL: + msg = _('Replication status %s is abnormal.' + ) % info['HEALTHSTATUS'] + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if info['RUNNINGSTATUS'] in expect_statuses: + return True + + return False + + huawei_utils.wait_for_condition(_status_check, + constants.DEFAULT_WAIT_INTERVAL, + constants.DEFAULT_WAIT_TIMEOUT) + + def _wait_until_role(self, rep_id, is_primary): + def _role_check(): + info = self.get_info(rep_id) + if info['HEALTHSTATUS'] != constants.REPLICA_HEALTH_STATUS_NORMAL: + msg = _('Replication status %s is abnormal.' + ) % info['HEALTHSTATUS'] + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if info['ISPRIMARY'] == is_primary: + return True + + return False + + huawei_utils.wait_for_condition(_role_check, + constants.DEFAULT_WAIT_INTERVAL, + constants.DEFAULT_WAIT_TIMEOUT) + + def create(self, params): + return self._create(params) + + def delete(self, rep_id): + self._delete(rep_id) + + def sync(self, rep_id, client=None): + if not client: + client = self.loc_client + self._sync(rep_id, client) + + def split(self, rep_id, rep_info=None): + expect_status = (constants.REPLICA_RUNNING_STATUS_SPLIT, + constants.REPLICA_RUNNING_STATUS_INTERRUPTED) + info = rep_info or self.get_info(rep_id) + if (info.get('ISEMPTY') == 'true' or + info['RUNNINGSTATUS'] in expect_status): + return + + self._split(rep_id) + self._wait_until_status(rep_id, expect_status) + + def switch(self, rep_id): + self._switch(rep_id) + + def unprotect_secondary(self, rep_id): + self._unprotect_secondary(rep_id) + + def protect_secondary(self, rep_id): + self._protect_secondary(rep_id) + + def failover(self, rep_id): + """Failover replication. + + Steps: + 1. Split replication. + 2. Set secondary access readable & writable. + 3. Try to switch replication roles. + """ + self.split(rep_id) + self.unprotect_secondary(rep_id) + try: + self.switch(rep_id) + self._wait_until_role(rep_id, 'true') + self.protect_secondary(rep_id) + self.sync(rep_id, self.rmt_client) + except Exception: + LOG.warning('Switch replication %s roles failed, but secondary ' + 'is readable&writable now.', rep_id) + + def failback(self, rep_id): + """Failback replication. + + Steps: + 1. Switch the role of replication if needed. + 2. Sync original secondary data back to original primary. + 3. Recover original primary&secondary replication relationship. + """ + info = self.get_info(rep_id) + self.split(rep_id, info) + self.unprotect_secondary(rep_id) + + # If remote lun is primary, means the previous failover + # didn't switch the replication roles, so we need to switch + # again to make the original secondary lun primary. 
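+        # get_info() queries the remote array, so ISPRIMARY here reflects
+        # the role of the remote copy of the pair.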
+ if info['ISPRIMARY'] == 'true': + self.switch(rep_id) + self._wait_until_role(rep_id, 'false') + self.protect_secondary(rep_id) + self.sync(rep_id) + self._wait_until_status( + rep_id, (constants.REPLICA_RUNNING_STATUS_NORMAL,)) + + self.failover(rep_id) + + +class ReplicationPairOp(BaseReplicationOp): + def get_info(self, rep_id): + return self.rmt_client.get_replication_pair_by_id(rep_id) + + def _create(self, params): + return self.loc_client.create_replication_pair(params) + + def _delete(self, rep_id): + return self.loc_client.delete_replication_pair(rep_id) + + def _sync(self, rep_id, client): + client.sync_replication_pair(rep_id) + + def _split(self, rep_id): + self.loc_client.split_replication_pair(rep_id) + + def _switch(self, rep_id): + self.loc_client.switch_replication_pair(rep_id) + + def _unprotect_secondary(self, rep_id): + self.rmt_client.set_replication_pair_second_access( + rep_id, constants.REPLICA_SECOND_RW) + + def _protect_secondary(self, rep_id): + self.rmt_client.set_replication_pair_second_access( + rep_id, constants.REPLICA_SECOND_RO) + + +class ReplicationGroupOp(BaseReplicationOp): + def get_info(self, rep_id): + return self.rmt_client.get_replication_group_by_id(rep_id) + + def _create(self, params): + return self.loc_client.create_replication_group(params) + + def _delete(self, rep_id): + return self.loc_client.delete_replication_group(rep_id) + + def _sync(self, rep_id, client): + client.sync_replication_group(rep_id) + + def _split(self, rep_id): + self.loc_client.split_replication_group(rep_id) + + def _switch(self, rep_id): + self.loc_client.switch_replication_group(rep_id) + + def _unprotect_secondary(self, rep_id): + self.rmt_client.set_replication_group_second_access( + rep_id, constants.REPLICA_SECOND_RW) + + def _protect_secondary(self, rep_id): + self.rmt_client.set_replication_group_second_access( + rep_id, constants.REPLICA_SECOND_RO) + + def add_pair_to_group(self, group_id, pair_id): + return self.loc_client.add_pair_to_replication_group( + group_id, pair_id) + + def remove_pair_from_group(self, group_id, pair_id): + return self.loc_client.remove_pair_from_replication_group( + group_id, pair_id) + + +class _CheckCreateConditionTask(task.Task): + default_provides = set(('rmt_dev_id',)) + + def __init__(self, loc_client, rmt_client, *args, **kwargs): + super(_CheckCreateConditionTask, self).__init__(*args, **kwargs) + self.loc_client = loc_client + self.rmt_client = rmt_client + + def execute(self): + rmt_array = self.rmt_client.get_array_info() + rmt_dev = self.loc_client.get_remote_device_by_wwn(rmt_array['wwn']) + if not rmt_dev: + msg = _("Remote device %s doesn't exist.") % rmt_array['wwn'] + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return {'rmt_dev_id': rmt_dev['ID']} + + +class _CreateRemoteLunTask(task.Task): + default_provides = set(('remote_lun_id',)) + + def __init__(self, client, *args, **kwargs): + super(_CreateRemoteLunTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, lun_params, rmt_pool): + pool_id = self.client.get_pool_id(rmt_pool) + if not pool_id: + msg = _('Remote pool %s for replication not exist.') % rmt_pool + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + lun_params['PARENTID'] = pool_id + remote_lun = self.client.create_lun(lun_params) + huawei_utils.wait_lun_online(self.client, remote_lun['ID']) + return {'remote_lun_id': remote_lun['ID']} + + def revert(self, result, **kwargs): + if isinstance(result, failure.Failure): + return 
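+        # Revert path: the remote LUN was created but a later task failed,
+        # so delete it to roll the flow back cleanly.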
+ self.client.delete_lun(result['remote_lun_id']) + + +class _CreatePairTask(task.Task): + default_provides = set(('pair_id',)) + + def __init__(self, op, configs, *args, **kwargs): + super(_CreatePairTask, self).__init__(*args, **kwargs) + self.op = op + self.configs = configs + + def execute(self, local_lun_id, remote_lun_id, rmt_dev_id, replica_model): + params = { + "LOCALRESID": local_lun_id, + "REMOTEDEVICEID": rmt_dev_id, + "REMOTERESID": remote_lun_id, + "REPLICATIONMODEL": replica_model, + "RECOVERYPOLICY": '1', + "SPEED": self.configs['sync_speed'], + } + + if replica_model == constants.REPLICA_ASYNC_MODEL: + params['SYNCHRONIZETYPE'] = '2' + params['TIMINGVAL'] = constants.REPLICA_PERIOD + + pair_info = self.op.create(params) + self.op.sync(pair_info['ID']) + return {'pair_id': pair_info['ID']} + + +class ReplicationManager(object): + def __init__(self, local_client, rmt_client, configs): + self.loc_client = local_client + self.rmt_client = rmt_client + self.pair_op = ReplicationPairOp(self.loc_client, self.rmt_client) + self.group_op = ReplicationGroupOp(self.loc_client, self.rmt_client) + self.configs = configs.replication + self.configuration = configs + + def create_replica(self, local_lun_id, lun_params, replica_model): + """Create remote LUN and replication pair. + + Purpose: + 1. create remote lun + 2. create replication pair + 3. sync replication pair + """ + LOG.info(('Create replication, local lun: %(local_lun_id)s, ' + 'replication model: %(model)s.'), + {'local_lun_id': local_lun_id, 'model': replica_model}) + + store_spec = {'local_lun_id': local_lun_id, + 'lun_params': lun_params, + 'replica_model': replica_model, + 'rmt_pool': self.configs['storage_pools'][0], + } + + work_flow = linear_flow.Flow('create_replication') + work_flow.add( + _CheckCreateConditionTask(self.loc_client, self.rmt_client), + _CreateRemoteLunTask(self.rmt_client), + _CreatePairTask(self.pair_op, self.configs), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + return engine.storage.fetch('pair_id') + + def delete_replica(self, pair_id): + LOG.info('Delete replication pair %s.', pair_id) + try: + pair_info = self.pair_op.get_info(pair_id) + except exception.VolumeBackendAPIException as exc: + if huawei_utils.is_not_exist_exc(exc): + return + raise + + self.pair_op.split(pair_id) + self.pair_op.delete(pair_id) + huawei_utils.remove_lun_from_lungroup( + self.rmt_client, pair_info['LOCALRESID'], + self.configuration.force_delete_volume) + self.rmt_client.delete_lun(pair_info['LOCALRESID']) + + def extend_replica(self, pair_id, new_size): + LOG.info('Extend replication pair %s', pair_id) + pair_info = self.pair_op.get_info(pair_id) + + cg_info = None + cg_id = None + if pair_info['ISINCG'] == 'true': + cg_id = pair_info['CGID'] + cg_info = self.group_op.get_info(cg_id) + + if cg_info: + self.group_op.split(cg_id, cg_info) + else: + self.pair_op.split(pair_id, pair_info) + + try: + self.rmt_client.extend_lun(pair_info['LOCALRESID'], new_size) + self.loc_client.extend_lun(pair_info['REMOTERESID'], new_size) + finally: + if cg_info: + self.group_op.sync(cg_id) + else: + self.pair_op.sync(pair_id) + + def _pre_fail_check(self, volumes, statuc_check_func): + normal_volumes = [] + pair_ids = [] + group_ids = set() + volume_pair_infos = {} + + for v in volumes: + drv_data = huawei_utils.to_dict(v.replication_driver_data) + pair_id = drv_data.get('pair_id') + if not pair_id: + normal_volumes.append(v.id) + continue + + pair_info = self.pair_op.get_info(pair_id) + 
volume_pair_infos[v.id] = pair_info + + cg_id = pair_info.get('CGID') + if cg_id: + if cg_id not in group_ids: + group_ids.add(cg_id) + else: + pair_ids.append(pair_id) + + for pair_info in six.itervalues(volume_pair_infos): + if not statuc_check_func(pair_info): + msg = _('Replication pair %(id)s is not at the status ' + 'failover/failback available, RUNNINGSTATUS: %(run)s, ' + 'SECRESDATASTATUS: %(sec)s.' + ) % {'id': pair_info['ID'], + 'run': pair_info['RUNNINGSTATUS'], + 'sec': pair_info['SECRESDATASTATUS']} + LOG.error(msg) + raise exception.InvalidReplicationTarget(reason=msg) + + return normal_volumes, list(group_ids), pair_ids, volume_pair_infos + + def _fail_op(self, volumes, status_check_func, fail_group_func, + fail_pair_func): + (normal_volumes, group_ids, pair_ids, volume_pair_infos + ) = self._pre_fail_check(volumes, status_check_func) + + for group in group_ids: + fail_group_func(group) + + for pair in pair_ids: + fail_pair_func(pair) + + volumes_update = [] + for v in volumes: + if v.id in normal_volumes: + LOG.warning("Volume %s doesn't have replication.", v.id) + continue + + rmt_lun_id = volume_pair_infos[v.id]['LOCALRESID'] + rmt_lun_info = self.rmt_client.get_lun_info_by_id(rmt_lun_id) + location = huawei_utils.to_string( + huawei_lun_id=rmt_lun_id, + huawei_lun_wwn=rmt_lun_info['WWN'], + huawei_sn=self.rmt_client.device_id, + ) + + volume_update = {'volume_id': v.id} + volume_update['updates'] = { + 'provider_location': location, + } + + volumes_update.append(volume_update) + + return volumes_update + + def failback(self, volumes): + """Failback volumes to primary storage.""" + def _status_check_func(pair_info): + return pair_info['RUNNINGSTATUS'] in ( + constants.REPLICA_RUNNING_STATUS_NORMAL, + constants.REPLICA_RUNNING_STATUS_SPLIT, + constants.REPLICA_RUNNING_STATUS_INTERRUPTED) + + return self._fail_op(volumes, _status_check_func, + self.group_op.failback, self.pair_op.failback) + + def failover(self, volumes): + """Failover volumes to secondary storage.""" + def _status_check_func(pair_info): + return pair_info['RUNNINGSTATUS'] in ( + constants.REPLICA_RUNNING_STATUS_NORMAL, + constants.REPLICA_RUNNING_STATUS_SPLIT, + constants.REPLICA_RUNNING_STATUS_INTERRUPTED + ) and pair_info['SECRESDATASTATUS'] in ( + constants.REPLICA_SECRES_DATA_SYNC, + constants.REPLICA_SECRES_DATA_COMPLETE + ) + + return self._fail_op(volumes, _status_check_func, + self.group_op.failover, self.pair_op.failover) + + def create_group(self, group_id, replica_model): + LOG.info("Create replication group %s.", group_id) + group_name = huawei_utils.encode_name(group_id) + params = {'NAME': group_name, + 'DESCRIPTION': group_id, + 'RECOVERYPOLICY': '1', + 'REPLICATIONMODEL': replica_model, + 'SPEED': self.configs['sync_speed']} + + if replica_model == constants.REPLICA_ASYNC_MODEL: + params['SYNCHRONIZETYPE'] = '2' + params['TIMINGVAL'] = constants.REPLICA_CG_PERIOD + + group = self.group_op.create(params) + return group['ID'] + + def _add_volumes_to_group(self, group_id, volumes): + for volume in volumes: + drv_data = huawei_utils.to_dict(volume.replication_driver_data) + pair_id = drv_data.get('pair_id') + if not pair_id: + LOG.warning("Volume %s doesn't have replication.", volume.id) + continue + + self.pair_op.split(pair_id) + self.group_op.add_pair_to_group(group_id, pair_id) + + def _remove_volumes_from_group(self, group_id, volumes): + for volume in volumes: + drv_data = huawei_utils.to_dict(volume.replication_driver_data) + pair_id = drv_data.get('pair_id') + if not pair_id: + 
LOG.warning("Volume %s doesn't have replication.", volume.id) + continue + + self.group_op.remove_pair_from_group(group_id, pair_id) + self.pair_op.sync(pair_id) + + def delete_group(self, group_id, volumes): + LOG.info("Delete replication group %s.", group_id) + group_info = huawei_utils.get_replication_group( + self.loc_client, group_id) + if not group_info: + LOG.warning('Replication group %s to delete not exist.', + group_id) + return + + self.group_op.split(group_info['ID'], group_info) + self._remove_volumes_from_group(group_info['ID'], volumes) + self.group_op.delete(group_info['ID']) + + def update_group(self, group_id, add_volumes, remove_volumes): + LOG.info("Update replication group %s.", group_id) + group_info = huawei_utils.get_replication_group( + self.loc_client, group_id) + if not group_info: + LOG.warning('Replication group %s to update not exist.', + group_id) + return + + self.group_op.split(group_info['ID'], group_info) + self._add_volumes_to_group(group_info['ID'], add_volumes) + self._remove_volumes_from_group(group_info['ID'], remove_volumes) + self.group_op.sync(group_info['ID']) + + def add_replication_to_group(self, group_id, pair_id): + group_info = huawei_utils.get_replication_group( + self.loc_client, group_id) + if not group_info: + msg = _('Replication group %s not exist.') % group_id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.group_op.split(group_info['ID'], group_info) + self.pair_op.split(pair_id) + self.group_op.add_pair_to_group(group_info['ID'], pair_id) + self.group_op.sync(group_info['ID']) diff --git a/Cinder/Bobcat/rest_client.py b/Cinder/Bobcat/rest_client.py new file mode 100644 index 0000000..8932395 --- /dev/null +++ b/Cinder/Bobcat/rest_client.py @@ -0,0 +1,1749 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import functools +import inspect +import json +import requests +import six +import sys +import threading +import time + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.huawei import constants + +from oslo_concurrency import lockutils +from oslo_log import log as logging +from requests.adapters import HTTPAdapter + + +LOG = logging.getLogger(__name__) + + +def _error_code(result): + return result['error']['code'] + + +def obj_operation_wrapper(func): + @functools.wraps(func) + def wrapped(self, url_format=None, **kwargs): + url = self._obj_url + if url_format: + url += url_format % kwargs + + self.semaphore.acquire() + + try: + result = func(self, url, **kwargs) + except requests.HTTPError as exc: + return {"error": {"code": exc.response.status_code, + "description": six.text_type(exc)}} + finally: + self.semaphore.release() + + return result + + return wrapped + + +class CommonObject(object): + def __init__(self, client): + self.client = client + self.semaphore = client.semaphore + + @obj_operation_wrapper + def post(self, url, **kwargs): + return self.client.post(url, **kwargs) + + @obj_operation_wrapper + def put(self, url, **kwargs): + return self.client.put(url, **kwargs) + + @obj_operation_wrapper + def delete(self, url, **kwargs): + return self.client.delete(url, **kwargs) + + @obj_operation_wrapper + def get(self, url, **kwargs): + return self.client.get(url, **kwargs) + + @staticmethod + def _get_info_by_range(func, params=None): + range_start = 0 + info_list = [] + while True: + range_end = range_start + constants.GET_PATCH_NUM + info = func(range_start, range_end, params) + info_list += info + if len(info) < constants.GET_PATCH_NUM: + break + + range_start += constants.GET_PATCH_NUM + return info_list + + +def _assert_result(result, msg_format, *args): + if _error_code(result) != 0: + args += (result,) + msg = (msg_format + '\nresult: %s.') % args + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + +class Lun(CommonObject): + _obj_url = '/lun' + + def create_lun(self, lun_params): + # Set the mirror switch always on + lun_params['MIRRORPOLICY'] = '1' + result = self.post(data=lun_params) + if result['error']['code'] == constants.ERROR_VOLUME_ALREADY_EXIST: + lun_info = self.get_lun_info_by_name(lun_params['NAME']) + if lun_info: + return lun_info + + if result['error']['code'] == constants.ERROR_VOLUME_TIMEOUT: + try_times = 2 + while try_times: + time.sleep(constants.GET_VOLUME_WAIT_INTERVAL) + LOG.info(("Create LUN TimeOut, try get lun info in %s " + "time"), 2 - try_times) + lun_info = self.get_lun_info_by_name(lun_params['NAME']) + if lun_info: + return lun_info + else: + try_times -= 1 + + _assert_result(result, 'Create lun %s error.', lun_params) + return result['data'] + + def create_lunclone(self, src_id, lun_name): + data = { + "CLONESOURCEID": src_id, + "ISCLONE": True, + "NAME": lun_name, + } + result = self.post(data=data) + _assert_result(result, 'Create clone lun for source ID %s error.', + src_id) + return result['data'] + + def delete_lun(self, lun_id): + result = self.delete('/%(lun)s', lun=lun_id) + if _error_code(result) == constants.ERROR_LUN_NOT_EXIST: + LOG.warning("LUN %s to delete does not exist.", lun_id) + return + _assert_result(result, 'Delete lun %s error.', lun_id) + + def get_lun_info_by_name(self, name): + result = self.get('?filter=NAME::%(name)s&range=[0-100]', name=name) + _assert_result(result, 'Get lun info by name %s error.', name) + if result.get('data'): + return 
result['data'][0] + + def update_lun(self, lun_id, data): + result = self.put('/%(id)s', id=lun_id, data=data) + _assert_result(result, 'Update lun %s properties %s error.', + lun_id, data) + + def extend_lun(self, lun_id, new_size): + data = {'ID': lun_id, + 'CAPACITY': new_size} + result = self.put('/expand', data=data) + _assert_result(result, 'Extend lun %s capacity error.', lun_id) + + def add_lun_to_partition(self, lun_id, partition_id): + data = {"ID": partition_id, + "ASSOCIATEOBJTYPE": 11, + "ASSOCIATEOBJID": lun_id} + result = self.post('/associate/cachepartition', data=data) + _assert_result(result, 'Add lun %s to partition %s error.', + lun_id, partition_id) + + def remove_lun_from_partition(self, lun_id, partition_id): + data = {"ID": partition_id, + "ASSOCIATEOBJTYPE": 11, + "ASSOCIATEOBJID": lun_id} + result = self.delete('/associate/cachepartition', data=data) + _assert_result(result, 'Remove lun %s from partition %s error.', + lun_id, partition_id) + + def rename_lun(self, lun_id, new_name, description=None): + data = {"NAME": new_name} + if description: + data.update({"DESCRIPTION": description}) + result = self.put('/%(id)s', id=lun_id, data=data) + _assert_result(result, 'Rename lun %s to %s error.', lun_id, new_name) + + def get_lun_count_of_lungroup(self, lungroup_id): + result = self.get("/count?ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%(id)s", + id=lungroup_id) + _assert_result(result, 'Get lun count of lungroup %s error.', + lungroup_id) + return int(result['data']['COUNT']) + + def get_lun_info_by_id(self, lun_id): + result = self.get("/%(id)s", id=lun_id) + _assert_result(result, 'Get lun info by id %s error.', lun_id) + return result['data'] + + def get_lun_info_filter_id(self, lun_id): + result = self.get("?filter=ID::%(lun_id)s&range=[0-100]", + lun_id=lun_id) + _assert_result(result, 'Get lun info filter id %s error.', lun_id) + if result.get('data'): + return result['data'][0] + + def get_lun_host_lun_id(self, host_id, lun_info): + result = self.get( + "/associate?ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%(id)s" + "&filter=NAME::%(name)s" + "&selectFields=ID,NAME,ASSOCIATEMETADATA,WWN", + id=host_id, name=lun_info['NAME']) + _assert_result(result, 'Get lun info related to host %s error.', + host_id) + + for item in result.get('data', []): + if lun_info['ID'] == item['ID']: + metadata = json.loads(item['ASSOCIATEMETADATA']) + return metadata['HostLUNID'] + + def is_host_associate_inband_lun(self, host_id): + result = self.get("/associate?ASSOCIATEOBJTYPE=21" + "&ASSOCIATEOBJID=%(id)s", + id=host_id) + _assert_result(result, 'Get host %s associate to lun error.', + host_id) + associate_data = result.get('data') + if not associate_data: + return False + + for lun_info in associate_data: + if lun_info.get("SUBTYPE") == constants.INBAND_LUN_TYPE: + return True + + return False + + +class StoragePool(CommonObject): + _obj_url = '/storagepool' + + def get_all_pools(self): + result = self.get() + _assert_result(result, 'Query storage pools error.') + return result.get('data', []) + + def get_pool_id(self, pool_name): + result = self.get('?filter=NAME::%(name)s', name=pool_name) + _assert_result(result, 'Query storage pool by name %s error.', + pool_name) + if result.get('data'): + return result['data'][0]['ID'] + + def get_pool_by_name(self, pool_name): + result = self.get('?filter=NAME::%(name)s', name=pool_name, + log_filter=True) + _assert_result(result, 'Query storage pool by name %s error.', + pool_name) + if result.get('data'): + return result['data'][0] + + +class 
Snapshot(CommonObject): + _obj_url = '/snapshot' + + def activate_snapshot(self, snapshot_ids): + if isinstance(snapshot_ids, list): + data = {"SNAPSHOTLIST": snapshot_ids} + else: + data = {"SNAPSHOTLIST": [snapshot_ids]} + result = self.post('/activate', data=data) + _assert_result(result, 'Activate snapshots %s error.', snapshot_ids) + + def create_snapshot(self, lun_id, snapshot_name, snapshot_description): + data = {"NAME": snapshot_name, + "DESCRIPTION": snapshot_description, + "PARENTID": lun_id} + result = self.post(data=data) + if result['error']['code'] == constants.ERROR_VOLUME_ALREADY_EXIST: + snapshot_info = self.get_snapshot_info_by_name(snapshot_name) + if snapshot_info: + return snapshot_info + + if result['error']['code'] == constants.ERROR_VOLUME_TIMEOUT: + try_times = 2 + while try_times: + time.sleep(constants.GET_VOLUME_WAIT_INTERVAL) + LOG.info(_("Create SNAPSHOT TimeOut, try get snapshot " + "info in %s time"), 2 - try_times) + snapshot_info = self.get_snapshot_info_by_name(snapshot_name) + if snapshot_info: + return snapshot_info + else: + try_times -= 1 + + _assert_result(result, 'Create snapshot %s for lun %s error.', + snapshot_name, lun_id) + return result['data'] + + def stop_snapshot(self, snapshot_id): + data = {"ID": snapshot_id} + result = self.put('/stop', data=data) + _assert_result(result, 'Stop snapshot %s error.', snapshot_id) + + def delete_snapshot(self, snapshot_id): + result = self.delete('/%(id)s', id=snapshot_id) + if _error_code(result) == constants.SNAPSHOT_NOT_EXIST: + LOG.warning('Snapshot %s to delete not exist.', snapshot_id) + return + _assert_result(result, 'Delete snapshot %s error.', snapshot_id) + + def get_snapshot_info_by_name(self, name): + result = self.get('?filter=NAME::%(name)s&range=[0-100]', name=name) + _assert_result(result, 'Get snapshot info by name %s error.', name) + if 'data' in result and result['data']: + return result['data'][0] + + def get_snapshot_info_by_id(self, snapshot_id): + result = self.get('/%(id)s', id=snapshot_id) + _assert_result(result, 'Get snapshot info by id %s error.', + snapshot_id) + return result['data'] + + def update_snapshot(self, snapshot_id, data): + result = self.put('/%(id)s', id=snapshot_id, data=data) + _assert_result(result, 'Update snapshot %s error.', snapshot_id) + + def get_snapshot_count_of_lungroup(self, lungroup_id): + result = self.get("/count?ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%(id)s", + id=lungroup_id) + _assert_result(result, 'Get snapshot count of lungroup %s error.', + lungroup_id) + return int(result['data']['COUNT']) + + def get_snapshot_host_lun_id(self, host_id, snap_id): + result = self.get( + "/associate?ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%(id)s", id=host_id) + _assert_result(result, 'Get snapshot info related to host %s error.', + host_id) + + for item in result.get('data', []): + if snap_id == item['ID']: + metadata = json.loads(item['ASSOCIATEMETADATA']) + return metadata['HostLUNID'] + + def rollback_snapshot(self, snapshot_id, speed): + data = {'ID': snapshot_id, + 'ROLLBACKSPEED': speed} + result = self.put('/rollback', data=data) + _assert_result(result, 'Rollback snapshot %s error.', snapshot_id) + + def cancel_rollback_snapshot(self, snapshot_id): + data = {'ID': snapshot_id} + result = self.put('/cancelrollback', data=data) + _assert_result(result, 'Cancel rollback snapshot %s error.', snapshot_id) + + +class LunCopy(CommonObject): + _obj_url = '/LUNCOPY' + + def create_luncopy(self, luncopyname, srclunid, tgtlunid, copy_speed): + param_format = 
"INVALID;%s;INVALID;INVALID;INVALID" + data = {"NAME": luncopyname, + "COPYSPEED": copy_speed, + "SOURCELUN": param_format % srclunid, + "TARGETLUN": param_format % tgtlunid} + result = self.post(data=data) + _assert_result(result, 'Create luncopy %s error.', luncopyname) + + return result['data']['ID'] + + def start_luncopy(self, luncopy_id): + data = {"ID": luncopy_id} + result = self.put('/start', data=data) + _assert_result(result, 'Start LUNCOPY %s error.', luncopy_id) + + def stop_luncopy(self, luncopy_id): + data = {"ID": luncopy_id} + result = self.put('/stop', data=data) + if _error_code(result) in (constants.LUNCOPY_ALREADY_STOPPED, + constants.LUNCOPY_COMPLETED): + LOG.warning('Luncopy %s already stopped or completed.', luncopy_id) + return + _assert_result(result, 'Stop LUNCOPY %s error.', luncopy_id) + + def get_luncopy_info(self, luncopy_id): + result = self.get('/%(id)s', id=luncopy_id) + _assert_result(result, 'Get LUNCOPY %s error.', luncopy_id) + return result.get('data', {}) + + def delete_luncopy(self, luncopy_id): + result = self.delete('/%(id)s', id=luncopy_id) + if _error_code(result) == constants.LUNCOPY_NOT_EXIST: + LOG.warning('Luncopy %s to delete not exist.', luncopy_id) + return + _assert_result(result, 'Delete LUNCOPY %s error.', luncopy_id) + + +class Host(CommonObject): + _obj_url = '/host' + + def get_host_id_by_name(self, host_name): + result = self.get('?filter=NAME::%(name)s&range=[0-100]', + name=host_name) + _assert_result(result, 'Get host by name %s error.', host_name) + if result.get('data'): + return result['data'][0]['ID'] + + def create_host(self, hostname, orig_host_name, info): + data = {"NAME": hostname, + "OPERATIONSYSTEM": "0", + "DESCRIPTION": orig_host_name} + data.update(info) + result = self.post(data=data) + if _error_code(result) == constants.OBJECT_NAME_ALREADY_EXIST: + return self.get_host_id_by_name(hostname) + + _assert_result(result, 'Add host %s error.', hostname) + return result['data']['ID'] + + def update_host(self, host_id, data): + result = self.put('/%(id)s', id=host_id, data=data) + if _error_code(result) == constants.HOST_NOT_EXIST: + return + _assert_result(result, 'Update host %s error.', host_id) + + def delete_host(self, host_id): + result = self.delete('/%(id)s', id=host_id) + if _error_code(result) == constants.HOST_NOT_EXIST: + LOG.warning('Host %s to delete not exist.', host_id) + return + _assert_result(result, 'Delete host %s error.', host_id) + + def remove_host_from_hostgroup(self, hostgroup_id, host_id): + result = self.delete('/associate?ID=%(gid)s&ASSOCIATEOBJTYPE=21&' + 'ASSOCIATEOBJID=%(hid)s', + gid=hostgroup_id, hid=host_id) + if _error_code(result) == constants.HOST_NOT_IN_HOSTGROUP: + LOG.warning('Host %s not in hostgroup %s.', host_id, hostgroup_id) + return + _assert_result(result, 'Remove host %s from host group %s error.', + host_id, hostgroup_id) + + def get_host_by_hostgroup_id(self, hostgroup_id): + result = self.get("/associate?ASSOCIATEOBJTYPE=14&" + "ASSOCIATEOBJID=%(id)s", id=hostgroup_id) + _assert_result(result, 'Get host by hostgroup %s error.', hostgroup_id) + return [host.get('ID') for host in result.get("data", [])] + + +class PortGroup(CommonObject): + _obj_url = '/portgroup' + + def get_portgroup_in_mappingview(self, view_id): + result = self.get('/associate?ASSOCIATEOBJTYPE=245&' + 'ASSOCIATEOBJID=%(id)s', id=view_id) + _assert_result(result, 'Get portgroup in mappingview %s error', + view_id) + if 'data' in result and result['data']: + return result['data'][0]['ID'] + + def 
create_portgroup(self, portg_name): + data = {"NAME": portg_name} + result = self.post(data=data) + if _error_code(result) == constants.OBJECT_NAME_ALREADY_EXIST: + LOG.info('Portgroup %s to create already exist.', portg_name) + portgroup = self.get_portgroup_by_name(portg_name) + if portgroup: + return portgroup['ID'] + + _assert_result(result, 'Create portgroup %s error.', portg_name) + return result['data']['ID'] + + def delete_portgroup(self, portgroup_id): + result = self.delete('/%(id)s', id=portgroup_id) + if _error_code(result) == constants.PORTGROUP_NOT_EXIST: + LOG.warning('Portgroup %s to delete not exist.', portgroup_id) + return + _assert_result(result, 'Delete portgroup %s error.', portgroup_id) + + def get_portgroup_by_name(self, portg_name): + result = self.get('?filter=NAME::%(name)s', name=portg_name) + _assert_result(result, 'Get portgroup by name %s error.', portg_name) + if 'data' in result and result['data']: + return result['data'][0] + + def get_portgroup_by_port_id(self, port_id, port_type): + result = self.get("/associate?ASSOCIATEOBJTYPE=%(type)s&" + "ASSOCIATEOBJID=%(id)s", id=port_id, type=port_type) + _assert_result(result, 'Get portgroup by port %s error.', port_id) + return [group['ID'] for group in result.get("data", [])] + + +class HostGroup(CommonObject): + _obj_url = '/hostgroup' + + def get_hostgroup_in_mappingview(self, view_id): + result = self.get('/associate?ASSOCIATEOBJTYPE=245&' + 'ASSOCIATEOBJID=%(id)s', id=view_id) + _assert_result(result, 'Get hostgroup in mappingview %s error.', + view_id) + if 'data' in result and result['data']: + return result['data'][0]['ID'] + + def associate_host_to_hostgroup(self, hostgroup_id, host_id): + data = {"ID": hostgroup_id, + "ASSOCIATEOBJTYPE": "21", + "ASSOCIATEOBJID": host_id} + result = self.post('/associate', data=data) + if _error_code(result) == constants.HOST_ALREADY_IN_HOSTGROUP: + LOG.info('Object %(id)s already in hostgroup %(group)s.', + {'id': host_id, 'group': hostgroup_id}) + return + _assert_result(result, 'Associate host %s to hostgroup %s error.', + host_id, hostgroup_id) + + def create_hostgroup(self, name): + data = {'NAME': name} + result = self.post(data=data) + if _error_code(result) == constants.OBJECT_NAME_ALREADY_EXIST: + LOG.info('Hostgroup %s to create already exists.', name) + hostgroup = self.get_hostgroup_by_name(name) + return hostgroup['ID'] if hostgroup else None + _assert_result(result, 'Create hostgroup %s error.', name) + return result['data']['ID'] + + def delete_hostgroup(self, hostgroup_id): + result = self.delete('/%(id)s', id=hostgroup_id) + if _error_code(result) == constants.HOSTGROUP_NOT_EXIST: + LOG.info('Hostgroup %s to delete not exist.', hostgroup_id) + return + _assert_result(result, 'Delete hostgroup %s error.', hostgroup_id) + + def get_hostgroup_by_name(self, name): + result = self.get('?filter=NAME::%(name)s', name=name) + _assert_result(result, 'Get hostgroup by %s error.', name) + if 'data' in result and result['data']: + return result['data'][0] + + +class LunGroup(CommonObject): + _obj_url = '/lungroup' + + def associate_lun_to_lungroup(self, lungroup_id, obj_id, obj_type, + is_dorado_v6=False, is_associated_host=False): + data = {"ID": lungroup_id, + "ASSOCIATEOBJTYPE": obj_type, + "ASSOCIATEOBJID": obj_id} + if all((is_dorado_v6, is_associated_host)): + data['startHostLunId'] = 1 + result = self.post('/associate', data=data) + if _error_code(result) in (constants.OBJECT_ID_NOT_UNIQUE, + constants.LUN_ALREADY_IN_LUNGROUP): + LOG.info('Object %(id)s 
already in lungroup %(group)s.',
+                     {'id': obj_id, 'group': lungroup_id})
+            return
+        _assert_result(result, 'Associate obj %s to lungroup %s error.',
+                       obj_id, lungroup_id)
+
+    def remove_lun_from_lungroup(self, lungroup_id, obj_id, obj_type):
+        result = self.delete(
+            "/associate?ID=%(lungroup_id)s&ASSOCIATEOBJTYPE=%(obj_type)s&"
+            "ASSOCIATEOBJID=%(obj_id)s", lungroup_id=lungroup_id,
+            obj_id=obj_id, obj_type=obj_type)
+        if _error_code(result) == constants.OBJECT_NOT_EXIST:
+            LOG.warning('LUN %(lun)s does not exist in lungroup %(gp)s.',
+                        {'lun': obj_id, 'gp': lungroup_id})
+            return
+        _assert_result(result, 'Remove lun %s from lungroup %s error.',
+                       obj_id, lungroup_id)
+
+    def get_lungroup_in_mappingview(self, view_id):
+        result = self.get('/associate?ASSOCIATEOBJTYPE=245&'
+                          'ASSOCIATEOBJID=%(id)s', id=view_id)
+        _assert_result(result, 'Get lungroup in mappingview %s error.',
+                       view_id)
+        if 'data' in result and result['data']:
+            return result['data'][0]['ID']
+
+    def get_lungroup_by_name(self, lungroup_name):
+        """Get the lungroup info by the given lungroup name."""
+        result = self.get('?filter=NAME::%(name)s', name=lungroup_name)
+        _assert_result(result, 'Get lungroup info by name %s error.',
+                       lungroup_name)
+        if 'data' in result and result['data']:
+            return result['data'][0]
+
+    def create_lungroup(self, lungroup_name):
+        data = {"APPTYPE": '0',
+                "NAME": lungroup_name}
+        result = self.post(data=data)
+        if _error_code(result) == constants.OBJECT_NAME_ALREADY_EXIST:
+            LOG.info('Lungroup %s to create already exists.', lungroup_name)
+            lungroup = self.get_lungroup_by_name(lungroup_name)
+            return lungroup['ID'] if lungroup else None
+
+        _assert_result(result, 'Create lungroup %s error.', lungroup_name)
+        return result['data']['ID']
+
+    def delete_lungroup(self, lungroup_id):
+        result = self.delete('/%(id)s', id=lungroup_id)
+        if _error_code(result) == constants.OBJECT_NOT_EXIST:
+            LOG.warning('Lungroup %s to delete does not exist.', lungroup_id)
+            return
+        _assert_result(result, 'Delete lungroup %s error.', lungroup_id)
+
+    def get_lungroup_ids_by_lun_id(self, lun_id, lun_type=constants.LUN_TYPE):
+        result = self.get('/associate?TYPE=256&ASSOCIATEOBJTYPE=%(type)s&'
+                          'ASSOCIATEOBJID=%(id)s', type=lun_type, id=lun_id)
+        _assert_result(result, 'Get lungroup id by lun id %s error.', lun_id)
+
+        lungroup_ids = []
+        if 'data' in result:
+            for item in result['data']:
+                lungroup_ids.append(item['ID'])
+
+        return lungroup_ids
+
+
+class IscsiInitiator(CommonObject):
+    _obj_url = '/iscsi_initiator'
+
+    def add_iscsi_initiator(self, initiator):
+        data = {'ID': initiator}
+        result = self.post(data=data)
+        if _error_code(result) == constants.OBJECT_ID_NOT_UNIQUE:
+            LOG.info('iscsi initiator %s already exists.', initiator)
+            return
+        _assert_result(result, 'Add iscsi initiator %s error.', initiator)
+
+    def associate_iscsi_initiator_to_host(self, initiator, host_id, alua_info):
+        data = {
+            "PARENTTYPE": "21",
+            "PARENTID": host_id,
+        }
+        data.update(alua_info)
+
+        result = self.put('/%(ini)s', data=data, ini=initiator)
+        _assert_result(result, 'Add initiator %s to host %s error.',
+                       initiator, host_id)
+
+    def update_iscsi_initiator_chap(self, initiator, chap_info):
+        if chap_info:
+            data = {"USECHAP": "true",
+                    "CHAPNAME": chap_info['CHAPNAME'],
+                    "CHAPPASSWORD": chap_info['CHAPPASSWORD']}
+        else:
+            data = {"USECHAP": "false",
+                    "MULTIPATHTYPE": "0"}
+
+        result = self.put('/%(ini)s', data=data, ini=initiator, log_filter=True)
+        _assert_result(result, 'Update initiator %s chap error.', initiator)
+        LOG.info("Update initiator chap 
+
+
+class IscsiInitiator(CommonObject):
+    _obj_url = '/iscsi_initiator'
+
+    def add_iscsi_initiator(self, initiator):
+        data = {'ID': initiator}
+        result = self.post(data=data)
+        if _error_code(result) == constants.OBJECT_ID_NOT_UNIQUE:
+            LOG.info('iscsi initiator %s already exists.', initiator)
+            return
+        _assert_result(result, 'Add iscsi initiator %s error.', initiator)
+
+    def associate_iscsi_initiator_to_host(self, initiator, host_id,
+                                          alua_info):
+        data = {
+            "PARENTTYPE": "21",
+            "PARENTID": host_id,
+        }
+        data.update(alua_info)
+
+        result = self.put('/%(ini)s', data=data, ini=initiator)
+        _assert_result(result, 'Add initiator %s to host %s error.',
+                       initiator, host_id)
+
+    def update_iscsi_initiator_chap(self, initiator, chap_info):
+        if chap_info:
+            data = {"USECHAP": "true",
+                    "CHAPNAME": chap_info['CHAPNAME'],
+                    "CHAPPASSWORD": chap_info['CHAPPASSWORD']}
+        else:
+            data = {"USECHAP": "false",
+                    "MULTIPATHTYPE": "0"}
+
+        result = self.put('/%(ini)s', data=data, ini=initiator,
+                          log_filter=True)
+        _assert_result(result, 'Update initiator %s chap error.', initiator)
+        LOG.info("Update initiator chap info successfully, "
+                 "url is /iscsi_initiator/%s, method is %s", initiator, 'put')
+
+    def remove_iscsi_initiator_from_host(self, initiator):
+        data = {"ID": initiator}
+        result = self.put('/remove_iscsi_from_host', data=data)
+        if _error_code(result) == constants.INITIATOR_NOT_IN_HOST:
+            LOG.warning('ISCSI initiator %s not in host.', initiator)
+            return
+        _assert_result(result, 'Remove iscsi initiator %s from host error.',
+                       initiator)
+
+    def get_host_iscsi_initiators(self, host_id):
+        result = self.get('?PARENTID=%(id)s', id=host_id)
+        _assert_result(result, 'Get iscsi initiators of host %s error.',
+                       host_id)
+        initiators = []
+        for item in result.get('data', []):
+            initiators.append(item['ID'])
+        return initiators
+
+    def get_iscsi_initiator(self, initiator):
+        result = self.get('/%(id)s', id=initiator)
+        _assert_result(result, 'Get iscsi initiator %s error.', initiator)
+        return result['data']
+
+
+class MappingView(CommonObject):
+    _obj_url = '/mappingview'
+
+    def get_mappingview_by_name(self, name):
+        result = self.get('?filter=NAME::%(name)s&range=[0-100]', name=name)
+        _assert_result(result, 'Find mapping view by name %s error', name)
+        if 'data' in result and result['data']:
+            return result['data'][0]
+
+    def create_mappingview(self, name):
+        data = {"NAME": name}
+        result = self.post(data=data)
+        if _error_code(result) == constants.OBJECT_NAME_ALREADY_EXIST:
+            LOG.info('Mappingview %s to create already exists.', name)
+            mappingview = self.get_mappingview_by_name(name)
+            return mappingview['ID'] if mappingview else None
+        _assert_result(result, 'Create mappingview by name %s error.', name)
+        return result['data']['ID']
+
+    def _associate_group_to_mappingview(self, view_id, group_id, group_type):
+        data = {"ASSOCIATEOBJTYPE": group_type,
+                "ASSOCIATEOBJID": group_id,
+                "ID": view_id}
+        result = self.put('/create_associate', data=data)
+        if _error_code(result) in (constants.HOSTGROUP_ALREADY_IN_MAPPINGVIEW,
+                                   constants.PORTGROUP_ALREADY_IN_MAPPINGVIEW,
+                                   constants.LUNGROUP_ALREADY_IN_MAPPINGVIEW):
+            LOG.warning('Group %(group_id)s of type %(type)s already exist '
+                        'in mappingview %(view_id)s.',
+                        {'group_id': group_id, 'type': group_type,
+                         'view_id': view_id})
+            return
+        _assert_result(result, 'Associate group %s to mappingview %s error.',
+                       group_id, view_id)
+
+    def associate_hostgroup_to_mappingview(self, view_id, hostgroup_id):
+        self._associate_group_to_mappingview(view_id, hostgroup_id, '14')
+
+    def associate_lungroup_to_mappingview(self, view_id, lungroup_id):
+        self._associate_group_to_mappingview(view_id, lungroup_id, '256')
+
+    def associate_portgroup_to_mappingview(self, view_id, portgroup_id):
+        self._associate_group_to_mappingview(view_id, portgroup_id, '257')
+
+    def _remove_group_from_mappingview(self, view_id, group_id, group_type):
+        data = {"ASSOCIATEOBJTYPE": group_type,
+                "ASSOCIATEOBJID": group_id,
+                "ID": view_id}
+        result = self.put('/remove_associate', data=data)
+        if _error_code(result) in (constants.HOSTGROUP_NOT_IN_MAPPINGVIEW,
+                                   constants.PORTGROUP_NOT_IN_MAPPINGVIEW,
+                                   constants.LUNGROUP_NOT_IN_MAPPINGVIEW):
+            LOG.warning('Group %(group_id)s of type %(type)s not exist in '
+                        'mappingview %(view_id)s.',
+                        {'group_id': group_id, 'type': group_type,
+                         'view_id': view_id})
+            return
+        _assert_result(result, 'Remove group %s from mappingview %s error.',
+                       group_id, view_id)
+
+    def remove_lungroup_from_mappingview(self, view_id, lungroup_id):
+        self._remove_group_from_mappingview(view_id, lungroup_id, '256')
+
+    def remove_hostgroup_from_mappingview(self, view_id, hostgroup_id):
+        self._remove_group_from_mappingview(view_id, hostgroup_id, '14')
+
+    def remove_portgroup_from_mappingview(self, view_id, portgroup_id):
+        self._remove_group_from_mappingview(view_id, portgroup_id, '257')
+
+    def delete_mapping_view(self, view_id):
+        result = self.delete('/%(id)s', id=view_id)
+        if _error_code(result) == constants.MAPPINGVIEW_NOT_EXIST:
+            LOG.warning('Mappingview %s to delete not exist.', view_id)
+            return
+        _assert_result(result, 'Delete mappingview %s error.', view_id)
+
+    def change_hostlun_id(self, view_id, lun_id, hostlun_id):
+        data = {"ASSOCIATEOBJTYPE": 11,
+                "ASSOCIATEOBJID": lun_id,
+                "ASSOCIATEMETADATA": [
+                    {"LUNID": lun_id,
+                     "hostLUNId": six.text_type(hostlun_id)}]
+                }
+        result = self.put('/%(id)s', id=view_id, data=data)
+        _assert_result(result, 'Change hostlun id for lun %s in mappingview '
+                       '%s error.', lun_id, view_id)
+
+    def get_mappingview_by_id(self, view_id):
+        result = self.get('/%(id)s', id=view_id)
+        _assert_result(result, 'Get mappingview info by id %s error.',
+                       view_id)
+        return result["data"]
+
+    def get_mappingview_by_portgroup_id(self, portgroup_id):
+        result = self.get('/associate?ASSOCIATEOBJTYPE=257&'
+                          'ASSOCIATEOBJID=%(id)s', id=portgroup_id)
+        _assert_result(result, 'Get mappingviews by portgroup %s error.',
+                       portgroup_id)
+        return [view['ID'] for view in result.get("data", [])]
+
+    def get_mappingview_by_lungroup_id(self, lungroup_id):
+        result = self.get('/associate?ASSOCIATEOBJTYPE=256&'
+                          'ASSOCIATEOBJID=%(id)s', id=lungroup_id)
+        _assert_result(result, 'Get mappingviews by lungroup %s error.',
+                       lungroup_id)
+        return result.get("data", [])
+
+
+class FCInitiator(CommonObject):
+    _obj_url = '/fc_initiator'
+
+    def get_fc_initiator_count(self):
+        result = self.get("/count")
+        _assert_result(result, 'Get FC initiator count error.')
+        return int(result['data']['COUNT'])
+
+    def _get_fc_initiator(self, start, end):
+        result = self.get("?range=[%(start)s-%(end)s]", start=start, end=end)
+        _assert_result(result, 'Get online free FC wwn error.')
+
+        totals = []
+        frees = []
+        for item in result.get('data', []):
+            totals.append(item['ID'])
+            if item['RUNNINGSTATUS'] == '27' and item['ISFREE'] == 'true':
+                frees.append(item['ID'])
+        return totals, frees
+
+    def get_fc_initiators(self):
+        fc_initiator_count = self.get_fc_initiator_count()
+        totals = []
+        frees = []
+        range_start = 0
+
+        while fc_initiator_count > 0:
+            range_end = range_start + constants.GET_PATCH_NUM
+            _totals, _frees = self._get_fc_initiator(range_start, range_end)
+            totals += _totals
+            frees += _frees
+            fc_initiator_count -= constants.GET_PATCH_NUM
+            range_start += constants.GET_PATCH_NUM
+        return totals, frees
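get_fc_initiators above pages through the array in windows of constants.GET_PATCH_NUM; the same ?range=[start-end] idiom recurs throughout the client. A minimal standalone sketch of the pattern, assuming fetch(start, end) returns one page as a list:

def fetch_all(fetch, total, batch):
    # Walk range queries like ?range=[0-100], [100-200], ... until
    # `total` items have been covered.
    items = []
    start = 0
    while total > 0:
        items.extend(fetch(start, start + batch))
        total -= batch
        start += batch
    return items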
+
+    def get_fc_init_info(self, wwn):
+        """Get wwn info by wwn_id and judge whether an error must be raised."""
+        result = self.get("/%(wwn)s", wwn=wwn)
+
+        if _error_code(result) != 0:
+            if _error_code(result) not in (constants.FC_INITIATOR_NOT_EXIST,
+                                           constants.ERROR_PARAMETER_ERROR):
+                msg = (_('Get fc initiator %(initiator)s on array error. '
+                         'result: %(res)s.') % {'initiator': wwn,
+                                                'res': result})
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+            else:
+                return {}
+
+        return result.get('data', {})
+
+    def add_fc_initiator(self, initiator):
+        data = {'ID': initiator}
+        result = self.post(data=data)
+        if _error_code(result) == constants.OBJECT_ID_NOT_UNIQUE:
+            LOG.info('FC initiator %s already exists.', initiator)
+            return
+        _assert_result(result, 'Add FC initiator %s error.', initiator)
+
+    def associate_fc_initiator_to_host(self, host_id, wwn, alua_info):
+        data = {
+            "PARENTTYPE": 21,
+            "PARENTID": host_id,
+        }
+        data.update(alua_info)
+
+        result = self.put('/%(id)s', data=data, id=wwn)
+        _assert_result(result, 'Add FC initiator %s to host %s error.',
+                       wwn, host_id)
+
+    def get_host_fc_initiators(self, host_id):
+        result = self.get('?PARENTID=%(id)s', id=host_id)
+        _assert_result(result, 'Get FC initiators of host %s error.',
+                       host_id)
+        return [item['ID'] for item in result.get('data', [])]
+
+    def remove_fc_initiator_from_host(self, initiator):
+        data = {"ID": initiator}
+        result = self.put('/remove_fc_from_host', data=data)
+        if _error_code(result) == constants.INITIATOR_NOT_IN_HOST:
+            LOG.warning('FC initiator %s not in host.', initiator)
+            return
+        _assert_result(result, 'Remove fc initiator %s from host error.',
+                       initiator)
+
+
+class HostLink(CommonObject):
+    _obj_url = '/host_link'
+
+    def get_fc_target_wwpns(self, ini):
+        result = self.get('?INITIATOR_TYPE=223&INITIATOR_PORT_WWN=%(wwn)s',
+                          wwn=ini)
+        _assert_result(result, 'Get FC target wwn for initiator %s error.',
+                       ini)
+        return [fc['TARGET_PORT_WWN'] for fc in result.get('data', [])]
+
+    def get_host_link(self, host_id):
+        result = self.get('?INITIATOR_TYPE=223&PARENTID=%(id)s', id=host_id)
+        _assert_result(result, 'Get host link for host %s error.', host_id)
+        return result.get('data', [])
+
+
+class IOClass(CommonObject):
+    _obj_url = '/ioclass'
+
+    def create_qos(self, qos, lun_id):
+        localtime = time.strftime('%Y%m%d%H%M%S', time.localtime())
+        qos_name = constants.QOS_NAME_PREFIX + lun_id + '_' + localtime
+
+        data = {"NAME": qos_name,
+                "LUNLIST": [lun_id],
+                "CLASSTYPE": "1",
+                "SCHEDULEPOLICY": "2",
+                "SCHEDULESTARTTIME": "1410969600",
+                "STARTTIME": "08:00",
+                "DURATION": "86400",
+                "CYCLESET": "[1,2,3,4,5,6,0]",
+                }
+        data.update(qos)
+
+        result = self.post(data=data)
+        _assert_result(result, 'Create QoS policy %s error.', qos)
+        return result['data']['ID']
+
+    def delete_qos(self, qos_id):
+        result = self.delete('/%(id)s', id=qos_id)
+        _assert_result(result, 'Delete QoS policy %s error.', qos_id)
+
+    def activate_deactivate_qos(self, qos_id, enablestatus):
+        """Activate or deactivate QoS.
+
+        enablestatus: true (activate)
+        enablestatus: false (deactivate)
+        """
+        data = {"ID": qos_id,
+                "ENABLESTATUS": enablestatus}
+        result = self.put('/active', data=data)
+        _assert_result(result, 'Change QoS %s to status %s error.',
+                       qos_id, enablestatus)
+
+    def get_qos_info(self, qos_id):
+        result = self.get('/%(id)s', id=qos_id)
+        _assert_result(result, 'Get QoS %s info error.', qos_id)
+        return result['data']
+
+    def get_all_qos(self):
+        result = self.get()
+        _assert_result(result, 'Get all QoS information error.')
+        return result.get('data', [])
+
+    def update_qos_luns(self, qos_id, lun_list):
+        """Add lun to QoS."""
+        data = {"LUNLIST": lun_list}
+        result = self.put('/%(qos_id)s', data=data, qos_id=qos_id)
+        _assert_result(result, 'Update lun list %s to QoS %s error.',
+                       lun_list, qos_id)
+
+
+class EthPort(CommonObject):
+    _obj_url = '/eth_port'
+
+    def get_eth_ports_in_portgroup(self, portgroup_id):
+        result = self.get("/associate?ASSOCIATEOBJTYPE=257&"
+                          "ASSOCIATEOBJID=%(id)s", id=portgroup_id)
+        _assert_result(result, 'Get eth ports in portgroup %s error.',
+                       portgroup_id)
+        return result.get("data", [])
+
+
+class IscsiTgtPort(CommonObject):
+    _obj_url = '/iscsi_tgt_port'
+
+    def get_iscsi_tgt_ports(self):
+        result = self.get()
+        _assert_result(result, "Get iscsi target ports info error.")
+        return result.get('data', [])
+
+
+class LunMigration(CommonObject):
+    _obj_url = '/lun_migration'
+
+    def create_lun_migration(self, src_id, dst_id):
+        data = {"PARENTID": src_id,
+                "TARGETLUNID": dst_id,
+                "SPEED": '2',
+                "WORKMODE": 0}
+
+        result = self.post(data=data)
+        _assert_result(result, 'Create migration from %s to %s error.',
+                       src_id, dst_id)
+        return result['data']
+
+    def get_lun_migration(self, migration_id):
+        result = self.get('/%(id)s', id=migration_id)
+        _assert_result(result, 'Get migration info %s error.', migration_id)
+        return result['data']
+
+    def delete_lun_migration(self, migration_id):
+        result = self.delete('/%(id)s', id=migration_id)
+        if _error_code(result) == constants.MIGRATION_NOT_EXIST:
+            LOG.warning('Migration %s to delete not exist.', migration_id)
+            return
+        _assert_result(result, 'Delete migration %s error.', migration_id)
+
+
+class CachePartition(CommonObject):
+    _obj_url = '/cachepartition'
+
+    def get_partition_id_by_name(self, name):
+        result = self.get('?filter=NAME::%(name)s', name=name)
+        _assert_result(result, 'Get partition by name %s error.', name)
+        if 'data' in result and len(result['data']) > 0:
+            return result['data'][0]['ID']
+
+    def get_partition_info_by_id(self, partition_id):
+        result = self.get('/%(id)s', id=partition_id)
+        _assert_result(result, 'Get partition info by id %s error.',
+                       partition_id)
+        return result['data']
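The IOClass methods above are meant to be driven as a create/activate/deactivate/delete sequence (the SmartQos helper later in this patch does exactly that). A hedged sketch of the lifecycle; the helper name and ordering are illustrative, not the driver's own:

def replace_lun_qos(ioclass, lun_id, qos_spec, old_qos_id=None):
    # Deactivate and drop any existing policy before applying a new one.
    if old_qos_id:
        ioclass.activate_deactivate_qos(old_qos_id, False)
        ioclass.delete_qos(old_qos_id)
    qos_id = ioclass.create_qos(qos_spec, lun_id)
    ioclass.activate_deactivate_qos(qos_id, True)
    return qos_id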
+
+
+class SmartCachePartition(CommonObject):
+    _obj_url = '/smartcachepartition'
+
+    def get_cache_id_by_name(self, name):
+        result = self.get('?filter=NAME::%(name)s', name=name)
+        _assert_result(result, 'Get smartcachepartition by name %s error.',
+                       name)
+        if 'data' in result and len(result['data']) > 0:
+            return result['data'][0]['ID']
+
+    def get_cache_info_by_id(self, cacheid):
+        result = self.get('/%(id)s', id=cacheid)
+        _assert_result(result, 'Get smartcachepartition by id %s error.',
+                       cacheid)
+        return result['data']
+
+    def remove_lun_from_cache(self, lun_id, cache_id):
+        data = {"ID": cache_id,
+                "ASSOCIATEOBJTYPE": 11,
+                "ASSOCIATEOBJID": lun_id}
+
+        result = self.put('/remove_associate', data=data)
+        _assert_result(result, 'Remove lun %s from smartcachepartition '
+                       '%s error.', lun_id, cache_id)
+
+    def add_lun_to_cache(self, lun_id, cache_id):
+        data = {"ID": cache_id,
+                "ASSOCIATEOBJTYPE": 11,
+                "ASSOCIATEOBJID": lun_id}
+        result = self.put('/create_associate', data=data)
+        _assert_result(result, 'Add lun %s to smartcachepartition '
+                       '%s error.', lun_id, cache_id)
+
+
+class FCPort(CommonObject):
+    _obj_url = '/fc_port'
+
+    def get_fc_ports(self):
+        result = self.get()
+        _assert_result(result, 'Get FC ports from array error.')
+        return result.get('data', [])
+
+    def get_fc_ports_in_portgroup(self, portgroup_id):
+        result = self.get('/associate?ASSOCIATEOBJTYPE=257'
+                          '&ASSOCIATEOBJID=%(id)s', id=portgroup_id)
+        _assert_result(result, 'Get FC ports in portgroup %s error.',
+                       portgroup_id)
+        return result.get("data", [])
+
+
+class HyperMetroDomain(CommonObject):
+    _obj_url = '/HyperMetroDomain'
+
+    def get_hypermetro_domain_id(self, domain_name):
+        domain_list = self._get_info_by_range(self._get_hypermetro_domain)
+        for item in domain_list:
+            if domain_name == item.get('NAME'):
+                return item.get('ID')
+        return None
+
+    def _get_hypermetro_domain(self, start, end, params):
+        url = ("?range=[%(start)s-%(end)s]"
+               % {"start": str(start), "end": str(end)})
+        result = self.get(url)
+        _assert_result(result, "Get hyper metro domains info error.")
+        return result.get('data', [])
+
+
+class HyperMetroPair(CommonObject):
+    _obj_url = '/HyperMetroPair'
+
+    def create_hypermetro(self, hcp_param):
+        result = self.post(data=hcp_param)
+        if result['error']['code'] == constants.HYPERMETRO_ALREADY_EXIST:
+            hypermetro_info = self.get_hypermetro_by_lun_id(
+                hcp_param["LOCALOBJID"])
+            if hypermetro_info:
+                return hypermetro_info
+
+        if result['error']['code'] == constants.CREATE_HYPERMETRO_TIMEOUT:
+            try_times = 2
+            while try_times:
+                time.sleep(constants.GET_VOLUME_WAIT_INTERVAL)
+                LOG.info(_("Create hypermetro timed out, try to get the "
+                           "hypermetro info, attempt %s"), 2 - try_times)
+                hypermetro_info = self.get_hypermetro_by_lun_id(
+                    hcp_param["LOCALOBJID"])
+                if hypermetro_info:
+                    return hypermetro_info
+                else:
+                    try_times -= 1
+        _assert_result(result, 'Create hypermetro pair %s error.', hcp_param)
+        return result['data']
+
+    def delete_hypermetro(self, metro_id):
+        result = self.delete('/%(id)s', id=metro_id)
+        if _error_code(result) == constants.HYPERMETRO_NOT_EXIST:
+            LOG.warning('Hypermetro %s to delete not exist.', metro_id)
+            return
+        _assert_result(result, 'Delete hypermetro %s error.', metro_id)
+
+    def sync_hypermetro(self, metro_id):
+        data = {"ID": metro_id}
+        result = self.put('/synchronize_hcpair', data=data)
+        _assert_result(result, 'Sync hypermetro %s error.', metro_id)
+
+    def stop_hypermetro(self, hypermetro_id):
+        data = {"ID": hypermetro_id}
+        result = self.put('/disable_hcpair', data=data)
+        _assert_result(result, 'Stop hypermetro %s error.', hypermetro_id)
+
+    def get_hypermetro_by_id(self, metro_id):
+        result = self.get('?filter=ID::%(id)s', id=metro_id)
+        _assert_result(result, 'Get hypermetro by id %s error.', metro_id)
+        if result.get('data'):
+            return result['data'][0]
+
+    def get_hypermetro_by_lun_name(self, lun_name):
+        result = self.get('?filter=LOCALOBJNAME::%(name)s', name=lun_name)
+        _assert_result(result, 'Get hypermetro by local lun name'
+                       ' %s error.', lun_name)
+        if result.get('data'):
+            return result['data'][0]
+
+    def get_hypermetro_by_lun_id(self, lun_id):
+        result = self.get('?filter=LOCALOBJID::%(name)s', name=lun_id)
+        _assert_result(result, 'Get hypermetro by local lun id %s error.',
+                       lun_id)
+        if result.get('data'):
+            return result['data'][0]
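create_hypermetro above handles CREATE_HYPERMETRO_TIMEOUT by sleeping and re-querying the pair the array may have created anyway. The retry shape in isolation, assuming query() returns a truthy value once the object exists:

import time

def query_after_timeout(query, attempts=2, interval=5):
    # The array may complete the operation after reporting a timeout,
    # so poll a few times before giving up.
    for _ in range(attempts):
        time.sleep(interval)
        info = query()
        if info:
            return info
    return None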
+
+
+class HyperMetroConsistentGroup(CommonObject):
+    _obj_url = '/HyperMetro_ConsistentGroup'
+
+    def get_metrogroup_by_name(self, name):
+        result = self.get('?filter=NAME::%(name)s', name=name)
+        _assert_result(result, 'Get hypermetro group by name %s error.', name)
+        if 'data' in result and len(result['data']) > 0:
+            return result['data'][0]
+
+    def create_metrogroup(self, group_params):
+        result = self.post(data=group_params)
+        _assert_result(result, 'Create hypermetro group %s error.',
+                       group_params)
+
+    def delete_metrogroup(self, metrogroup_id):
+        result = self.delete('/%(id)s', id=metrogroup_id)
+        if _error_code(result) == constants.HYPERMETROGROUP_NOT_EXIST:
+            LOG.warning('Hypermetro group %s to delete not exist.',
+                        metrogroup_id)
+            return
+        _assert_result(result, 'Delete hypermetro group %s error.',
+                       metrogroup_id)
+
+    def stop_metrogroup(self, metrogroup_id):
+        data = {"ID": metrogroup_id}
+        result = self.put('/stop', data=data)
+        _assert_result(result, 'Stop hypermetro group %s error.',
+                       metrogroup_id)
+
+    def sync_metrogroup(self, metrogroup_id):
+        data = {"ID": metrogroup_id}
+        result = self.put('/sync', data=data)
+        if _error_code(result) == constants.NO_HYPERMETRO_EXIST_IN_GROUP:
+            LOG.info('Hypermetro group %s to sync is empty.', metrogroup_id)
+            return
+        _assert_result(result, 'Sync hypermetro group %s error.',
+                       metrogroup_id)
+
+
+class HyperMetro(CommonObject):
+    _obj_url = '/hyperMetro'
+
+    def add_metro_to_metrogroup(self, metrogroup_id, metro_id):
+        data = {"ID": metrogroup_id,
+                "ASSOCIATEOBJID": metro_id}
+        result = self.post('/associate/pair', data=data)
+        if _error_code(result) == constants.HYPERMETRO_ALREADY_IN_GROUP:
+            LOG.warning('Hypermetro %(m_id)s to add already in group '
+                        '%(g_id)s.',
+                        {'m_id': metro_id, 'g_id': metrogroup_id})
+            return
+        _assert_result(result, 'Add hypermetro %s to group %s error.',
+                       metro_id, metrogroup_id)
+
+    def remove_metro_from_metrogroup(self, metrogroup_id, metro_id):
+        data = {"ID": metrogroup_id,
+                "ASSOCIATEOBJID": metro_id}
+        result = self.delete('/associate/pair', data=data)
+        if _error_code(result) == constants.HYPERMETRO_NOT_IN_GROUP:
+            LOG.warning('Hypermetro %(mid)s to remove not in group %(gid)s.',
+                        {'mid': metro_id, 'gid': metrogroup_id})
+            return
+        _assert_result(result, 'Delete hypermetro %s from group %s error.',
+                       metro_id, metrogroup_id)
+
+
+class Port(CommonObject):
+    _obj_url = '/port'
+
+    def add_port_to_portgroup(self, portgroup_id, port_id):
+        data = {"ASSOCIATEOBJID": port_id,
+                "ASSOCIATEOBJTYPE": 212,
+                "ID": portgroup_id}
+        result = self.post('/associate/portgroup', data=data)
+        if _error_code(result) == constants.PORT_ALREADY_IN_PORTGROUP:
+            LOG.warning('Port %(pid)s already in portgroup %(gid)s.',
+                        {'pid': port_id, 'gid': portgroup_id})
+            return
+        _assert_result(result, 'Add port %s to portgroup %s error.',
+                       port_id, portgroup_id)
+
+    def remove_port_from_portgroup(self, portgroup_id, port_id):
+        result = self.delete('/associate/portgroup?ID=%(gid)s&'
+                             'ASSOCIATEOBJTYPE=212&ASSOCIATEOBJID=%(pid)s',
+                             gid=portgroup_id, pid=port_id)
+        if _error_code(result) == constants.PORT_NOT_IN_PORTGROUP:
+            LOG.warning('Port %(pid)s not in portgroup %(gid)s.',
+                        {'pid': port_id, 'gid': portgroup_id})
+            return
+        _assert_result(result, 'Remove port %s from portgroup %s error.',
+                       port_id, portgroup_id)
+
+
+class RemoteDevice(CommonObject):
+    _obj_url = '/remote_device'
+
+    def get_remote_device_by_wwn(self, wwn):
+        result = self.get()
+        _assert_result(result, 'Get remote devices error.')
+        for device in result.get('data', []):
+            if device.get('WWN') == wwn:
+                return device
+
+
+class ReplicationPair(CommonObject):
+    _obj_url = '/REPLICATIONPAIR'
+
+    def create_replication_pair(self, pair_params):
+        result = self.post(data=pair_params)
+        _assert_result(result, 'Create replication %s error.', pair_params)
+        return result['data']
+
+    def get_replication_pair_by_id(self, pair_id):
+        result = self.get('/%(id)s', id=pair_id)
+        if _error_code(result) == constants.REPLICATION_PAIR_NOT_EXIST:
+            _assert_result(result, 'Replication pair %s not exist.', pair_id)
+        else:
+            _assert_result(result, 'Get replication pair %s error.', pair_id)
+        return result['data']
+
+    def switch_replication_pair(self, pair_id):
+        data = {"ID": pair_id}
+        result = self.put('/switch', data=data)
+        _assert_result(result, 'Switch over replication pair %s error.',
+                       pair_id)
+
+    def split_replication_pair(self, pair_id):
+        data = {"ID": pair_id}
+        result = self.put('/split', data=data)
+        _assert_result(result, 'Split replication pair %s error.', pair_id)
+
+    def delete_replication_pair(self, pair_id, force=False):
+        if force:
+            data = {"ISLOCALDELETE": force}
+            result = self.delete('/%(id)s', id=pair_id, data=data)
+        else:
+            result = self.delete('/%(id)s', id=pair_id)
+
+        if _error_code(result) == constants.REPLICATION_PAIR_NOT_EXIST:
+            LOG.warning('Replication pair to delete %s not exist.',
+                        pair_id)
+            return
+        _assert_result(result, 'Delete replication pair %s error.', pair_id)
+
+    def sync_replication_pair(self, pair_id):
+        data = {"ID": pair_id}
+        result = self.put('/sync', data=data)
+        _assert_result(result, 'Sync replication pair %s error.', pair_id)
+
+    def set_replication_pair_second_access(self, pair_id, access):
+        data = {"SECRESACCESS": access}
+        result = self.put('/%(id)s', id=pair_id, data=data)
+        _assert_result(result, 'Set replication pair %s secondary access '
+                       'to %s error.', pair_id, access)
+
+
+class ReplicationConsistencyGroup(CommonObject):
+    _obj_url = '/CONSISTENTGROUP'
+
+    def create_replication_group(self, group_params):
+        result = self.post(data=group_params)
+        _assert_result(result, 'Create replication group %s error.',
+                       group_params)
+        return result['data']
+
+    def get_replication_group_by_name(self, group_name):
+        result = self.get('?filter=NAME::%(name)s', name=group_name)
+        _assert_result(result, 'Get replication group by name %s error.',
+                       group_name)
+        if 'data' in result and len(result['data']) > 0:
+            return result['data'][0]
+
+    def get_replication_group_by_id(self, group_id):
+        result = self.get('/%(id)s', id=group_id)
+        _assert_result(result, 'Get replication group by id %s error.',
+                       group_id)
+        return result['data']
+
+    def delete_replication_group(self, group_id):
+        result = self.delete('/%(id)s', id=group_id)
+        if _error_code(result) == constants.REPLICATION_GROUP_NOT_EXIST:
+            LOG.warning('Replication group %s to delete not exist.',
+                        group_id)
+            return
+        _assert_result(result, 'Delete replication group %s error.', group_id)
+
+    def set_replication_group_second_access(self, group_id, access):
+        data = {"SECRESACCESS": access}
+        result = self.put("/%(id)s", id=group_id, data=data)
+        _assert_result(result, 'Set replication group %s second access to '
+                       '%s error.', group_id, access)
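The pair-level primitives above (split, set secondary access, switch) are the building blocks of a planned failover. A rough sketch; the '3' below is an assumed read/write access value, not taken from this patch:

def planned_failover(pair_ops, pair_id, second_access_rw='3'):
    # Quiesce replication, open the secondary for writes, then swap roles.
    pair_ops.split_replication_pair(pair_id)
    pair_ops.set_replication_pair_second_access(pair_id, second_access_rw)
    pair_ops.switch_replication_pair(pair_id)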
+
+
+class LicenseFeature(CommonObject):
+    _obj_url = '/license/feature'
+
+    def get_feature_status(self):
+        result = self.get(log_filter=True)
+        if result['error']['code'] != 0:
+            LOG.warning('Query feature information failed.')
+            return {}
+
+        status = {}
+        for feature in result.get('data', []):
+            status.update(feature)
+
+        return status
+
+
+class ClonePair(CommonObject):
+    _obj_url = '/clonepair'
+
+    def create_clone_pair(self, source_id, target_id, clone_speed):
+        data = {"copyRate": clone_speed,
+                "sourceID": source_id,
+                "targetID": target_id,
+                "isNeedSynchronize": "0"}
+        result = self.post("/relation", data=data)
+        _assert_result(result, 'Create ClonePair error, source_id is %s.',
+                       source_id)
+        return result['data']['ID']
+
+    def sync_clone_pair(self, pair_id):
+        data = {"ID": pair_id, "copyAction": 0}
+        result = self.put("/synchronize", data=data)
+        _assert_result(result, 'Sync ClonePair error, pair is %s.', pair_id)
+
+    def stop_clone_pair(self, pair_id):
+        data = {"ID": pair_id, "copyAction": 2}
+        result = self.put("/synchronize", data=data)
+        _assert_result(result, 'Stop ClonePair error, pair is %s.', pair_id)
+
+    def get_clone_pair_info(self, pair_id):
+        result = self.get('/%(id)s', id=pair_id)
+        _assert_result(result, 'Get ClonePair %s error.', pair_id)
+        return result.get('data', {})
+
+    def delete_clone_pair(self, pair_id, delete_dst_lun=False):
+        data = {"ID": pair_id,
+                "isDeleteDstLun": delete_dst_lun}
+        result = self.delete("/%(id)s", id=pair_id, data=data)
+        if _error_code(result) == constants.CLONE_PAIR_NOT_EXIST:
+            LOG.warning('ClonePair %s to delete not exist.', pair_id)
+            return
+        _assert_result(result, 'Delete ClonePair %s error.', pair_id)
+
+
+class HostNameIgnoringAdapter(HTTPAdapter):
+    def cert_verify(self, conn, url, verify, cert):
+        conn.assert_hostname = False
+        return super(HostNameIgnoringAdapter, self).cert_verify(
+            conn, url, verify, cert)
+
+
+def rest_operation_wrapper(func):
+    @functools.wraps(func)
+    def wrapped(self, url, **kwargs):
+        need_relogin = False
+
+        if not kwargs.get('log_filter'):
+            LOG.info('URL: %(url)s, Method: %(method)s, Data: %(data)s,',
+                     {'url': (self._login_url or '') + url,
+                      'method': func.__name__, 'data': kwargs.get('data')})
+
+        with self._session_lock.read_lock():
+            if self._login_url:
+                full_url = self._login_url + url
+                old_token = self._session.headers.get('iBaseToken')
+                try:
+                    r = func(self, full_url, **kwargs)
+                except requests.RequestException as err:
+                    if "BadStatusLine" in six.text_type(err):
+                        need_relogin = True
+                    else:
+                        LOG.exception(
+                            'Request URL: %(url)s, method: %(method)s failed '
+                            'at first time. Will switch login url and retry '
+                            'this request.',
+                            {'url': full_url, 'method': func.__name__})
+                        need_relogin = True
+                else:
+                    r.raise_for_status()
+                    result = r.json()
+                    if _error_code(result) in constants.RELOGIN_ERROR_CODE:
+                        LOG.error("Can't open the recent url, relogin.")
+                        need_relogin = True
+            else:
+                need_relogin = True
+                old_token = None
+
+        if need_relogin:
+            self._relogin(old_token)
+            try:
+                with self._session_lock.read_lock():
+                    full_url = self._login_url + url
+                    r = func(self, full_url, **kwargs)
+            except requests.RequestException:
+                LOG.exception('Request URL: %(url)s, method: %(method)s '
+                              'failed again.',
+                              {'url': full_url,
+                               'method': func.__name__})
+                raise
+
+        r.raise_for_status()
+        result = r.json()
+        response_time = r.elapsed.total_seconds()
+        if not kwargs.get('log_filter'):
+            LOG.info('Response: %s, Response duration time is %s',
+                     result, response_time)
+        return result
+
+    return wrapped
+
+
+class RestClient(object):
+    def __init__(self, config_dict):
+        self.san_address = config_dict.get('san_address')
+        self.san_user = config_dict.get('san_user')
+        self.san_password = config_dict.get('san_password')
+        self.vstore_name = config_dict.get('vstore_name')
+        self.ssl_verify = config_dict.get('ssl_cert_verify')
+        self.cert_path = config_dict.get('ssl_cert_path')
+        self.in_band_or_not = config_dict.get('in_band_or_not')
+        self.storage_sn = config_dict.get('storage_sn')
+        # To limit the requests concurrently sent to array
+        self.semaphore = threading.Semaphore(
+            config_dict.get('semaphore', constants.DEFAULT_SEMAPHORE))
+
+        self._login_url = None
+        self._login_device_id = None
+        self._session_lock = lockutils.ReaderWriterLock()
+        self._session = None
+        self._init_object_methods()
+
+        if self.in_band_or_not and not self.storage_sn:
+            msg = _("Please check 'InBandOrNot' and 'Storagesn', "
+                    "they are invalid.")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        if not self.ssl_verify and hasattr(requests, 'packages'):
+            LOG.warning("Suppressing requests library SSL Warnings")
+            requests.packages.urllib3.disable_warnings(
+                requests.packages.urllib3.exceptions.InsecureRequestWarning)
+            requests.packages.urllib3.disable_warnings(
+                requests.packages.urllib3.exceptions.InsecurePlatformWarning)
+
+    def _extract_obj_method(self, obj):
+        filter_method_names = ('login', 'get', 'post', 'delete', 'put')
+
+        def prefilter(m):
+            return (inspect.ismethod(m) and not inspect.isbuiltin(m) and
+                    m.__name__ not in filter_method_names and
+                    not m.__name__.startswith('_'))
+
+        members = inspect.getmembers(obj, prefilter)
+        for method in members:
+            if method[0] in self.__dict__:
+                msg = _('Method %s already exists in rest client.'
+                        ) % method[0]
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            self.__dict__[method[0]] = method[1]
+
+    def _init_object_methods(self):
+        def prefilter(m):
+            return inspect.isclass(m) and issubclass(m, CommonObject)
+
+        obj_classes = inspect.getmembers(sys.modules[__name__], prefilter)
+        for cls in obj_classes:
+            self._extract_obj_method(cls[1](self))
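The per-instance semaphore created in __init__ above is what the request wrappers acquire around every call, so concurrency is capped per backend rather than per process. The pattern in isolation:

import threading

class Limiter(object):
    # Cap the number of in-flight calls to a shared resource.
    def __init__(self, limit=20):
        self._sem = threading.Semaphore(limit)

    def run(self, func, *args, **kwargs):
        with self._sem:  # blocks once `limit` calls are in flight
            return func(*args, **kwargs)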
+ ) % {"url": url, "reason": result} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self._session.headers['iBaseToken'] = result['data']['iBaseToken'] + self._login_device_id = result['data']['deviceid'] + self._login_url = manage_url + self._login_device_id + + if result['data']['accountstate'] in constants.PWD_EXPIRED_OR_INITIAL: + self._session.delete(self._login_url + "/sessions") + self._login_device_id = None + self._login_url = None + msg = ("Storage password has been expired or initial, " + "please change the password.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _init_http_head(self): + self._session = requests.Session() + session_headers = { + "Connection": "keep-alive", + "Content-Type": "application/json; charset=utf-8"} + if self.in_band_or_not: + session_headers["IBA-Target-Array"] = self.storage_sn + self._session.headers.update(session_headers) + LOG.debug('Update session heard: %s.', self._session.headers) + + def _loop_login(self): + self._init_http_head() + self._session.verify = self.cert_path if self.ssl_verify else False + + for url in self.san_address: + try: + self._session.mount(url.lower(), HostNameIgnoringAdapter()) + self._try_login(url) + except Exception: + LOG.exception('Failed to login server %s.', url) + else: + # Sort the login url to the last slot of san addresses, so that + # if this connection error, next time will try other url first. + self.san_address.remove(url) + self.san_address.append(url) + LOG.info('Login %s success.', url) + return + + self._session.close() + self._session = None + + msg = _("Failed to login storage with all rest URLs.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def login(self): + with self._session_lock.write_lock(): + self._loop_login() + + def _relogin(self, old_token): + with self._session_lock.write_lock(): + if (self._session and + self._session.headers.get('iBaseToken') != old_token): + LOG.info('Relogin has been done by other thread, ' + 'no need relogin again.') + return + + # Try to logout the original session first + self._logout() + self._loop_login() + + def _logout(self): + if not self._login_url: + return + + try: + r = self._session.delete(self._login_url + "/sessions") + r.raise_for_status() + except Exception: + LOG.exception("Failed to logout session from URL %s.", + self._login_url) + else: + result = r.json() + if _error_code(result) == 0: + LOG.info("Succeed to logout session from URL %(url)s.", + {"url": self._login_url}) + else: + LOG.warning("Failed to logout session from URL %(url)s " + "because of %(reason)s.", + {"url": self._login_url, "reason": result}) + finally: + self._session.close() + self._session = None + self._login_url = None + self._login_device_id = None + + @property + def device_id(self): + return self._login_device_id + + @rest_operation_wrapper + def get(self, url, timeout=constants.SOCKET_TIMEOUT, **kwargs): + return self._session.get(url, timeout=timeout) + + @rest_operation_wrapper + def post(self, url, data, timeout=constants.SOCKET_TIMEOUT, **kwargs): + return self._session.post(url, data=json.dumps(data), timeout=timeout) + + @rest_operation_wrapper + def put(self, url, data, timeout=constants.SOCKET_TIMEOUT, **kwargs): + return self._session.put(url, data=json.dumps(data), timeout=timeout) + + @rest_operation_wrapper + def delete(self, url, timeout=constants.SOCKET_TIMEOUT, **kwargs): + if 'data' in kwargs: + return self._session.delete( + url, data=json.dumps(kwargs['data']), 
+                url, data=json.dumps(kwargs['data']), timeout=timeout)
+        else:
+            return self._session.delete(url, timeout=timeout)
+
+    def add_pair_to_replication_group(self, group_id, pair_id):
+        data = {'ID': group_id,
+                'RMLIST': [pair_id]}
+        result = self.put('/ADD_MIRROR', data=data)
+        _assert_result(result, 'Add pair %s to replication group %s error.',
+                       pair_id, group_id)
+
+    def remove_pair_from_replication_group(self, group_id, pair_id):
+        data = {'ID': group_id,
+                'RMLIST': [pair_id]}
+        result = self.put('/DEL_MIRROR', data=data)
+        if _error_code(result) in (constants.REPLICATION_PAIR_NOT_EXIST,
+                                   constants.REPLICATION_GROUP_NOT_EXIST,
+                                   constants.REPLICATION_PAIR_NOT_GROUP_MEMBER,
+                                   constants.REPLICATION_GROUP_IS_EMPTY):
+            LOG.warning('Ignore error %s while remove replication pair '
+                        'from group.', _error_code(result))
+            return
+        _assert_result(result, 'Remove pair %s from replication group %s '
+                       'error.', pair_id, group_id)
+
+    def split_replication_group(self, group_id):
+        data = {'ID': group_id}
+        result = self.put('/SPLIT_CONSISTENCY_GROUP', data=data)
+        _assert_result(result, 'Split replication group %s error.', group_id)
+
+    def sync_replication_group(self, group_id):
+        data = {'ID': group_id}
+        result = self.put('/SYNCHRONIZE_CONSISTENCY_GROUP', data=data)
+        if _error_code(result) == constants.REPLICATION_GROUP_IS_EMPTY:
+            LOG.info("Replication group %s to sync is empty.", group_id)
+            return
+        _assert_result(result, 'Sync replication group %s error.', group_id)
+
+    def switch_replication_group(self, group_id):
+        data = {'ID': group_id}
+        result = self.put('/SWITCH_GROUP_ROLE', data=data)
+        _assert_result(result, 'Switch replication group %s error.', group_id)
+
+    def get_array_info(self):
+        result = self.get('/system/')
+        _assert_result(result, 'Get array info error.')
+        return result['data']
+
+    def check_feature(self, obj):
+        try:
+            result = self.get('/%s/count' % obj, log_filter=True)
+        except requests.HTTPError as exc:
+            if exc.response.status_code == 404:
+                return False
+            raise
+
+        return _error_code(result) == 0
+
+    def get_controller_id(self, controller_name):
+        result = self.get('/controller')
+        _assert_result(result, 'Get controllers error.')
+
+        for con in result.get('data', []):
+            if con.get('LOCATION') == controller_name:
+                return con['ID']
+
+    def split_lunclone(self, clone_id):
+        data = {
+            "ID": clone_id,
+            "SPLITACTION": 1,
+            "ISCLONE": True,
+            "SPLITSPEED": 4,
+        }
+        result = self.put('/lunclone_split_switch', data=data)
+        if _error_code(result) == constants.CLONE_PAIR_SYNC_NOT_EXIST:
+            return
+        _assert_result(result, 'split clone lun %s error.', clone_id)
+
+    def stop_split_lunclone(self, clone_id):
+        data = {
+            "ID": clone_id,
+            "SPLITACTION": 2,
+            "ISCLONE": True,
+            "SPLITSPEED": 4,
+        }
+        result = self.put('/lunclone_split_switch', data=data)
+        if _error_code(result) == constants.CLONE_PAIR_SYNC_COMPLETE:
+            LOG.info("Split lun finish, delete the clone pair %s."
+                     % clone_id)
+            self.delete_clone_pair(clone_id)
+            return
+        _assert_result(result, 'stop split clone lun %s error.', clone_id)
+
+    def get_workload_type_id(self, workload_type_name):
+        url = "/workload_type?filter=NAME::%s" % workload_type_name
+        result = self.get(url)
+        _assert_result(result, 'Get workload type error')
+
+        for item in result.get("data", []):
+            if item.get("NAME") == workload_type_name:
+                return item.get("ID")
+
+    def get_workload_type_name(self, workload_type_id):
+        url = "/workload_type/%s" % workload_type_id
+        result = self.get(url)
+        _assert_result(result, 'Get workload type by id error')
+        return result.get("data", {}).get("NAME")
diff --git a/Cinder/Bobcat/smartx.py b/Cinder/Bobcat/smartx.py
new file mode 100644
index 0000000..1112948
--- /dev/null
+++ b/Cinder/Bobcat/smartx.py
@@ -0,0 +1,146 @@
+# Copyright (c) 2016 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from oslo_log import log as logging
+
+from cinder import exception
+from cinder.i18n import _
+from cinder import utils
+from cinder.volume.drivers.huawei import constants
+
+LOG = logging.getLogger(__name__)
+
+
+class SmartQos(object):
+    def __init__(self, client, is_dorado_v6=False):
+        self.client = client
+        self.is_dorado_v6 = is_dorado_v6
+
+    def _check_qos_consistency(self, policy, qos):
+        for key in [k.upper() for k in constants.QOS_SPEC_KEYS]:
+            if qos.get(key, '0') != policy.get(key, '0'):
+                return False
+        return True
+
+    def _change_lun_priority(self, qos, lun_id):
+        for key in qos:
+            if key.startswith('MIN') or key.startswith('LATENCY'):
+                data = {"IOPRIORITY": "3"}
+                self.client.update_lun(lun_id, data)
+                break
+
+    @utils.synchronized('huawei_qos', external=True)
+    def add(self, qos, lun_id):
+        if not self.is_dorado_v6:
+            self._change_lun_priority(qos, lun_id)
+        qos_id = self.client.create_qos(qos, lun_id)
+        try:
+            self.client.activate_deactivate_qos(qos_id, True)
+        except exception.VolumeBackendAPIException:
+            self.remove(qos_id, lun_id)
+            raise
+
+        return qos_id
+
+    @utils.synchronized('huawei_qos', external=True)
+    def remove(self, qos_id, lun_id, qos_info=None):
+        if not qos_info:
+            qos_info = self.client.get_qos_info(qos_id)
+        lun_list = json.loads(qos_info['LUNLIST'])
+        if lun_id in lun_list:
+            lun_list.remove(lun_id)
+
+        if len(lun_list) <= 0:
+            if qos_info['RUNNINGSTATUS'] != constants.QOS_INACTIVATED:
+                self.client.activate_deactivate_qos(qos_id, False)
+            self.client.delete_qos(qos_id)
+        else:
+            self.client.update_qos_luns(qos_id, lun_list)
+
+    def update(self, qos_id, new_qos, lun_id):
+        qos_info = self.client.get_qos_info(qos_id)
+        if self._check_qos_consistency(qos_info, new_qos):
+            return
+
+        self.remove(qos_id, lun_id, qos_info)
+        self.add(new_qos, lun_id)
+
+
+class SmartPartition(object):
+    def __init__(self, client):
+        self.client = client
+
+    def add(self, partitionname, lun_id):
+        partition_id = self.client.get_partition_id_by_name(partitionname)
+        if not partition_id:
+            msg = _('Cannot find partition by name %s.') % partitionname
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
+
+        self.client.add_lun_to_partition(lun_id, partition_id)
+        return partition_id
+
+    def remove(self, partition_id, lun_id):
+        self.client.remove_lun_from_partition(lun_id, partition_id)
+
+    def update(self, partition_id, partitionname, lun_id):
+        partition_info = self.client.get_partition_info_by_id(partition_id)
+        if partition_info['NAME'] == partitionname:
+            return
+
+        self.remove(partition_id, lun_id)
+        self.add(partitionname, lun_id)
+
+    def check_partition_valid(self, partitionname):
+        partition_id = self.client.get_partition_id_by_name(partitionname)
+        if not partition_id:
+            msg = _("Partition %s doesn't exist.") % partitionname
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
+
+
+class SmartCache(object):
+    def __init__(self, client):
+        self.client = client
+
+    def add(self, cachename, lun_id):
+        cache_id = self.client.get_cache_id_by_name(cachename)
+        if not cache_id:
+            msg = _('Cannot find cache by name %s.') % cachename
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
+
+        self.client.add_lun_to_cache(lun_id, cache_id)
+        return cache_id
+
+    def remove(self, cache_id, lun_id):
+        self.client.remove_lun_from_cache(lun_id, cache_id)
+
+    def update(self, cache_id, cachename, lun_id):
+        cache_info = self.client.get_cache_info_by_id(cache_id)
+        if cache_info['NAME'] == cachename:
+            return
+
+        self.remove(cache_id, lun_id)
+        self.add(cachename, lun_id)
+
+    def check_cache_valid(self, cachename):
+        cache_id = self.client.get_cache_id_by_name(cachename)
+        if not cache_id:
+            msg = _("Cache %s doesn't exist.") % cachename
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
diff --git a/Cinder/Mitaka/__init__.py b/Cinder/Mitaka/__init__.py
index ea50481..7da7786 100644
--- a/Cinder/Mitaka/__init__.py
+++ b/Cinder/Mitaka/__init__.py
@@ -1 +1 @@
-"""Version: 2.6.3"""
+"""Version: 2.6.4"""
diff --git a/Cinder/Mitaka/constants.py b/Cinder/Mitaka/constants.py
index 5fc622a..f8118c8 100644
--- a/Cinder/Mitaka/constants.py
+++ b/Cinder/Mitaka/constants.py
@@ -49,6 +49,10 @@
 MIGRATION_FAULT = '74'
 MIGRATION_COMPLETE = '76'
 
+# ROCE INITIATOR CONSTANTS
+NVME_ROCE_INITIATOR_TYPE = '57870'
+ADDRESS_FAMILY_IPV4 = '0'
+
 ERROR_CONNECT_TO_SERVER = -403
 ERROR_UNAUTHORIZED_TO_SERVER = -401
 ERROR_BAD_STATUS_LINE = -400
@@ -57,6 +61,7 @@
 SOCKET_TIMEOUT = 52
 ERROR_VOLUME_ALREADY_EXIST = 1077948993
 LOGIN_SOCKET_TIMEOUT = 32
+DEFAULT_SEMAPHORE = 20
 ERROR_VOLUME_NOT_EXIST = 1077939726
 ERROR_LUN_NOT_EXIST = 1077936859
 ERROR_SNAPSHOT_NOT_EXIST = 1077937880
@@ -73,6 +78,7 @@
 CLONE_PAIR_SYNC_COMPLETE = 1073798176
 CLONE_PAIR_SYNC_NOT_EXIST = 1073798172
 HOST_ALREADY_IN_HOSTGROUP = 1077937501
+OBJECT_ALREADY_EXIST = 1077948997
 LUN_ALREADY_IN_LUNGROUP = 1077948997
 HOSTGROUP_ALREADY_IN_MAPPINGVIEW = 1073804556
 LUNGROUP_ALREADY_IN_MAPPINGVIEW = 1073804560
@@ -148,7 +154,7 @@ 'Thin': THIN_LUNTYPE}
 
 VALID_PRODUCT = ['V3', 'V5', '18000', 'Dorado', 'V6']
-VALID_PROTOCOL = ['FC', 'iSCSI']
+VALID_PROTOCOL = ['FC', 'iSCSI', 'nvmeof']
 VALID_WRITE_TYPE = ['1', '2']
 VOLUME_NOT_EXISTS_WARN = 'warning'
 VOLUME_NOT_EXISTS_RAISE = 'raise'
diff --git a/Cinder/Mitaka/huawei_conf.py b/Cinder/Mitaka/huawei_conf.py
index e90ddb3..cd0a2c9 100644
--- a/Cinder/Mitaka/huawei_conf.py
+++ b/Cinder/Mitaka/huawei_conf.py
@@ -90,6 +90,7 @@ def update_config_value(self):
             self._force_delete_volume,
             self._iscsi_default_target_ip,
             self._iscsi_info,
+            self._roce_info,
             self._fc_info,
             self._ssl_cert_path,
             self._ssl_cert_verify,
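The hunk below adds _get_rest_client_semaphore, which accepts only a positive integer from Storage/Semaphore and otherwise falls back to DEFAULT_SEMAPHORE or raises. The validation rule in isolation (the XML literal and function name are illustrative):

import xml.etree.ElementTree as ET

DEFAULT_SEMAPHORE = 20

def parse_semaphore(xml_text):
    text = ET.fromstring(xml_text).findtext('Storage/Semaphore')
    if not text or not text.strip():
        return DEFAULT_SEMAPHORE          # element missing or empty
    if text.isdigit() and int(text) > 0:  # digits only, greater than zero
        return int(text)
    raise ValueError('Semaphore must be a positive integer')

# parse_semaphore('<config><Storage><Semaphore>8</Semaphore></Storage></config>') -> 8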
@@ -102,7 +103,8 @@ def update_config_value(self):
             self._get_local_in_band_or_not,
             self._get_local_storage_sn,
             self._rollback_speed,
-            self._set_qos_ignored_param)
+            self._set_qos_ignored_param,
+            self._get_rest_client_semaphore)
 
         for f in set_attr_funcs:
             f(xml_root)
@@ -440,7 +442,7 @@ def _parse_rmt_iscsi_info(self, iscsi_info):
         # Step 5, make initiators configure dict, convert to:
         # [{'TargetPortGroup': 'xxx', 'Name': 'xxx'},
-        # {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}]
+        #  {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}]
         get_opts = lambda x: x.split(':', 1)
         initiator_infos = map(lambda x: dict(map(get_opts, x)),
                               initiator_infos)
@@ -474,6 +476,8 @@ def get_hypermetro_devices(self):
                 dev.get('iscsi_info'))
             dev_config['fc_info'] = self._parse_rmt_iscsi_info(
                 dev.get('fc_info'))
+            dev_config['roce_info'] = self._parse_rmt_iscsi_info(
+                dev.get('roce_info'))
             dev_config['iscsi_default_target_ip'] = (
                 dev['iscsi_default_target_ip'].split(';')
                 if 'iscsi_default_target_ip' in dev
@@ -507,6 +511,8 @@ def get_replication_devices(self):
                 dev.get('iscsi_info'))
             dev_config['fc_info'] = self._parse_rmt_iscsi_info(
                 dev.get('fc_info'))
+            dev_config['roce_info'] = self._parse_rmt_iscsi_info(
+                dev.get('roce_info'))
             dev_config['iscsi_default_target_ip'] = (
                 dev['iscsi_default_target_ip'].split(';')
                 if 'iscsi_default_target_ip' in dev
@@ -529,6 +535,7 @@ def get_local_device(self):
             'storage_pools': self.conf.storage_pools,
             'iscsi_info': self.conf.iscsi_info,
             'fc_info': self.conf.fc_info,
+            'roce_info': self.conf.roce_info,
             'iscsi_default_target_ip': self.conf.iscsi_default_target_ip,
             'in_band_or_not': self.conf.in_band_or_not,
             'storage_sn': self.conf.storage_sn,
@@ -679,3 +686,32 @@ def _set_qos_ignored_param(xml_root):
         qos_ignored_params = text.split(';')
         qos_ignored_params = list(set(x.strip() for x in qos_ignored_params
                                       if x.strip()))
         setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params)
+
+    def _get_rest_client_semaphore(self, xml_root):
+        semaphore = xml_root.findtext('Storage/Semaphore')
+        if not semaphore or not semaphore.strip():
+            setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE)
+        elif semaphore.isdigit() and int(semaphore) > 0:
+            setattr(self.conf, 'semaphore', int(semaphore))
+        else:
+            msg = _("Semaphore configured error. The semaphore must be an "
+                    "integer and must be greater than zero")
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
+
+    def _roce_info(self, xml_root):
+        nodes = xml_root.findall('RoCE/Initiator')
+        if nodes is None:
+            setattr(self.conf, 'roce_info', [])
+            return
+
+        roce_info = []
+        for node in nodes:
+            props = {}
+            for item in node.items():
+                props[item[0].strip()] = item[1].strip()
+
+            roce_info.append(props)
+
+        self._check_hostname_regex_config(roce_info)
+        setattr(self.conf, 'roce_info', roce_info)
diff --git a/Cinder/Mitaka/huawei_driver.py b/Cinder/Mitaka/huawei_driver.py
index 62f08d4..00f3729 100644
--- a/Cinder/Mitaka/huawei_driver.py
+++ b/Cinder/Mitaka/huawei_driver.py
@@ -81,7 +81,7 @@
 class HuaweiBaseDriver(driver.VolumeDriver):
 
-    VERSION = "2.6.3"
+    VERSION = "2.6.4"
 
     def __init__(self, *args, **kwargs):
         super(HuaweiBaseDriver, self).__init__(*args, **kwargs)
@@ -3642,3 +3642,226 @@ def _delete_zone_and_remove_fc_initiators(self, wwns, host_id):
                 'data': {'target_wwn': tgt_port_wwns,
                          'initiator_target_map': init_targ_map}}
         return info, portg_id
+
+
+class HuaweiROCEDriver(HuaweiBaseDriver):
+    """RoCE driver for Huawei storage arrays.
+
+    Version history:
+        2.6.4 - start to support RoCE.
+ """ + + def __init__(self, *args, **kwargs): + super(HuaweiROCEDriver, self).__init__(*args, **kwargs) + + def get_volume_stats(self, refresh=False): + """Get volume status.""" + data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or self.__class__.__name__ + data['storage_protocol'] = 'nvmeof' + data['driver_version'] = self.VERSION + data['vendor_name'] = 'Huawei' + return data + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def initialize_connection(self, volume, connector): + """Map a volume to a host and return target RoCE information.""" + self._check_roce_params(volume, connector) + + # Attach local lun. + roce_info = self._initialize_connection(volume, connector) + + # Attach remote lun if exists. + metadata = huawei_utils.get_lun_metadata(volume) + LOG.info("Attach Volume, metadata is: %s.", metadata) + if metadata.get('hypermetro'): + try: + rmt_roce_info = ( + self._initialize_connection(volume, connector, False)) + except Exception: + with excutils.save_and_reraise_exception(): + self._terminate_connection(volume, connector) + + roce_info.get('data').get('target_portals').extend( + rmt_roce_info.get('data').get('target_portals')) + roce_info.get('data').get('target_luns').extend( + rmt_roce_info.get('data').get('target_luns')) + + LOG.info('initialize_common_connection_roce, ' + 'return data is: %s.', roce_info) + return roce_info + + def _initialize_connection(self, volume, connector, local=True): + LOG.info('Initialize RoCE connection for volume %(id)s, ' + 'connector info %(conn)s. array is in %(location)s.', + {'id': volume.id, 'conn': connector, + 'location': 'local' if local else 'remote'}) + + host_nqn = connector.get("host_nqn") + + client = self.client if local else self.rmt_client + + lun_id, lun_type = self.get_lun_id_and_type( + volume, constants.VOLUME_NOT_EXISTS_RAISE, local) + lun_info = client.get_lun_info(lun_id, lun_type) + + target_ips = client.get_roce_params(connector) + + host_id = client.add_host_with_check( + connector.get('host'), self.is_dorado_v6, host_nqn) + + try: + client.ensure_roceini_added(host_nqn, host_id) + except Exception: + with excutils.save_and_reraise_exception(): + self.remove_host_with_check(host_id) + + hostgroup_id = client.add_host_to_hostgroup(host_id) + + metadata = huawei_utils.get_lun_metadata(volume) + hypermetro_lun = metadata.get('hypermetro') + + map_info = client.do_mapping( + lun_info, hostgroup_id, host_id, + lun_type=lun_type, hypermetro_lun=hypermetro_lun) + host_lun_id = client.get_host_lun_id(host_id, lun_info, lun_type) + LOG.info('initialize_connection, host lun id is: %(id)s. 
+                 'View info is %(view)s.',
+                 {'id': host_lun_id, 'view': map_info})
+        host_lun_id = int(host_lun_id)
+        mapping_info = {
+            'target_portals': ['%s:4420' % ip for ip in target_ips],
+            'target_luns': [host_lun_id] * len(target_ips),
+            'transport_type': 'rdma',
+            'host_nqn': host_nqn,
+            'discard': True,
+            'volume_nguid': lun_info.get("NGUID")
+        }
+        conn = {
+            'driver_volume_type': 'nvmeof',
+            'data': mapping_info
+        }
+        LOG.info('Initialize RoCE connection successfully: %s.', conn)
+        return conn
+
+    @coordination.synchronized('huawei-mapping-{connector[host]}')
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Delete map between a volume and a host."""
+        self._check_roce_params(volume, connector)
+
+        metadata = huawei_utils.get_lun_metadata(volume)
+        LOG.info("terminate_connection, metadata is: %s.", metadata)
+        self._terminate_connection(volume, connector)
+
+        if metadata.get('hypermetro'):
+            self._terminate_connection(volume, connector, False)
+
+        LOG.info('terminate_connection success.')
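For reference, the 'nvmeof' payload assembled in _initialize_connection above reaches os-brick in roughly this shape (all values below are illustrative placeholders, not output from a real array):

connection_info = {
    'driver_volume_type': 'nvmeof',
    'data': {
        'target_portals': ['192.168.3.10:4420'],  # one entry per RoCE logical port
        'target_luns': [1],                       # host LUN id repeated per portal
        'transport_type': 'rdma',
        'host_nqn': 'nqn.2014-08.org.nvmexpress:uuid:example',
        'discard': True,
        'volume_nguid': '3fd6...',                # NGUID of the mapped LUN
    },
}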
" + "Lungroup id: %(lungroup_id)s.", + {"lun_id": lun_id, "lungroup_id": lungroup_id}) + if self.configuration.retain_storage_mapping: + return + + mapping_param = {'host_id': host_id, 'initiator_name': initiator_name, + 'lungroup_id': lungroup_id, 'view_id': view_id, + 'portgroup_id': portgroup_id} + self._delete_storage_mapping(client, mapping_param) + + def _delete_storage_mapping(self, client, mapping_param): + left_lun_num = -1 + lungroup_id = mapping_param.get('lungroup_id') + view_id = mapping_param.get('view_id') + portgroup_id = mapping_param.get('portgroup_id') + initiator_name = mapping_param.get('initiator_name') + host_id = mapping_param.get('host_id') + if lungroup_id: + left_lun_num = client.get_obj_count_from_lungroup(lungroup_id) + if view_id and (int(left_lun_num) <= 0): + if portgroup_id and client.is_portgroup_associated_to_view( + view_id, portgroup_id): + client.delete_portgroup_mapping_view(view_id, portgroup_id) + + if client.lungroup_associated(view_id, lungroup_id): + client.delete_lungroup_mapping_view(view_id, lungroup_id) + + client.delete_lungroup(lungroup_id) + + if client.is_roce_initiator_associated_to_host( + initiator_name, host_id): + client.remove_roce_initiator_from_host(initiator_name, host_id) + + hostgroup_name = constants.HOSTGROUP_PREFIX + host_id + hostgroup_id = client.find_hostgroup(hostgroup_name) + if hostgroup_id: + if client.hostgroup_associated(view_id, hostgroup_id): + client.delete_hostgoup_mapping_view(view_id, hostgroup_id) + client.remove_host_from_hostgroup(hostgroup_id, host_id) + client.delete_hostgroup(hostgroup_id) + client.remove_host(host_id) + + client.delete_mapping_view(view_id) + + def _check_roce_params(self, volume, connector): + if not volume or not connector: + msg = _( + '%(param)s is none.' + % {'param': 'volume' if not volume else 'connector'}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not volume.id: + msg = _( + 'volume param is error. volume is %(volume)s.' + % {'volume': volume}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not connector.get('host_nqn') or not connector.get('host'): + msg = _( + 'connector param is error. connector is %(connector)s.' 
+                % {'connector': connector})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        if not self.is_dorado_v6:
+            msg = _("Current storage doesn't support RoCE.")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
diff --git a/Cinder/Mitaka/rest_client.py b/Cinder/Mitaka/rest_client.py
index 93a44ab..4df29e2 100644
--- a/Cinder/Mitaka/rest_client.py
+++ b/Cinder/Mitaka/rest_client.py
@@ -58,13 +58,14 @@ def __init__(self, configuration, san_address, san_user, san_password,
         self.iscsi_info = kwargs.get('iscsi_info',
                                      self.configuration.iscsi_info)
         self.fc_info = kwargs.get('fc_info', self.configuration.fc_info)
+        self.roce_info = kwargs.get('roce_info', self.configuration.roce_info)
         self.iscsi_default_target_ip = kwargs.get(
             'iscsi_default_target_ip',
             self.configuration.iscsi_default_target_ip)
         self.metro_domain = kwargs.get('metro_domain', None)
         self.metro_sync_completed = strutils.bool_from_string(
             kwargs.get('metro_sync_completed'))
-        self.semaphore = threading.Semaphore(20)
+        self.semaphore = threading.Semaphore(self.configuration.semaphore)
         self.call_lock = lockutils.ReaderWriterLock()
         self.session = None
         self.url = None
@@ -154,15 +155,14 @@ def do_call(self, url=None, data=None, method=None,
         }
 
         res_json = res.json()
+        response_time = res.elapsed.total_seconds()
         if not filter_flag:
-            LOG.info('\nRequest URL: %(url)s\n'
-                     'Call Method: %(method)s\n'
-                     'Request Data: %(data)s\n'
-                     'Response Data:%(res)s',
-                     {'url': url,
-                      'method': method,
-                      'data': data,
-                      'res': res_json})
+            LOG.info('Request URL: %(url)s, Call Method: %(method)s, '
+                     'Request Data: %(data)s, Response Data: %(res)s, '
+                     'Response Time: %(res_time)s',
+                     {'url': url, 'method': method,
+                      'data': data, 'res': res_json,
+                      'res_time': response_time})
 
         return res_json
 
@@ -840,7 +840,7 @@ def add_host_with_check(self, host_name, is_dorado_v6, initiator):
         host_id = huawei_utils.get_host_id(self, host_name)
         new_alua_info = {}
         if self.is_dorado_v6:
-            info = self.iscsi_info or self.fc_info
+            info = self.iscsi_info or self.fc_info or self.roce_info
             new_alua_info = self._find_new_alua_info(
                 info, host_name, initiator)
         if host_id:
@@ -2992,3 +2992,190 @@ def cancel_rollback_snapshot(self, snapshot_id):
         result = self.call(url, data, "PUT")
         self._assert_rest_result(result,
                                  'Cancel rollback snapshot %s error.'
                                  % snapshot_id)
+
+    def ensure_roceini_added(self, initiator_name, host_id):
+        # Check and associate RoCE initiator to host on array
+        initiator = self._get_roceini_by_id(initiator_name)
+
+        if not initiator:
+            self._add_roceini_to_array(initiator_name)
+            self._associate_roceini_to_host(initiator_name, host_id)
+            return
+
+        if initiator.get('ISFREE') == "true":
+            self._associate_roceini_to_host(initiator_name, host_id)
+            return
+
+        # if initiator was associated to another host
+        if initiator.get("PARENTID") != host_id:
+            msg = (_("Initiator %(ini)s has been added to another host "
+                     "%(host)s.") % {"ini": initiator_name,
+                                     "host": initiator.get('PARENTNAME')})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def _get_roceini_by_id(self, nqn):
+        """Get RoCE initiator from array."""
+        url = '/NVMe_over_RoCE_initiator/%s' % nqn
+        result = self.call(url, None, "GET")
+
+        if result.get('error', {}).get('code') == constants.FC_INITIATOR_NOT_EXIST:
+            LOG.warning('RoCE NQN %s not exist.', nqn)
+            return {}
+        self._assert_rest_result(result, 'get RoCE NQN %s error.'
% nqn) + + return result.get("data", {}) + + def _add_roceini_to_array(self, nqn): + """Add a new RoCE initiator to storage device.""" + url = "/NVMe_over_RoCE_initiator" + data = {"ID": nqn} + result = self.call(url, data, "POST") + if result.get('error', {}).get('code') == constants.OBJECT_ALREADY_EXIST: + LOG.warning('RoCE NQN %s has already exist in array.', nqn) + else: + self._assert_rest_result( + result, _('Add RoCE initiator %s to array error.' % nqn)) + + def _associate_roceini_to_host(self, nqn, host_id): + """Associate RoCE initiator with the host.""" + url = "/host/create_associate" + data = {"ASSOCIATEOBJTYPE": constants.NVME_ROCE_INITIATOR_TYPE, + "ID": host_id, + "ASSOCIATEOBJID": nqn} + result = self.call(url, data, "PUT") + self._assert_rest_result( + result, _("Associate RoCE initiator %(ini)s to host %(host)s " + "error." % {"ini": nqn, "host": host_id})) + + def is_roce_initiator_associated_to_host(self, initiator_name, host_id): + initiator = self._get_roceini_by_id(initiator_name) + if not initiator or initiator.get('ISFREE') == "true": + return False + + if initiator.get('PARENTID') == host_id: + return True + else: + msg = _("Initiator %(ini)s has been added to host " + "%(host)s.") % {"ini": initiator_name, + "host": initiator.get('PARENTID')} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def remove_roce_initiator_from_host(self, initiator_name, host_id): + url = "/host/remove_associate" + data = {"ID": host_id, + "ASSOCIATEOBJTYPE": constants.NVME_ROCE_INITIATOR_TYPE, + "ASSOCIATEOBJID": initiator_name} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, + _('Remove RoCE initiator from host error.')) + + def get_roce_params(self, connector): + """Get target ROCE params, including IP.""" + host_nqn = connector.get('host_nqn') + host_name = connector.get('host') + target_ips = self._get_roce_target_ips(host_nqn, host_name) + + logic_ports = self.get_roce_logical_ports() + result = [] + for ip in target_ips: + if self._is_roce_target_ip_in_array(ip, logic_ports): + format_ip = netaddr.IPAddress(ip) + if format_ip.version == 6: + ip = str(format_ip.format(dialect=netaddr.ipv6_compact)) + ip = '[' + ip + ']' + result.append(ip) + + if not result: + err_msg = _('There is no any logic ips exist on array of the ' + 'configured target_ip %s in conf file' % target_ips) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + return result + + def _get_roce_target_ips(self, initiator, host_name): + target_ips = self._get_target_ips_by_initiator_name(initiator) + + if not target_ips: + target_ips = self._get_target_ips_by_host_name(host_name) + + if not target_ips: + msg = (_( + 'get_roce_params: Failed to get target IP ' + 'for host %(host)s, please check config file.') + % {'host': host_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.info('Get the default ip: %s.', target_ips) + return target_ips + + def _get_roce_logic_ports(self, start, end, params): + url = ("/lif?range=[%(start)s-%(end)s]" + % {"start": six.text_type(start), "end": six.text_type(end)}) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('get RoCE Logic Ports error.')) + return result.get('data', []) + + def get_roce_logical_ports(self): + all_logic_ports = self._get_info_by_range( + self._get_roce_logic_ports) + return all_logic_ports + + def _is_roce_target_ip_in_array(self, ip, logic_ports): + for logic_port in logic_ports: + if logic_port.get('ADDRESSFAMILY') == 
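# ensure_roceini_added above reduces to a four-way decision on the initiator
# record returned by the array. A condensed, hypothetical restatement that
# returns the chosen action instead of issuing REST calls:
def classify_roce_initiator(initiator, host_id):
    if not initiator:
        return 'create-then-associate'  # NQN not yet known to the array
    if initiator.get('ISFREE') == 'true':
        return 'associate'              # exists on the array but unbound
    if initiator.get('PARENTID') != host_id:
        return 'conflict'               # bound to another host: driver raises
    return 'noop'                       # already bound to this host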
constants.ADDRESS_FAMILY_IPV4: + if ip == logic_port.get('IPV4ADDR'): + return True + else: + if self._is_same_ipv6(ip, logic_port.get('IPV6ADDR')): + return True + + return False + + @staticmethod + def _is_same_ipv6(left_ip, right_ip): + format_left_ip = str( + netaddr.IPAddress(left_ip).format(dialect=netaddr.ipv6_compact)) + format_right_ip = str( + netaddr.IPAddress(right_ip).format(dialect=netaddr.ipv6_compact)) + if format_left_ip == format_right_ip: + return True + + return False + + @staticmethod + def _get_target_ip_list(roce_info, target_ips): + for target_ip in roce_info.get('TargetIP').split(): + if target_ip.strip(): + target_ips.append(target_ip) + + def _get_target_ips_by_initiator_name(self, initiator): + target_ips = [] + for info in self.roce_info: + config_initiator = info.get('Name') + if not config_initiator: + continue + if config_initiator == initiator: + self._get_target_ip_list(info, target_ips) + return target_ips + + def _get_target_ips_by_host_name(self, host_name): + target_ips = [] + temp_target_ips = [] + for info in self.roce_info: + config_host_name = info.get('HostName') + if not config_host_name: + continue + if config_host_name == '*': + self._get_target_ip_list(info, temp_target_ips) + elif re.search(config_host_name, host_name): + self._get_target_ip_list(info, target_ips) + break + + if not target_ips and temp_target_ips: + target_ips = temp_target_ips + + return target_ips diff --git a/Cinder/Newton/__init__.py b/Cinder/Newton/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Newton/__init__.py +++ b/Cinder/Newton/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Newton/constants.py b/Cinder/Newton/constants.py index 5fc622a..f8118c8 100644 --- a/Cinder/Newton/constants.py +++ b/Cinder/Newton/constants.py @@ -49,6 +49,10 @@ MIGRATION_FAULT = '74' MIGRATION_COMPLETE = '76' +# ROCE INITIATOR CONSTANTS +NVME_ROCE_INITIATOR_TYPE = '57870' +ADDRESS_FAMILY_IPV4 = '0' + ERROR_CONNECT_TO_SERVER = -403 ERROR_UNAUTHORIZED_TO_SERVER = -401 ERROR_BAD_STATUS_LINE = -400 @@ -57,6 +61,7 @@ SOCKET_TIMEOUT = 52 ERROR_VOLUME_ALREADY_EXIST = 1077948993 LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 ERROR_VOLUME_NOT_EXIST = 1077939726 ERROR_LUN_NOT_EXIST = 1077936859 ERROR_SNAPSHOT_NOT_EXIST = 1077937880 @@ -73,6 +78,7 @@ CLONE_PAIR_SYNC_COMPLETE = 1073798176 CLONE_PAIR_SYNC_NOT_EXIST = 1073798172 HOST_ALREADY_IN_HOSTGROUP = 1077937501 +OBJECT_ALREADY_EXIST = 1077948997 LUN_ALREADY_IN_LUNGROUP = 1077948997 HOSTGROUP_ALREADY_IN_MAPPINGVIEW = 1073804556 LUNGROUP_ALREADY_IN_MAPPINGVIEW = 1073804560 @@ -148,7 +154,7 @@ 'Thin': THIN_LUNTYPE} VALID_PRODUCT = ['V3', 'V5', '18000', 'Dorado', 'V6'] -VALID_PROTOCOL = ['FC', 'iSCSI'] +VALID_PROTOCOL = ['FC', 'iSCSI', 'nvmeof'] VALID_WRITE_TYPE = ['1', '2'] VOLUME_NOT_EXISTS_WARN = 'warning' VOLUME_NOT_EXISTS_RAISE = 'raise' diff --git a/Cinder/Newton/huawei_conf.py b/Cinder/Newton/huawei_conf.py index e90ddb3..cd0a2c9 100644 --- a/Cinder/Newton/huawei_conf.py +++ b/Cinder/Newton/huawei_conf.py @@ -90,6 +90,7 @@ def update_config_value(self): self._force_delete_volume, self._iscsi_default_target_ip, self._iscsi_info, + self._roce_info, self._fc_info, self._ssl_cert_path, self._ssl_cert_verify, @@ -102,7 +103,8 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._rollback_speed, - self._set_qos_ignored_param) + self._set_qos_ignored_param, + self._get_rest_client_semaphore) for f in set_attr_funcs: f(xml_root) @@ -440,7 +442,7 @@ def 
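# _get_target_ips_by_host_name above gives an exact or regex HostName match
# priority over the '*' wildcard, which only acts as a fallback. A standalone
# sketch of that lookup (hypothetical helper; entries are dicts parsed from
# the RoCE/Initiator XML attributes):
import re


def pick_target_ips(roce_infos, host_name):
    matched, wildcard = [], []
    for info in roce_infos:
        pattern = info.get('HostName')
        if not pattern:
            continue
        if pattern == '*':
            wildcard.extend(info.get('TargetIP', '').split())
        elif re.search(pattern, host_name):
            matched.extend(info.get('TargetIP', '').split())
            break
    # wildcard IPs are used only when no specific entry matched
    return matched or wildcard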
_parse_rmt_iscsi_info(self, iscsi_info): # Step 5, make initiators configure dict, convert to: # [{'TargetPortGroup': 'xxx', 'Name': 'xxx'}, - # {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}] + # {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}] get_opts = lambda x: x.split(':', 1) initiator_infos = map(lambda x: dict(map(get_opts, x)), initiator_infos) @@ -474,6 +476,8 @@ def get_hypermetro_devices(self): dev.get('iscsi_info')) dev_config['fc_info'] = self._parse_rmt_iscsi_info( dev.get('fc_info')) + dev_config['roce_info'] = self._parse_rmt_iscsi_info( + dev.get('roce_info')) dev_config['iscsi_default_target_ip'] = ( dev['iscsi_default_target_ip'].split(';') if 'iscsi_default_target_ip' in dev @@ -507,6 +511,8 @@ def get_replication_devices(self): dev.get('iscsi_info')) dev_config['fc_info'] = self._parse_rmt_iscsi_info( dev.get('fc_info')) + dev_config['roce_info'] = self._parse_rmt_iscsi_info( + dev.get('roce_info')) dev_config['iscsi_default_target_ip'] = ( dev['iscsi_default_target_ip'].split(';') if 'iscsi_default_target_ip' in dev @@ -529,6 +535,7 @@ def get_local_device(self): 'storage_pools': self.conf.storage_pools, 'iscsi_info': self.conf.iscsi_info, 'fc_info': self.conf.fc_info, + 'roce_info': self.conf.roce_info, 'iscsi_default_target_ip': self.conf.iscsi_default_target_ip, 'in_band_or_not': self.conf.in_band_or_not, 'storage_sn': self.conf.storage_sn, @@ -679,3 +686,32 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + def _roce_info(self, xml_root): + nodes = xml_root.findall('RoCE/Initiator') + if nodes is None: + setattr(self.conf, 'roce_info', []) + return + + roce_info = [] + for node in nodes: + props = {} + for item in node.items(): + props[item[0].strip()] = item[1].strip() + + roce_info.append(props) + + self._check_hostname_regex_config(roce_info) + setattr(self.conf, 'roce_info', roce_info) diff --git a/Cinder/Newton/huawei_driver.py b/Cinder/Newton/huawei_driver.py index 828e5cf..bce97e7 100644 --- a/Cinder/Newton/huawei_driver.py +++ b/Cinder/Newton/huawei_driver.py @@ -81,7 +81,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -3642,3 +3642,226 @@ def _delete_zone_and_remove_fc_initiators(self, wwns, host_id): 'data': {'target_wwn': tgt_port_wwns, 'initiator_target_map': init_targ_map}} return info, portg_id + + +class HuaweiROCEDriver(HuaweiBaseDriver): + """RoCE driver for Huawei storage arrays. + + Version history: + 2.6.4 - start to support RoCE. 
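The two new config readers wired in above, _get_rest_client_semaphore and
_roce_info, consume small fragments of the driver's XML config file. A sketch
of what they parse; element names follow the patch, while the values and the
NQN are illustrative:

    import xml.etree.ElementTree as ET

    SAMPLE = '''<config>
        <Storage><Semaphore>30</Semaphore></Storage>
        <RoCE>
            <Initiator Name="nqn.2014-08.org.nvmexpress:uuid:example"
                       TargetIP="192.168.1.10 192.168.1.11"/>
        </RoCE>
    </config>'''

    root = ET.fromstring(SAMPLE)
    semaphore = root.findtext('Storage/Semaphore')  # '30'; blank falls back to 20
    initiators = [dict(node.items()) for node in root.findall('RoCE/Initiator')]
    # [{'Name': 'nqn...example', 'TargetIP': '192.168.1.10 192.168.1.11'}]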
+ """ + + def __init__(self, *args, **kwargs): + super(HuaweiROCEDriver, self).__init__(*args, **kwargs) + + def get_volume_stats(self, refresh=False): + """Get volume status.""" + data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or self.__class__.__name__ + data['storage_protocol'] = 'nvmeof' + data['driver_version'] = self.VERSION + data['vendor_name'] = 'Huawei' + return data + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def initialize_connection(self, volume, connector): + """Map a volume to a host and return target RoCE information.""" + self._check_roce_params(volume, connector) + + # Attach local lun. + roce_info = self._initialize_connection(volume, connector) + + # Attach remote lun if exists. + metadata = huawei_utils.get_lun_metadata(volume) + LOG.info("Attach Volume, metadata is: %s.", metadata) + if metadata.get('hypermetro'): + try: + rmt_roce_info = ( + self._initialize_connection(volume, connector, False)) + except Exception: + with excutils.save_and_reraise_exception(): + self._terminate_connection(volume, connector) + + roce_info.get('data').get('target_portals').extend( + rmt_roce_info.get('data').get('target_portals')) + roce_info.get('data').get('target_luns').extend( + rmt_roce_info.get('data').get('target_luns')) + + LOG.info('initialize_common_connection_roce, ' + 'return data is: %s.', roce_info) + return roce_info + + def _initialize_connection(self, volume, connector, local=True): + LOG.info('Initialize RoCE connection for volume %(id)s, ' + 'connector info %(conn)s. array is in %(location)s.', + {'id': volume.id, 'conn': connector, + 'location': 'local' if local else 'remote'}) + + host_nqn = connector.get("host_nqn") + + client = self.client if local else self.rmt_client + + lun_id, lun_type = self.get_lun_id_and_type( + volume, constants.VOLUME_NOT_EXISTS_RAISE, local) + lun_info = client.get_lun_info(lun_id, lun_type) + + target_ips = client.get_roce_params(connector) + + host_id = client.add_host_with_check( + connector.get('host'), self.is_dorado_v6, host_nqn) + + try: + client.ensure_roceini_added(host_nqn, host_id) + except Exception: + with excutils.save_and_reraise_exception(): + self.remove_host_with_check(host_id) + + hostgroup_id = client.add_host_to_hostgroup(host_id) + + metadata = huawei_utils.get_lun_metadata(volume) + hypermetro_lun = metadata.get('hypermetro') + + map_info = client.do_mapping( + lun_info, hostgroup_id, host_id, + lun_type=lun_type, hypermetro_lun=hypermetro_lun) + host_lun_id = client.get_host_lun_id(host_id, lun_info, lun_type) + LOG.info('initialize_connection, host lun id is: %(id)s. 
+        LOG.info('initialize_connection, host lun id is: %(id)s. '
+                 'View info is %(view)s.',
+                 {'id': host_lun_id, 'view': map_info})
+        host_lun_id = int(host_lun_id)
+        mapping_info = {
+            'target_portals': ['%s:4420' % ip for ip in target_ips],
+            'target_luns': [host_lun_id] * len(target_ips),
+            'transport_type': 'rdma',
+            'host_nqn': host_nqn,
+            'discard': True,
+            'volume_nguid': lun_info.get("NGUID")
+        }
+        conn = {
+            'driver_volume_type': 'nvmeof',
+            'data': mapping_info
+        }
+        LOG.info('Initialize RoCE connection successfully: %s.', conn)
+        return conn
+
+    @coordination.synchronized('huawei-mapping-{connector[host]}')
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Delete map between a volume and a host."""
+        self._check_roce_params(volume, connector)
+
+        metadata = huawei_utils.get_lun_metadata(volume)
+        LOG.info("terminate_connection, metadata is: %s.", metadata)
+        self._terminate_connection(volume, connector)
+
+        if metadata.get('hypermetro'):
+            self._terminate_connection(volume, connector, False)
+
+        LOG.info('terminate_connection success.')
+
+    def _terminate_connection(self, volume, connector, local=True):
+        LOG.info('_terminate_connection, detach %(local)s volume.',
+                 {'local': 'local' if local else 'remote'})
+
+        client = self.client if local else self.rmt_client
+
+        lun_id, lun_type = self.get_lun_id_and_type(
+            volume, constants.VOLUME_NOT_EXISTS_WARN, local)
+
+        initiator_name = connector.get('host_nqn')
+        host_name = connector.get('host')
+
+        LOG.info('terminate_connection: initiator name: %(ini)s, '
+                 'LUN ID: %(lunid)s, lun type: %(lun_type)s, '
+                 'connector: %(connector)s.',
+                 {'ini': initiator_name, 'lunid': lun_id,
+                  'lun_type': lun_type, 'connector': connector})
+
+        lungroup_id = None
+        portgroup_id = None
+        view_id = None
+
+        host_id = huawei_utils.get_host_id(client, host_name)
+        if host_id:
+            mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
+            view_id = client.find_mapping_view(mapping_view_name)
+            if view_id:
+                lungroup_id = client.find_lungroup_from_map(view_id)
+                portgroup_id = client.get_portgroup_by_view(view_id)
+
+        if lun_id and lungroup_id:
+            lungroup_ids = client.get_lungroupids_by_lunid(lun_id, lun_type)
+            if lungroup_id in lungroup_ids:
+                client.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type)
+            else:
+                LOG.warning("LUN is not in lungroup. LUN ID: %(lun_id)s. 
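# The ':4420' portals built above must bracket IPv6 literals so the port
# suffix stays unambiguous; get_roce_params does this with netaddr. A minimal
# sketch of that formatting rule:
import netaddr


def format_portal(ip, port=4420):
    addr = netaddr.IPAddress(ip)
    if addr.version == 6:
        # compact the address and wrap it in brackets before adding the port
        ip = '[%s]' % addr.format(dialect=netaddr.ipv6_compact)
    return '%s:%s' % (ip, port)

# format_portal('192.168.1.10')   -> '192.168.1.10:4420'
# format_portal('fc00:0:0::1234') -> '[fc00::1234]:4420'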
" + "Lungroup id: %(lungroup_id)s.", + {"lun_id": lun_id, "lungroup_id": lungroup_id}) + if self.configuration.retain_storage_mapping: + return + + mapping_param = {'host_id': host_id, 'initiator_name': initiator_name, + 'lungroup_id': lungroup_id, 'view_id': view_id, + 'portgroup_id': portgroup_id} + self._delete_storage_mapping(client, mapping_param) + + def _delete_storage_mapping(self, client, mapping_param): + left_lun_num = -1 + lungroup_id = mapping_param.get('lungroup_id') + view_id = mapping_param.get('view_id') + portgroup_id = mapping_param.get('portgroup_id') + initiator_name = mapping_param.get('initiator_name') + host_id = mapping_param.get('host_id') + if lungroup_id: + left_lun_num = client.get_obj_count_from_lungroup(lungroup_id) + if view_id and (int(left_lun_num) <= 0): + if portgroup_id and client.is_portgroup_associated_to_view( + view_id, portgroup_id): + client.delete_portgroup_mapping_view(view_id, portgroup_id) + + if client.lungroup_associated(view_id, lungroup_id): + client.delete_lungroup_mapping_view(view_id, lungroup_id) + + client.delete_lungroup(lungroup_id) + + if client.is_roce_initiator_associated_to_host( + initiator_name, host_id): + client.remove_roce_initiator_from_host(initiator_name, host_id) + + hostgroup_name = constants.HOSTGROUP_PREFIX + host_id + hostgroup_id = client.find_hostgroup(hostgroup_name) + if hostgroup_id: + if client.hostgroup_associated(view_id, hostgroup_id): + client.delete_hostgoup_mapping_view(view_id, hostgroup_id) + client.remove_host_from_hostgroup(hostgroup_id, host_id) + client.delete_hostgroup(hostgroup_id) + client.remove_host(host_id) + + client.delete_mapping_view(view_id) + + def _check_roce_params(self, volume, connector): + if not volume or not connector: + msg = _( + '%(param)s is none.' + % {'param': 'volume' if not volume else 'connector'}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not volume.id: + msg = _( + 'volume param is error. volume is %(volume)s.' + % {'volume': volume}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not connector.get('host_nqn') or not connector.get('host'): + msg = _( + 'connector param is error. connector is %(connector)s.' 
+ % {'connector': connector}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not self.is_dorado_v6: + msg = _("Current storage doesn't support RoCE.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) diff --git a/Cinder/Newton/rest_client.py b/Cinder/Newton/rest_client.py index 93a44ab..4df29e2 100644 --- a/Cinder/Newton/rest_client.py +++ b/Cinder/Newton/rest_client.py @@ -58,13 +58,14 @@ def __init__(self, configuration, san_address, san_user, san_password, self.iscsi_info = kwargs.get('iscsi_info', self.configuration.iscsi_info) self.fc_info = kwargs.get('fc_info', self.configuration.fc_info) + self.roce_info = kwargs.get('roce_info', self.configuration.roce_info) self.iscsi_default_target_ip = kwargs.get( 'iscsi_default_target_ip', self.configuration.iscsi_default_target_ip) self.metro_domain = kwargs.get('metro_domain', None) self.metro_sync_completed = strutils.bool_from_string( kwargs.get('metro_sync_completed')) - self.semaphore = threading.Semaphore(20) + self.semaphore = threading.Semaphore(self.configuration.semaphore) self.call_lock = lockutils.ReaderWriterLock() self.session = None self.url = None @@ -154,15 +155,14 @@ def do_call(self, url=None, data=None, method=None, } res_json = res.json() + response_time = res.elapsed.total_seconds() if not filter_flag: - LOG.info('\nRequest URL: %(url)s\n' - 'Call Method: %(method)s\n' - 'Request Data: %(data)s\n' - 'Response Data:%(res)s', - {'url': url, - 'method': method, - 'data': data, - 'res': res_json}) + LOG.info('Request URL: %(url)s, Call Method: %(method)s,' + 'Request Data: %(data)s, Response Data:%(res)s,' + 'Response Time:%(res_time)s', + {'url': url, 'method': method, + 'data': data, 'res': res_json, + 'res_time': response_time}) return res_json @@ -840,7 +840,7 @@ def add_host_with_check(self, host_name, is_dorado_v6, initiator): host_id = huawei_utils.get_host_id(self, host_name) new_alua_info = {} if self.is_dorado_v6: - info = self.iscsi_info or self.fc_info + info = self.iscsi_info or self.fc_info or self.roce_info new_alua_info = self._find_new_alua_info( info, host_name, initiator) if host_id: @@ -2992,3 +2992,190 @@ def cancel_rollback_snapshot(self, snapshot_id): result = self.call(url, data, "PUT") self._assert_rest_result(result, 'Cancel rollback snapshot %s error.' % snapshot_id) + + def ensure_roceini_added(self, initiator_name, host_id): + # Check and associate RoCE initiator to host on array + initiator = self._get_roceini_by_id(initiator_name) + + if not initiator: + self._add_roceini_to_array(initiator_name) + self._associate_roceini_to_host(initiator_name, host_id) + return + + if initiator.get('ISFREE') == "true": + self._associate_roceini_to_host(initiator_name, host_id) + return + + # if initiator was associated to another host + if initiator.get("PARENTID") != host_id: + msg = (_("Initiator %(ini)s has been added to another host " + "%(host)s.") % {"ini": initiator_name, + "host": initiator.get('PARENTNAME')}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _get_roceini_by_id(self, nqn): + """Get RoCE initiator from array.""" + url = '/NVMe_over_RoCE_initiator/%s' % nqn + result = self.call(url, None, "GET") + + if result.get('error', {}).get('code') == constants.FC_INITIATOR_NOT_EXIST: + LOG.warning('RoCE NQN %s not exist.', nqn) + return {} + self._assert_rest_result(result, 'get RoCE NQN %s error.' 
% nqn) + + return result.get("data", {}) + + def _add_roceini_to_array(self, nqn): + """Add a new RoCE initiator to storage device.""" + url = "/NVMe_over_RoCE_initiator" + data = {"ID": nqn} + result = self.call(url, data, "POST") + if result.get('error', {}).get('code') == constants.OBJECT_ALREADY_EXIST: + LOG.warning('RoCE NQN %s has already exist in array.', nqn) + else: + self._assert_rest_result( + result, _('Add RoCE initiator %s to array error.' % nqn)) + + def _associate_roceini_to_host(self, nqn, host_id): + """Associate RoCE initiator with the host.""" + url = "/host/create_associate" + data = {"ASSOCIATEOBJTYPE": constants.NVME_ROCE_INITIATOR_TYPE, + "ID": host_id, + "ASSOCIATEOBJID": nqn} + result = self.call(url, data, "PUT") + self._assert_rest_result( + result, _("Associate RoCE initiator %(ini)s to host %(host)s " + "error." % {"ini": nqn, "host": host_id})) + + def is_roce_initiator_associated_to_host(self, initiator_name, host_id): + initiator = self._get_roceini_by_id(initiator_name) + if not initiator or initiator.get('ISFREE') == "true": + return False + + if initiator.get('PARENTID') == host_id: + return True + else: + msg = _("Initiator %(ini)s has been added to host " + "%(host)s.") % {"ini": initiator_name, + "host": initiator.get('PARENTID')} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def remove_roce_initiator_from_host(self, initiator_name, host_id): + url = "/host/remove_associate" + data = {"ID": host_id, + "ASSOCIATEOBJTYPE": constants.NVME_ROCE_INITIATOR_TYPE, + "ASSOCIATEOBJID": initiator_name} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, + _('Remove RoCE initiator from host error.')) + + def get_roce_params(self, connector): + """Get target ROCE params, including IP.""" + host_nqn = connector.get('host_nqn') + host_name = connector.get('host') + target_ips = self._get_roce_target_ips(host_nqn, host_name) + + logic_ports = self.get_roce_logical_ports() + result = [] + for ip in target_ips: + if self._is_roce_target_ip_in_array(ip, logic_ports): + format_ip = netaddr.IPAddress(ip) + if format_ip.version == 6: + ip = str(format_ip.format(dialect=netaddr.ipv6_compact)) + ip = '[' + ip + ']' + result.append(ip) + + if not result: + err_msg = _('There is no any logic ips exist on array of the ' + 'configured target_ip %s in conf file' % target_ips) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + return result + + def _get_roce_target_ips(self, initiator, host_name): + target_ips = self._get_target_ips_by_initiator_name(initiator) + + if not target_ips: + target_ips = self._get_target_ips_by_host_name(host_name) + + if not target_ips: + msg = (_( + 'get_roce_params: Failed to get target IP ' + 'for host %(host)s, please check config file.') + % {'host': host_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.info('Get the default ip: %s.', target_ips) + return target_ips + + def _get_roce_logic_ports(self, start, end, params): + url = ("/lif?range=[%(start)s-%(end)s]" + % {"start": six.text_type(start), "end": six.text_type(end)}) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('get RoCE Logic Ports error.')) + return result.get('data', []) + + def get_roce_logical_ports(self): + all_logic_ports = self._get_info_by_range( + self._get_roce_logic_ports) + return all_logic_ports + + def _is_roce_target_ip_in_array(self, ip, logic_ports): + for logic_port in logic_ports: + if logic_port.get('ADDRESSFAMILY') == 
constants.ADDRESS_FAMILY_IPV4: + if ip == logic_port.get('IPV4ADDR'): + return True + else: + if self._is_same_ipv6(ip, logic_port.get('IPV6ADDR')): + return True + + return False + + @staticmethod + def _is_same_ipv6(left_ip, right_ip): + format_left_ip = str( + netaddr.IPAddress(left_ip).format(dialect=netaddr.ipv6_compact)) + format_right_ip = str( + netaddr.IPAddress(right_ip).format(dialect=netaddr.ipv6_compact)) + if format_left_ip == format_right_ip: + return True + + return False + + @staticmethod + def _get_target_ip_list(roce_info, target_ips): + for target_ip in roce_info.get('TargetIP').split(): + if target_ip.strip(): + target_ips.append(target_ip) + + def _get_target_ips_by_initiator_name(self, initiator): + target_ips = [] + for info in self.roce_info: + config_initiator = info.get('Name') + if not config_initiator: + continue + if config_initiator == initiator: + self._get_target_ip_list(info, target_ips) + return target_ips + + def _get_target_ips_by_host_name(self, host_name): + target_ips = [] + temp_target_ips = [] + for info in self.roce_info: + config_host_name = info.get('HostName') + if not config_host_name: + continue + if config_host_name == '*': + self._get_target_ip_list(info, temp_target_ips) + elif re.search(config_host_name, host_name): + self._get_target_ip_list(info, target_ips) + break + + if not target_ips and temp_target_ips: + target_ips = temp_target_ips + + return target_ips diff --git a/Cinder/Ocata/__init__.py b/Cinder/Ocata/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Ocata/__init__.py +++ b/Cinder/Ocata/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Ocata/constants.py b/Cinder/Ocata/constants.py index 5fc622a..f8118c8 100644 --- a/Cinder/Ocata/constants.py +++ b/Cinder/Ocata/constants.py @@ -49,6 +49,10 @@ MIGRATION_FAULT = '74' MIGRATION_COMPLETE = '76' +# ROCE INITIATOR CONSTANTS +NVME_ROCE_INITIATOR_TYPE = '57870' +ADDRESS_FAMILY_IPV4 = '0' + ERROR_CONNECT_TO_SERVER = -403 ERROR_UNAUTHORIZED_TO_SERVER = -401 ERROR_BAD_STATUS_LINE = -400 @@ -57,6 +61,7 @@ SOCKET_TIMEOUT = 52 ERROR_VOLUME_ALREADY_EXIST = 1077948993 LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 ERROR_VOLUME_NOT_EXIST = 1077939726 ERROR_LUN_NOT_EXIST = 1077936859 ERROR_SNAPSHOT_NOT_EXIST = 1077937880 @@ -73,6 +78,7 @@ CLONE_PAIR_SYNC_COMPLETE = 1073798176 CLONE_PAIR_SYNC_NOT_EXIST = 1073798172 HOST_ALREADY_IN_HOSTGROUP = 1077937501 +OBJECT_ALREADY_EXIST = 1077948997 LUN_ALREADY_IN_LUNGROUP = 1077948997 HOSTGROUP_ALREADY_IN_MAPPINGVIEW = 1073804556 LUNGROUP_ALREADY_IN_MAPPINGVIEW = 1073804560 @@ -148,7 +154,7 @@ 'Thin': THIN_LUNTYPE} VALID_PRODUCT = ['V3', 'V5', '18000', 'Dorado', 'V6'] -VALID_PROTOCOL = ['FC', 'iSCSI'] +VALID_PROTOCOL = ['FC', 'iSCSI', 'nvmeof'] VALID_WRITE_TYPE = ['1', '2'] VOLUME_NOT_EXISTS_WARN = 'warning' VOLUME_NOT_EXISTS_RAISE = 'raise' diff --git a/Cinder/Ocata/huawei_conf.py b/Cinder/Ocata/huawei_conf.py index e90ddb3..cd0a2c9 100644 --- a/Cinder/Ocata/huawei_conf.py +++ b/Cinder/Ocata/huawei_conf.py @@ -90,6 +90,7 @@ def update_config_value(self): self._force_delete_volume, self._iscsi_default_target_ip, self._iscsi_info, + self._roce_info, self._fc_info, self._ssl_cert_path, self._ssl_cert_verify, @@ -102,7 +103,8 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._rollback_speed, - self._set_qos_ignored_param) + self._set_qos_ignored_param, + self._get_rest_client_semaphore) for f in set_attr_funcs: f(xml_root) @@ -440,7 +442,7 @@ def 
_parse_rmt_iscsi_info(self, iscsi_info): # Step 5, make initiators configure dict, convert to: # [{'TargetPortGroup': 'xxx', 'Name': 'xxx'}, - # {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}] + # {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}] get_opts = lambda x: x.split(':', 1) initiator_infos = map(lambda x: dict(map(get_opts, x)), initiator_infos) @@ -474,6 +476,8 @@ def get_hypermetro_devices(self): dev.get('iscsi_info')) dev_config['fc_info'] = self._parse_rmt_iscsi_info( dev.get('fc_info')) + dev_config['roce_info'] = self._parse_rmt_iscsi_info( + dev.get('roce_info')) dev_config['iscsi_default_target_ip'] = ( dev['iscsi_default_target_ip'].split(';') if 'iscsi_default_target_ip' in dev @@ -507,6 +511,8 @@ def get_replication_devices(self): dev.get('iscsi_info')) dev_config['fc_info'] = self._parse_rmt_iscsi_info( dev.get('fc_info')) + dev_config['roce_info'] = self._parse_rmt_iscsi_info( + dev.get('roce_info')) dev_config['iscsi_default_target_ip'] = ( dev['iscsi_default_target_ip'].split(';') if 'iscsi_default_target_ip' in dev @@ -529,6 +535,7 @@ def get_local_device(self): 'storage_pools': self.conf.storage_pools, 'iscsi_info': self.conf.iscsi_info, 'fc_info': self.conf.fc_info, + 'roce_info': self.conf.roce_info, 'iscsi_default_target_ip': self.conf.iscsi_default_target_ip, 'in_band_or_not': self.conf.in_band_or_not, 'storage_sn': self.conf.storage_sn, @@ -679,3 +686,32 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + def _roce_info(self, xml_root): + nodes = xml_root.findall('RoCE/Initiator') + if nodes is None: + setattr(self.conf, 'roce_info', []) + return + + roce_info = [] + for node in nodes: + props = {} + for item in node.items(): + props[item[0].strip()] = item[1].strip() + + roce_info.append(props) + + self._check_hostname_regex_config(roce_info) + setattr(self.conf, 'roce_info', roce_info) diff --git a/Cinder/Ocata/huawei_driver.py b/Cinder/Ocata/huawei_driver.py index eff5450..6f7b0ad 100644 --- a/Cinder/Ocata/huawei_driver.py +++ b/Cinder/Ocata/huawei_driver.py @@ -81,7 +81,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -3642,3 +3642,226 @@ def _delete_zone_and_remove_fc_initiators(self, wwns, host_id): 'data': {'target_wwn': tgt_port_wwns, 'initiator_target_map': init_targ_map}} return info, portg_id + + +class HuaweiROCEDriver(HuaweiBaseDriver): + """RoCE driver for Huawei storage arrays. + + Version history: + 2.6.4 - start to support RoCE. 
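initialize_connection and terminate_connection in this class are both wrapped
in coordination.synchronized('huawei-mapping-{connector[host]}'), so mapping
changes for one host are serialized while different hosts proceed in parallel.
A rough stand-in for that behavior (a simplified sketch, not cinder's
implementation):

    import threading
    from collections import defaultdict

    _host_locks = defaultdict(threading.Lock)

    def synchronized_by_host(func):
        def wrapper(self, volume, connector, **kwargs):
            # one lock per connector host, like the lock-name template above
            with _host_locks['huawei-mapping-%s' % connector['host']]:
                return func(self, volume, connector, **kwargs)
        return wrapper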
+ """ + + def __init__(self, *args, **kwargs): + super(HuaweiROCEDriver, self).__init__(*args, **kwargs) + + def get_volume_stats(self, refresh=False): + """Get volume status.""" + data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or self.__class__.__name__ + data['storage_protocol'] = 'nvmeof' + data['driver_version'] = self.VERSION + data['vendor_name'] = 'Huawei' + return data + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def initialize_connection(self, volume, connector): + """Map a volume to a host and return target RoCE information.""" + self._check_roce_params(volume, connector) + + # Attach local lun. + roce_info = self._initialize_connection(volume, connector) + + # Attach remote lun if exists. + metadata = huawei_utils.get_lun_metadata(volume) + LOG.info("Attach Volume, metadata is: %s.", metadata) + if metadata.get('hypermetro'): + try: + rmt_roce_info = ( + self._initialize_connection(volume, connector, False)) + except Exception: + with excutils.save_and_reraise_exception(): + self._terminate_connection(volume, connector) + + roce_info.get('data').get('target_portals').extend( + rmt_roce_info.get('data').get('target_portals')) + roce_info.get('data').get('target_luns').extend( + rmt_roce_info.get('data').get('target_luns')) + + LOG.info('initialize_common_connection_roce, ' + 'return data is: %s.', roce_info) + return roce_info + + def _initialize_connection(self, volume, connector, local=True): + LOG.info('Initialize RoCE connection for volume %(id)s, ' + 'connector info %(conn)s. array is in %(location)s.', + {'id': volume.id, 'conn': connector, + 'location': 'local' if local else 'remote'}) + + host_nqn = connector.get("host_nqn") + + client = self.client if local else self.rmt_client + + lun_id, lun_type = self.get_lun_id_and_type( + volume, constants.VOLUME_NOT_EXISTS_RAISE, local) + lun_info = client.get_lun_info(lun_id, lun_type) + + target_ips = client.get_roce_params(connector) + + host_id = client.add_host_with_check( + connector.get('host'), self.is_dorado_v6, host_nqn) + + try: + client.ensure_roceini_added(host_nqn, host_id) + except Exception: + with excutils.save_and_reraise_exception(): + self.remove_host_with_check(host_id) + + hostgroup_id = client.add_host_to_hostgroup(host_id) + + metadata = huawei_utils.get_lun_metadata(volume) + hypermetro_lun = metadata.get('hypermetro') + + map_info = client.do_mapping( + lun_info, hostgroup_id, host_id, + lun_type=lun_type, hypermetro_lun=hypermetro_lun) + host_lun_id = client.get_host_lun_id(host_id, lun_info, lun_type) + LOG.info('initialize_connection, host lun id is: %(id)s. 
' + 'View info is %(view)s.', + {'id': host_lun_id, 'view': map_info}) + host_lun_id = int(host_lun_id) + mapping_info = { + 'target_portals': ['%s:4420' % ip for ip in target_ips], + 'target_luns': [host_lun_id] * len(target_ips), + 'transport_type': 'rdma', + 'host_nqn': host_nqn, + 'discard': True, + 'volume_nguid': lun_info.get("NGUID") + } + conn = { + 'driver_volume_type': 'nvmeof', + 'data': mapping_info + } + LOG.info('Initialize RoCE connection successfully: %s.', conn) + return conn + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def terminate_connection(self, volume, connector, **kwargs): + """Delete map between a volume and a host.""" + self._check_roce_params(volume, connector) + + metadata = huawei_utils.get_lun_metadata(volume) + LOG.info("terminate_connection, metadata is: %s.", metadata) + self._terminate_connection(volume, connector) + + if metadata.get('hypermetro'): + self._terminate_connection(volume, connector, False) + + LOG.info('terminate_connection success.') + + def _terminate_connection(self, volume, connector, local=True): + LOG.info('_terminate_connection, detach %(local)s volume.', + {'local': 'local' if local else 'remote'}) + + client = self.client if local else self.rmt_client + + lun_id, lun_type = self.get_lun_id_and_type( + volume, constants.VOLUME_NOT_EXISTS_WARN, local) + + initiator_name = connector.get('host_nqn') + host_name = connector.get('host') + + LOG.info('terminate_connection: initiator name: %(ini)s, LUN ID: %(' + 'lunid)s, lun type: %(lun_type)s, connector: %(' + 'connector)s.', {'ini': initiator_name, 'lunid': lun_id, + 'lun_type': lun_type, + 'connector': connector}) + + lungroup_id = None + portgroup_id = None + view_id = None + + host_id = huawei_utils.get_host_id(client, host_name) + if host_id: + mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id + view_id = client.find_mapping_view(mapping_view_name) + if view_id: + lungroup_id = client.find_lungroup_from_map(view_id) + portgroup_id = client.get_portgroup_by_view(view_id) + + if lun_id and lungroup_id: + lungroup_ids = client.get_lungroupids_by_lunid(lun_id, lun_type) + if lungroup_id in lungroup_ids: + client.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) + else: + LOG.warning("LUN is not in lungroup. LUN ID: %(lun_id)s. 
" + "Lungroup id: %(lungroup_id)s.", + {"lun_id": lun_id, "lungroup_id": lungroup_id}) + if self.configuration.retain_storage_mapping: + return + + mapping_param = {'host_id': host_id, 'initiator_name': initiator_name, + 'lungroup_id': lungroup_id, 'view_id': view_id, + 'portgroup_id': portgroup_id} + self._delete_storage_mapping(client, mapping_param) + + def _delete_storage_mapping(self, client, mapping_param): + left_lun_num = -1 + lungroup_id = mapping_param.get('lungroup_id') + view_id = mapping_param.get('view_id') + portgroup_id = mapping_param.get('portgroup_id') + initiator_name = mapping_param.get('initiator_name') + host_id = mapping_param.get('host_id') + if lungroup_id: + left_lun_num = client.get_obj_count_from_lungroup(lungroup_id) + if view_id and (int(left_lun_num) <= 0): + if portgroup_id and client.is_portgroup_associated_to_view( + view_id, portgroup_id): + client.delete_portgroup_mapping_view(view_id, portgroup_id) + + if client.lungroup_associated(view_id, lungroup_id): + client.delete_lungroup_mapping_view(view_id, lungroup_id) + + client.delete_lungroup(lungroup_id) + + if client.is_roce_initiator_associated_to_host( + initiator_name, host_id): + client.remove_roce_initiator_from_host(initiator_name, host_id) + + hostgroup_name = constants.HOSTGROUP_PREFIX + host_id + hostgroup_id = client.find_hostgroup(hostgroup_name) + if hostgroup_id: + if client.hostgroup_associated(view_id, hostgroup_id): + client.delete_hostgoup_mapping_view(view_id, hostgroup_id) + client.remove_host_from_hostgroup(hostgroup_id, host_id) + client.delete_hostgroup(hostgroup_id) + client.remove_host(host_id) + + client.delete_mapping_view(view_id) + + def _check_roce_params(self, volume, connector): + if not volume or not connector: + msg = _( + '%(param)s is none.' + % {'param': 'volume' if not volume else 'connector'}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not volume.id: + msg = _( + 'volume param is error. volume is %(volume)s.' + % {'volume': volume}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not connector.get('host_nqn') or not connector.get('host'): + msg = _( + 'connector param is error. connector is %(connector)s.' 
+ % {'connector': connector}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not self.is_dorado_v6: + msg = _("Current storage doesn't support RoCE.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) diff --git a/Cinder/Ocata/rest_client.py b/Cinder/Ocata/rest_client.py index 93a44ab..4df29e2 100644 --- a/Cinder/Ocata/rest_client.py +++ b/Cinder/Ocata/rest_client.py @@ -58,13 +58,14 @@ def __init__(self, configuration, san_address, san_user, san_password, self.iscsi_info = kwargs.get('iscsi_info', self.configuration.iscsi_info) self.fc_info = kwargs.get('fc_info', self.configuration.fc_info) + self.roce_info = kwargs.get('roce_info', self.configuration.roce_info) self.iscsi_default_target_ip = kwargs.get( 'iscsi_default_target_ip', self.configuration.iscsi_default_target_ip) self.metro_domain = kwargs.get('metro_domain', None) self.metro_sync_completed = strutils.bool_from_string( kwargs.get('metro_sync_completed')) - self.semaphore = threading.Semaphore(20) + self.semaphore = threading.Semaphore(self.configuration.semaphore) self.call_lock = lockutils.ReaderWriterLock() self.session = None self.url = None @@ -154,15 +155,14 @@ def do_call(self, url=None, data=None, method=None, } res_json = res.json() + response_time = res.elapsed.total_seconds() if not filter_flag: - LOG.info('\nRequest URL: %(url)s\n' - 'Call Method: %(method)s\n' - 'Request Data: %(data)s\n' - 'Response Data:%(res)s', - {'url': url, - 'method': method, - 'data': data, - 'res': res_json}) + LOG.info('Request URL: %(url)s, Call Method: %(method)s,' + 'Request Data: %(data)s, Response Data:%(res)s,' + 'Response Time:%(res_time)s', + {'url': url, 'method': method, + 'data': data, 'res': res_json, + 'res_time': response_time}) return res_json @@ -840,7 +840,7 @@ def add_host_with_check(self, host_name, is_dorado_v6, initiator): host_id = huawei_utils.get_host_id(self, host_name) new_alua_info = {} if self.is_dorado_v6: - info = self.iscsi_info or self.fc_info + info = self.iscsi_info or self.fc_info or self.roce_info new_alua_info = self._find_new_alua_info( info, host_name, initiator) if host_id: @@ -2992,3 +2992,190 @@ def cancel_rollback_snapshot(self, snapshot_id): result = self.call(url, data, "PUT") self._assert_rest_result(result, 'Cancel rollback snapshot %s error.' % snapshot_id) + + def ensure_roceini_added(self, initiator_name, host_id): + # Check and associate RoCE initiator to host on array + initiator = self._get_roceini_by_id(initiator_name) + + if not initiator: + self._add_roceini_to_array(initiator_name) + self._associate_roceini_to_host(initiator_name, host_id) + return + + if initiator.get('ISFREE') == "true": + self._associate_roceini_to_host(initiator_name, host_id) + return + + # if initiator was associated to another host + if initiator.get("PARENTID") != host_id: + msg = (_("Initiator %(ini)s has been added to another host " + "%(host)s.") % {"ini": initiator_name, + "host": initiator.get('PARENTNAME')}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _get_roceini_by_id(self, nqn): + """Get RoCE initiator from array.""" + url = '/NVMe_over_RoCE_initiator/%s' % nqn + result = self.call(url, None, "GET") + + if result.get('error', {}).get('code') == constants.FC_INITIATOR_NOT_EXIST: + LOG.warning('RoCE NQN %s not exist.', nqn) + return {} + self._assert_rest_result(result, 'get RoCE NQN %s error.' 
% nqn) + + return result.get("data", {}) + + def _add_roceini_to_array(self, nqn): + """Add a new RoCE initiator to storage device.""" + url = "/NVMe_over_RoCE_initiator" + data = {"ID": nqn} + result = self.call(url, data, "POST") + if result.get('error', {}).get('code') == constants.OBJECT_ALREADY_EXIST: + LOG.warning('RoCE NQN %s has already exist in array.', nqn) + else: + self._assert_rest_result( + result, _('Add RoCE initiator %s to array error.' % nqn)) + + def _associate_roceini_to_host(self, nqn, host_id): + """Associate RoCE initiator with the host.""" + url = "/host/create_associate" + data = {"ASSOCIATEOBJTYPE": constants.NVME_ROCE_INITIATOR_TYPE, + "ID": host_id, + "ASSOCIATEOBJID": nqn} + result = self.call(url, data, "PUT") + self._assert_rest_result( + result, _("Associate RoCE initiator %(ini)s to host %(host)s " + "error." % {"ini": nqn, "host": host_id})) + + def is_roce_initiator_associated_to_host(self, initiator_name, host_id): + initiator = self._get_roceini_by_id(initiator_name) + if not initiator or initiator.get('ISFREE') == "true": + return False + + if initiator.get('PARENTID') == host_id: + return True + else: + msg = _("Initiator %(ini)s has been added to host " + "%(host)s.") % {"ini": initiator_name, + "host": initiator.get('PARENTID')} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def remove_roce_initiator_from_host(self, initiator_name, host_id): + url = "/host/remove_associate" + data = {"ID": host_id, + "ASSOCIATEOBJTYPE": constants.NVME_ROCE_INITIATOR_TYPE, + "ASSOCIATEOBJID": initiator_name} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, + _('Remove RoCE initiator from host error.')) + + def get_roce_params(self, connector): + """Get target ROCE params, including IP.""" + host_nqn = connector.get('host_nqn') + host_name = connector.get('host') + target_ips = self._get_roce_target_ips(host_nqn, host_name) + + logic_ports = self.get_roce_logical_ports() + result = [] + for ip in target_ips: + if self._is_roce_target_ip_in_array(ip, logic_ports): + format_ip = netaddr.IPAddress(ip) + if format_ip.version == 6: + ip = str(format_ip.format(dialect=netaddr.ipv6_compact)) + ip = '[' + ip + ']' + result.append(ip) + + if not result: + err_msg = _('There is no any logic ips exist on array of the ' + 'configured target_ip %s in conf file' % target_ips) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + return result + + def _get_roce_target_ips(self, initiator, host_name): + target_ips = self._get_target_ips_by_initiator_name(initiator) + + if not target_ips: + target_ips = self._get_target_ips_by_host_name(host_name) + + if not target_ips: + msg = (_( + 'get_roce_params: Failed to get target IP ' + 'for host %(host)s, please check config file.') + % {'host': host_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.info('Get the default ip: %s.', target_ips) + return target_ips + + def _get_roce_logic_ports(self, start, end, params): + url = ("/lif?range=[%(start)s-%(end)s]" + % {"start": six.text_type(start), "end": six.text_type(end)}) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('get RoCE Logic Ports error.')) + return result.get('data', []) + + def get_roce_logical_ports(self): + all_logic_ports = self._get_info_by_range( + self._get_roce_logic_ports) + return all_logic_ports + + def _is_roce_target_ip_in_array(self, ip, logic_ports): + for logic_port in logic_ports: + if logic_port.get('ADDRESSFAMILY') == 
constants.ADDRESS_FAMILY_IPV4: + if ip == logic_port.get('IPV4ADDR'): + return True + else: + if self._is_same_ipv6(ip, logic_port.get('IPV6ADDR')): + return True + + return False + + @staticmethod + def _is_same_ipv6(left_ip, right_ip): + format_left_ip = str( + netaddr.IPAddress(left_ip).format(dialect=netaddr.ipv6_compact)) + format_right_ip = str( + netaddr.IPAddress(right_ip).format(dialect=netaddr.ipv6_compact)) + if format_left_ip == format_right_ip: + return True + + return False + + @staticmethod + def _get_target_ip_list(roce_info, target_ips): + for target_ip in roce_info.get('TargetIP').split(): + if target_ip.strip(): + target_ips.append(target_ip) + + def _get_target_ips_by_initiator_name(self, initiator): + target_ips = [] + for info in self.roce_info: + config_initiator = info.get('Name') + if not config_initiator: + continue + if config_initiator == initiator: + self._get_target_ip_list(info, target_ips) + return target_ips + + def _get_target_ips_by_host_name(self, host_name): + target_ips = [] + temp_target_ips = [] + for info in self.roce_info: + config_host_name = info.get('HostName') + if not config_host_name: + continue + if config_host_name == '*': + self._get_target_ip_list(info, temp_target_ips) + elif re.search(config_host_name, host_name): + self._get_target_ip_list(info, target_ips) + break + + if not target_ips and temp_target_ips: + target_ips = temp_target_ips + + return target_ips diff --git a/Cinder/Pike/__init__.py b/Cinder/Pike/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Pike/__init__.py +++ b/Cinder/Pike/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Pike/constants.py b/Cinder/Pike/constants.py index 5fc622a..f8118c8 100644 --- a/Cinder/Pike/constants.py +++ b/Cinder/Pike/constants.py @@ -49,6 +49,10 @@ MIGRATION_FAULT = '74' MIGRATION_COMPLETE = '76' +# ROCE INITIATOR CONSTANTS +NVME_ROCE_INITIATOR_TYPE = '57870' +ADDRESS_FAMILY_IPV4 = '0' + ERROR_CONNECT_TO_SERVER = -403 ERROR_UNAUTHORIZED_TO_SERVER = -401 ERROR_BAD_STATUS_LINE = -400 @@ -57,6 +61,7 @@ SOCKET_TIMEOUT = 52 ERROR_VOLUME_ALREADY_EXIST = 1077948993 LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 ERROR_VOLUME_NOT_EXIST = 1077939726 ERROR_LUN_NOT_EXIST = 1077936859 ERROR_SNAPSHOT_NOT_EXIST = 1077937880 @@ -73,6 +78,7 @@ CLONE_PAIR_SYNC_COMPLETE = 1073798176 CLONE_PAIR_SYNC_NOT_EXIST = 1073798172 HOST_ALREADY_IN_HOSTGROUP = 1077937501 +OBJECT_ALREADY_EXIST = 1077948997 LUN_ALREADY_IN_LUNGROUP = 1077948997 HOSTGROUP_ALREADY_IN_MAPPINGVIEW = 1073804556 LUNGROUP_ALREADY_IN_MAPPINGVIEW = 1073804560 @@ -148,7 +154,7 @@ 'Thin': THIN_LUNTYPE} VALID_PRODUCT = ['V3', 'V5', '18000', 'Dorado', 'V6'] -VALID_PROTOCOL = ['FC', 'iSCSI'] +VALID_PROTOCOL = ['FC', 'iSCSI', 'nvmeof'] VALID_WRITE_TYPE = ['1', '2'] VOLUME_NOT_EXISTS_WARN = 'warning' VOLUME_NOT_EXISTS_RAISE = 'raise' diff --git a/Cinder/Pike/huawei_conf.py b/Cinder/Pike/huawei_conf.py index e253ee0..3f4968b 100644 --- a/Cinder/Pike/huawei_conf.py +++ b/Cinder/Pike/huawei_conf.py @@ -90,6 +90,7 @@ def update_config_value(self): self._force_delete_volume, self._iscsi_default_target_ip, self._iscsi_info, + self._roce_info, self._fc_info, self._ssl_cert_path, self._ssl_cert_verify, @@ -102,7 +103,8 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._rollback_speed, - self._set_qos_ignored_param) + self._set_qos_ignored_param, + self._get_rest_client_semaphore) for f in set_attr_funcs: f(xml_root) @@ -440,7 +442,7 @@ def _parse_rmt_iscsi_info(self, 
iscsi_info): # Step 5, make initiators configure dict, convert to: # [{'TargetPortGroup': 'xxx', 'Name': 'xxx'}, - # {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}] + # {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}] get_opts = lambda x: x.split(':', 1) initiator_infos = map(lambda x: dict(map(get_opts, x)), initiator_infos) @@ -474,6 +476,8 @@ def get_hypermetro_devices(self): dev.get('iscsi_info')) dev_config['fc_info'] = self._parse_rmt_iscsi_info( dev.get('fc_info')) + dev_config['roce_info'] = self._parse_rmt_iscsi_info( + dev.get('roce_info')) dev_config['iscsi_default_target_ip'] = ( dev['iscsi_default_target_ip'].split(';') if 'iscsi_default_target_ip' in dev @@ -507,6 +511,8 @@ def get_replication_devices(self): dev.get('iscsi_info')) dev_config['fc_info'] = self._parse_rmt_iscsi_info( dev.get('fc_info')) + dev_config['roce_info'] = self._parse_rmt_iscsi_info( + dev.get('roce_info')) dev_config['iscsi_default_target_ip'] = ( dev['iscsi_default_target_ip'].split(';') if 'iscsi_default_target_ip' in dev @@ -529,6 +535,7 @@ def get_local_device(self): 'storage_pools': self.conf.storage_pools, 'iscsi_info': self.conf.iscsi_info, 'fc_info': self.conf.fc_info, + 'roce_info': self.conf.roce_info, 'iscsi_default_target_ip': self.conf.iscsi_default_target_ip, 'in_band_or_not': self.conf.in_band_or_not, 'storage_sn': self.conf.storage_sn, @@ -679,3 +686,32 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + def _roce_info(self, xml_root): + nodes = xml_root.findall('RoCE/Initiator') + if nodes is None: + setattr(self.conf, 'roce_info', []) + return + + roce_info = [] + for node in nodes: + props = {} + for item in node.items(): + props[item[0].strip()] = item[1].strip() + + roce_info.append(props) + + self._check_hostname_regex_config(roce_info) + setattr(self.conf, 'roce_info', roce_info) diff --git a/Cinder/Pike/huawei_driver.py b/Cinder/Pike/huawei_driver.py index c636563..256e298 100644 --- a/Cinder/Pike/huawei_driver.py +++ b/Cinder/Pike/huawei_driver.py @@ -81,7 +81,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -3642,3 +3642,226 @@ def _delete_zone_and_remove_fc_initiators(self, wwns, host_id): 'data': {'target_wwn': tgt_port_wwns, 'initiator_target_map': init_targ_map}} return info, portg_id + + +class HuaweiROCEDriver(HuaweiBaseDriver): + """RoCE driver for Huawei storage arrays. + + Version history: + 2.6.4 - start to support RoCE. 
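The logging rework repeated in the rest_client hunks of this patch records how
long the array took to answer each call. requests exposes that directly; a
minimal sketch (the URL is illustrative):

    import requests

    res = requests.get('https://192.168.1.100:8088/deviceManager/rest/lif',
                       timeout=52, verify=False)
    # elapsed covers sending the request until the response headers are
    # parsed; the patch logs its total_seconds() with each response.
    duration = res.elapsed.total_seconds()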
+ """ + + def __init__(self, *args, **kwargs): + super(HuaweiROCEDriver, self).__init__(*args, **kwargs) + + def get_volume_stats(self, refresh=False): + """Get volume status.""" + data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or self.__class__.__name__ + data['storage_protocol'] = 'nvmeof' + data['driver_version'] = self.VERSION + data['vendor_name'] = 'Huawei' + return data + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def initialize_connection(self, volume, connector): + """Map a volume to a host and return target RoCE information.""" + self._check_roce_params(volume, connector) + + # Attach local lun. + roce_info = self._initialize_connection(volume, connector) + + # Attach remote lun if exists. + metadata = huawei_utils.get_lun_metadata(volume) + LOG.info("Attach Volume, metadata is: %s.", metadata) + if metadata.get('hypermetro'): + try: + rmt_roce_info = ( + self._initialize_connection(volume, connector, False)) + except Exception: + with excutils.save_and_reraise_exception(): + self._terminate_connection(volume, connector) + + roce_info.get('data').get('target_portals').extend( + rmt_roce_info.get('data').get('target_portals')) + roce_info.get('data').get('target_luns').extend( + rmt_roce_info.get('data').get('target_luns')) + + LOG.info('initialize_common_connection_roce, ' + 'return data is: %s.', roce_info) + return roce_info + + def _initialize_connection(self, volume, connector, local=True): + LOG.info('Initialize RoCE connection for volume %(id)s, ' + 'connector info %(conn)s. array is in %(location)s.', + {'id': volume.id, 'conn': connector, + 'location': 'local' if local else 'remote'}) + + host_nqn = connector.get("host_nqn") + + client = self.client if local else self.rmt_client + + lun_id, lun_type = self.get_lun_id_and_type( + volume, constants.VOLUME_NOT_EXISTS_RAISE, local) + lun_info = client.get_lun_info(lun_id, lun_type) + + target_ips = client.get_roce_params(connector) + + host_id = client.add_host_with_check( + connector.get('host'), self.is_dorado_v6, host_nqn) + + try: + client.ensure_roceini_added(host_nqn, host_id) + except Exception: + with excutils.save_and_reraise_exception(): + self.remove_host_with_check(host_id) + + hostgroup_id = client.add_host_to_hostgroup(host_id) + + metadata = huawei_utils.get_lun_metadata(volume) + hypermetro_lun = metadata.get('hypermetro') + + map_info = client.do_mapping( + lun_info, hostgroup_id, host_id, + lun_type=lun_type, hypermetro_lun=hypermetro_lun) + host_lun_id = client.get_host_lun_id(host_id, lun_info, lun_type) + LOG.info('initialize_connection, host lun id is: %(id)s. 
' + 'View info is %(view)s.', + {'id': host_lun_id, 'view': map_info}) + host_lun_id = int(host_lun_id) + mapping_info = { + 'target_portals': ['%s:4420' % ip for ip in target_ips], + 'target_luns': [host_lun_id] * len(target_ips), + 'transport_type': 'rdma', + 'host_nqn': host_nqn, + 'discard': True, + 'volume_nguid': lun_info.get("NGUID") + } + conn = { + 'driver_volume_type': 'nvmeof', + 'data': mapping_info + } + LOG.info('Initialize RoCE connection successfully: %s.', conn) + return conn + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def terminate_connection(self, volume, connector, **kwargs): + """Delete map between a volume and a host.""" + self._check_roce_params(volume, connector) + + metadata = huawei_utils.get_lun_metadata(volume) + LOG.info("terminate_connection, metadata is: %s.", metadata) + self._terminate_connection(volume, connector) + + if metadata.get('hypermetro'): + self._terminate_connection(volume, connector, False) + + LOG.info('terminate_connection success.') + + def _terminate_connection(self, volume, connector, local=True): + LOG.info('_terminate_connection, detach %(local)s volume.', + {'local': 'local' if local else 'remote'}) + + client = self.client if local else self.rmt_client + + lun_id, lun_type = self.get_lun_id_and_type( + volume, constants.VOLUME_NOT_EXISTS_WARN, local) + + initiator_name = connector.get('host_nqn') + host_name = connector.get('host') + + LOG.info('terminate_connection: initiator name: %(ini)s, LUN ID: %(' + 'lunid)s, lun type: %(lun_type)s, connector: %(' + 'connector)s.', {'ini': initiator_name, 'lunid': lun_id, + 'lun_type': lun_type, + 'connector': connector}) + + lungroup_id = None + portgroup_id = None + view_id = None + + host_id = huawei_utils.get_host_id(client, host_name) + if host_id: + mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id + view_id = client.find_mapping_view(mapping_view_name) + if view_id: + lungroup_id = client.find_lungroup_from_map(view_id) + portgroup_id = client.get_portgroup_by_view(view_id) + + if lun_id and lungroup_id: + lungroup_ids = client.get_lungroupids_by_lunid(lun_id, lun_type) + if lungroup_id in lungroup_ids: + client.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) + else: + LOG.warning("LUN is not in lungroup. LUN ID: %(lun_id)s. 
" + "Lungroup id: %(lungroup_id)s.", + {"lun_id": lun_id, "lungroup_id": lungroup_id}) + if self.configuration.retain_storage_mapping: + return + + mapping_param = {'host_id': host_id, 'initiator_name': initiator_name, + 'lungroup_id': lungroup_id, 'view_id': view_id, + 'portgroup_id': portgroup_id} + self._delete_storage_mapping(client, mapping_param) + + def _delete_storage_mapping(self, client, mapping_param): + left_lun_num = -1 + lungroup_id = mapping_param.get('lungroup_id') + view_id = mapping_param.get('view_id') + portgroup_id = mapping_param.get('portgroup_id') + initiator_name = mapping_param.get('initiator_name') + host_id = mapping_param.get('host_id') + if lungroup_id: + left_lun_num = client.get_obj_count_from_lungroup(lungroup_id) + if view_id and (int(left_lun_num) <= 0): + if portgroup_id and client.is_portgroup_associated_to_view( + view_id, portgroup_id): + client.delete_portgroup_mapping_view(view_id, portgroup_id) + + if client.lungroup_associated(view_id, lungroup_id): + client.delete_lungroup_mapping_view(view_id, lungroup_id) + + client.delete_lungroup(lungroup_id) + + if client.is_roce_initiator_associated_to_host( + initiator_name, host_id): + client.remove_roce_initiator_from_host(initiator_name, host_id) + + hostgroup_name = constants.HOSTGROUP_PREFIX + host_id + hostgroup_id = client.find_hostgroup(hostgroup_name) + if hostgroup_id: + if client.hostgroup_associated(view_id, hostgroup_id): + client.delete_hostgoup_mapping_view(view_id, hostgroup_id) + client.remove_host_from_hostgroup(hostgroup_id, host_id) + client.delete_hostgroup(hostgroup_id) + client.remove_host(host_id) + + client.delete_mapping_view(view_id) + + def _check_roce_params(self, volume, connector): + if not volume or not connector: + msg = _( + '%(param)s is none.' + % {'param': 'volume' if not volume else 'connector'}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not volume.id: + msg = _( + 'volume param is error. volume is %(volume)s.' + % {'volume': volume}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not connector.get('host_nqn') or not connector.get('host'): + msg = _( + 'connector param is error. connector is %(connector)s.' 
+ % {'connector': connector}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not self.is_dorado_v6: + msg = _("Current storage doesn't support RoCE.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) diff --git a/Cinder/Pike/rest_client.py b/Cinder/Pike/rest_client.py index 93a44ab..4df29e2 100644 --- a/Cinder/Pike/rest_client.py +++ b/Cinder/Pike/rest_client.py @@ -58,13 +58,14 @@ def __init__(self, configuration, san_address, san_user, san_password, self.iscsi_info = kwargs.get('iscsi_info', self.configuration.iscsi_info) self.fc_info = kwargs.get('fc_info', self.configuration.fc_info) + self.roce_info = kwargs.get('roce_info', self.configuration.roce_info) self.iscsi_default_target_ip = kwargs.get( 'iscsi_default_target_ip', self.configuration.iscsi_default_target_ip) self.metro_domain = kwargs.get('metro_domain', None) self.metro_sync_completed = strutils.bool_from_string( kwargs.get('metro_sync_completed')) - self.semaphore = threading.Semaphore(20) + self.semaphore = threading.Semaphore(self.configuration.semaphore) self.call_lock = lockutils.ReaderWriterLock() self.session = None self.url = None @@ -154,15 +155,14 @@ def do_call(self, url=None, data=None, method=None, } res_json = res.json() + response_time = res.elapsed.total_seconds() if not filter_flag: - LOG.info('\nRequest URL: %(url)s\n' - 'Call Method: %(method)s\n' - 'Request Data: %(data)s\n' - 'Response Data:%(res)s', - {'url': url, - 'method': method, - 'data': data, - 'res': res_json}) + LOG.info('Request URL: %(url)s, Call Method: %(method)s,' + 'Request Data: %(data)s, Response Data:%(res)s,' + 'Response Time:%(res_time)s', + {'url': url, 'method': method, + 'data': data, 'res': res_json, + 'res_time': response_time}) return res_json @@ -840,7 +840,7 @@ def add_host_with_check(self, host_name, is_dorado_v6, initiator): host_id = huawei_utils.get_host_id(self, host_name) new_alua_info = {} if self.is_dorado_v6: - info = self.iscsi_info or self.fc_info + info = self.iscsi_info or self.fc_info or self.roce_info new_alua_info = self._find_new_alua_info( info, host_name, initiator) if host_id: @@ -2992,3 +2992,190 @@ def cancel_rollback_snapshot(self, snapshot_id): result = self.call(url, data, "PUT") self._assert_rest_result(result, 'Cancel rollback snapshot %s error.' % snapshot_id) + + def ensure_roceini_added(self, initiator_name, host_id): + # Check and associate RoCE initiator to host on array + initiator = self._get_roceini_by_id(initiator_name) + + if not initiator: + self._add_roceini_to_array(initiator_name) + self._associate_roceini_to_host(initiator_name, host_id) + return + + if initiator.get('ISFREE') == "true": + self._associate_roceini_to_host(initiator_name, host_id) + return + + # if initiator was associated to another host + if initiator.get("PARENTID") != host_id: + msg = (_("Initiator %(ini)s has been added to another host " + "%(host)s.") % {"ini": initiator_name, + "host": initiator.get('PARENTNAME')}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _get_roceini_by_id(self, nqn): + """Get RoCE initiator from array.""" + url = '/NVMe_over_RoCE_initiator/%s' % nqn + result = self.call(url, None, "GET") + + if result.get('error', {}).get('code') == constants.FC_INITIATOR_NOT_EXIST: + LOG.warning('RoCE NQN %s not exist.', nqn) + return {} + self._assert_rest_result(result, 'get RoCE NQN %s error.' 
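ensure_roceini_added() distinguishes three initiator states on the array. A small runnable sketch of that decision, with a plain dict standing in for what _get_roceini_by_id() returns:

    def ensure_added_sketch(initiator, host_id):
        if not initiator:                        # not on the array yet
            return 'create initiator, then associate to host'
        if initiator.get('ISFREE') == 'true':    # exists but unbound
            return 'associate to host'
        if initiator.get('PARENTID') != host_id:
            raise ValueError('initiator already bound to another host')
        return 'already bound to this host; nothing to do'

    print(ensure_added_sketch({}, '11'))
    print(ensure_added_sketch({'ISFREE': 'true'}, '11'))
    print(ensure_added_sketch({'ISFREE': 'false', 'PARENTID': '11'}, '11'))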
% nqn) + + return result.get("data", {}) + + def _add_roceini_to_array(self, nqn): + """Add a new RoCE initiator to storage device.""" + url = "/NVMe_over_RoCE_initiator" + data = {"ID": nqn} + result = self.call(url, data, "POST") + if result.get('error', {}).get('code') == constants.OBJECT_ALREADY_EXIST: + LOG.warning('RoCE NQN %s has already exist in array.', nqn) + else: + self._assert_rest_result( + result, _('Add RoCE initiator %s to array error.' % nqn)) + + def _associate_roceini_to_host(self, nqn, host_id): + """Associate RoCE initiator with the host.""" + url = "/host/create_associate" + data = {"ASSOCIATEOBJTYPE": constants.NVME_ROCE_INITIATOR_TYPE, + "ID": host_id, + "ASSOCIATEOBJID": nqn} + result = self.call(url, data, "PUT") + self._assert_rest_result( + result, _("Associate RoCE initiator %(ini)s to host %(host)s " + "error." % {"ini": nqn, "host": host_id})) + + def is_roce_initiator_associated_to_host(self, initiator_name, host_id): + initiator = self._get_roceini_by_id(initiator_name) + if not initiator or initiator.get('ISFREE') == "true": + return False + + if initiator.get('PARENTID') == host_id: + return True + else: + msg = _("Initiator %(ini)s has been added to host " + "%(host)s.") % {"ini": initiator_name, + "host": initiator.get('PARENTID')} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def remove_roce_initiator_from_host(self, initiator_name, host_id): + url = "/host/remove_associate" + data = {"ID": host_id, + "ASSOCIATEOBJTYPE": constants.NVME_ROCE_INITIATOR_TYPE, + "ASSOCIATEOBJID": initiator_name} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, + _('Remove RoCE initiator from host error.')) + + def get_roce_params(self, connector): + """Get target ROCE params, including IP.""" + host_nqn = connector.get('host_nqn') + host_name = connector.get('host') + target_ips = self._get_roce_target_ips(host_nqn, host_name) + + logic_ports = self.get_roce_logical_ports() + result = [] + for ip in target_ips: + if self._is_roce_target_ip_in_array(ip, logic_ports): + format_ip = netaddr.IPAddress(ip) + if format_ip.version == 6: + ip = str(format_ip.format(dialect=netaddr.ipv6_compact)) + ip = '[' + ip + ']' + result.append(ip) + + if not result: + err_msg = _('There is no any logic ips exist on array of the ' + 'configured target_ip %s in conf file' % target_ips) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + return result + + def _get_roce_target_ips(self, initiator, host_name): + target_ips = self._get_target_ips_by_initiator_name(initiator) + + if not target_ips: + target_ips = self._get_target_ips_by_host_name(host_name) + + if not target_ips: + msg = (_( + 'get_roce_params: Failed to get target IP ' + 'for host %(host)s, please check config file.') + % {'host': host_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.info('Get the default ip: %s.', target_ips) + return target_ips + + def _get_roce_logic_ports(self, start, end, params): + url = ("/lif?range=[%(start)s-%(end)s]" + % {"start": six.text_type(start), "end": six.text_type(end)}) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('get RoCE Logic Ports error.')) + return result.get('data', []) + + def get_roce_logical_ports(self): + all_logic_ports = self._get_info_by_range( + self._get_roce_logic_ports) + return all_logic_ports + + def _is_roce_target_ip_in_array(self, ip, logic_ports): + for logic_port in logic_ports: + if logic_port.get('ADDRESSFAMILY') == 
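get_roce_params() compacts IPv6 targets and wraps them in brackets so the later '%s:4420' portal formatting stays unambiguous (4420 is the IANA-assigned NVMe-oF port). A minimal standalone example of the same netaddr calls:

    import netaddr

    ip = 'fe80:0000:0000:0000:0000:0000:0000:0001'
    addr = netaddr.IPAddress(ip)
    if addr.version == 6:
        ip = str(addr.format(dialect=netaddr.ipv6_compact))   # 'fe80::1'
        ip = '[' + ip + ']'
    print('%s:4420' % ip)    # '[fe80::1]:4420'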
constants.ADDRESS_FAMILY_IPV4: + if ip == logic_port.get('IPV4ADDR'): + return True + else: + if self._is_same_ipv6(ip, logic_port.get('IPV6ADDR')): + return True + + return False + + @staticmethod + def _is_same_ipv6(left_ip, right_ip): + format_left_ip = str( + netaddr.IPAddress(left_ip).format(dialect=netaddr.ipv6_compact)) + format_right_ip = str( + netaddr.IPAddress(right_ip).format(dialect=netaddr.ipv6_compact)) + if format_left_ip == format_right_ip: + return True + + return False + + @staticmethod + def _get_target_ip_list(roce_info, target_ips): + for target_ip in roce_info.get('TargetIP').split(): + if target_ip.strip(): + target_ips.append(target_ip) + + def _get_target_ips_by_initiator_name(self, initiator): + target_ips = [] + for info in self.roce_info: + config_initiator = info.get('Name') + if not config_initiator: + continue + if config_initiator == initiator: + self._get_target_ip_list(info, target_ips) + return target_ips + + def _get_target_ips_by_host_name(self, host_name): + target_ips = [] + temp_target_ips = [] + for info in self.roce_info: + config_host_name = info.get('HostName') + if not config_host_name: + continue + if config_host_name == '*': + self._get_target_ip_list(info, temp_target_ips) + elif re.search(config_host_name, host_name): + self._get_target_ip_list(info, target_ips) + break + + if not target_ips and temp_target_ips: + target_ips = temp_target_ips + + return target_ips diff --git a/Cinder/Queens/__init__.py b/Cinder/Queens/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Queens/__init__.py +++ b/Cinder/Queens/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Queens/constants.py b/Cinder/Queens/constants.py index 5fc622a..f8118c8 100644 --- a/Cinder/Queens/constants.py +++ b/Cinder/Queens/constants.py @@ -49,6 +49,10 @@ MIGRATION_FAULT = '74' MIGRATION_COMPLETE = '76' +# ROCE INITIATOR CONSTANTS +NVME_ROCE_INITIATOR_TYPE = '57870' +ADDRESS_FAMILY_IPV4 = '0' + ERROR_CONNECT_TO_SERVER = -403 ERROR_UNAUTHORIZED_TO_SERVER = -401 ERROR_BAD_STATUS_LINE = -400 @@ -57,6 +61,7 @@ SOCKET_TIMEOUT = 52 ERROR_VOLUME_ALREADY_EXIST = 1077948993 LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 ERROR_VOLUME_NOT_EXIST = 1077939726 ERROR_LUN_NOT_EXIST = 1077936859 ERROR_SNAPSHOT_NOT_EXIST = 1077937880 @@ -73,6 +78,7 @@ CLONE_PAIR_SYNC_COMPLETE = 1073798176 CLONE_PAIR_SYNC_NOT_EXIST = 1073798172 HOST_ALREADY_IN_HOSTGROUP = 1077937501 +OBJECT_ALREADY_EXIST = 1077948997 LUN_ALREADY_IN_LUNGROUP = 1077948997 HOSTGROUP_ALREADY_IN_MAPPINGVIEW = 1073804556 LUNGROUP_ALREADY_IN_MAPPINGVIEW = 1073804560 @@ -148,7 +154,7 @@ 'Thin': THIN_LUNTYPE} VALID_PRODUCT = ['V3', 'V5', '18000', 'Dorado', 'V6'] -VALID_PROTOCOL = ['FC', 'iSCSI'] +VALID_PROTOCOL = ['FC', 'iSCSI', 'nvmeof'] VALID_WRITE_TYPE = ['1', '2'] VOLUME_NOT_EXISTS_WARN = 'warning' VOLUME_NOT_EXISTS_RAISE = 'raise' diff --git a/Cinder/Queens/huawei_conf.py b/Cinder/Queens/huawei_conf.py index e90ddb3..cd0a2c9 100644 --- a/Cinder/Queens/huawei_conf.py +++ b/Cinder/Queens/huawei_conf.py @@ -90,6 +90,7 @@ def update_config_value(self): self._force_delete_volume, self._iscsi_default_target_ip, self._iscsi_info, + self._roce_info, self._fc_info, self._ssl_cert_path, self._ssl_cert_verify, @@ -102,7 +103,8 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._rollback_speed, - self._set_qos_ignored_param) + self._set_qos_ignored_param, + self._get_rest_client_semaphore) for f in set_attr_funcs: f(xml_root) @@ -440,7 +442,7 @@ def 
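The two lookup helpers above give Name (host_nqn) matches priority over HostName matches, and treat a HostName of '*' purely as a fallback. A self-contained sketch with illustrative config entries; note TargetIP is whitespace-separated, matching _get_target_ip_list():

    import re

    roce_info = [   # as parsed from <RoCE><Initiator .../> entries
        {'Name': 'nqn.host-a', 'TargetIP': '192.168.1.10 192.168.1.11'},
        {'HostName': 'compute-1[0-9]', 'TargetIP': '192.168.2.10'},
        {'HostName': '*', 'TargetIP': '192.168.3.10'},
    ]

    def resolve(initiator, host_name):
        for info in roce_info:                    # 1. initiator match wins
            if info.get('Name') == initiator:
                return info['TargetIP'].split()
        fallback = []
        for info in roce_info:                    # 2. then HostName regex
            pattern = info.get('HostName')
            if not pattern:
                continue
            if pattern == '*':
                fallback = info['TargetIP'].split()
            elif re.search(pattern, host_name):
                return info['TargetIP'].split()
        return fallback                           # 3. '*' only as last resort

    print(resolve('nqn.host-a', 'compute-12'))  # ['192.168.1.10', '192.168.1.11']
    print(resolve('nqn.other', 'compute-12'))   # ['192.168.2.10']
    print(resolve('nqn.other', 'db-01'))        # ['192.168.3.10']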
_parse_rmt_iscsi_info(self, iscsi_info): # Step 5, make initiators configure dict, convert to: # [{'TargetPortGroup': 'xxx', 'Name': 'xxx'}, - # {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}] + # {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}] get_opts = lambda x: x.split(':', 1) initiator_infos = map(lambda x: dict(map(get_opts, x)), initiator_infos) @@ -474,6 +476,8 @@ def get_hypermetro_devices(self): dev.get('iscsi_info')) dev_config['fc_info'] = self._parse_rmt_iscsi_info( dev.get('fc_info')) + dev_config['roce_info'] = self._parse_rmt_iscsi_info( + dev.get('roce_info')) dev_config['iscsi_default_target_ip'] = ( dev['iscsi_default_target_ip'].split(';') if 'iscsi_default_target_ip' in dev @@ -507,6 +511,8 @@ def get_replication_devices(self): dev.get('iscsi_info')) dev_config['fc_info'] = self._parse_rmt_iscsi_info( dev.get('fc_info')) + dev_config['roce_info'] = self._parse_rmt_iscsi_info( + dev.get('roce_info')) dev_config['iscsi_default_target_ip'] = ( dev['iscsi_default_target_ip'].split(';') if 'iscsi_default_target_ip' in dev @@ -529,6 +535,7 @@ def get_local_device(self): 'storage_pools': self.conf.storage_pools, 'iscsi_info': self.conf.iscsi_info, 'fc_info': self.conf.fc_info, + 'roce_info': self.conf.roce_info, 'iscsi_default_target_ip': self.conf.iscsi_default_target_ip, 'in_band_or_not': self.conf.in_band_or_not, 'storage_sn': self.conf.storage_sn, @@ -679,3 +686,32 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + def _roce_info(self, xml_root): + nodes = xml_root.findall('RoCE/Initiator') + if nodes is None: + setattr(self.conf, 'roce_info', []) + return + + roce_info = [] + for node in nodes: + props = {} + for item in node.items(): + props[item[0].strip()] = item[1].strip() + + roce_info.append(props) + + self._check_hostname_regex_config(roce_info) + setattr(self.conf, 'roce_info', roce_info) diff --git a/Cinder/Queens/huawei_driver.py b/Cinder/Queens/huawei_driver.py index c636563..256e298 100644 --- a/Cinder/Queens/huawei_driver.py +++ b/Cinder/Queens/huawei_driver.py @@ -81,7 +81,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -3642,3 +3642,226 @@ def _delete_zone_and_remove_fc_initiators(self, wwns, host_id): 'data': {'target_wwn': tgt_port_wwns, 'initiator_target_map': init_targ_map}} return info, portg_id + + +class HuaweiROCEDriver(HuaweiBaseDriver): + """RoCE driver for Huawei storage arrays. + + Version history: + 2.6.4 - start to support RoCE. 
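The new _roce_info() and _get_rest_client_semaphore() hooks read the driver's XML configuration file. A hedged sketch of the expected layout, parsed with the same element paths; the enclosing <config> root element is an assumption, since only the Storage/Semaphore and RoCE/Initiator paths appear in this patch:

    import xml.etree.ElementTree as ET

    xml_text = """
    <config>
      <Storage><Semaphore>20</Semaphore></Storage>
      <RoCE>
        <Initiator Name="nqn.host-a" TargetIP="192.168.1.10"/>
      </RoCE>
    </config>
    """
    root = ET.fromstring(xml_text)
    print(root.findtext('Storage/Semaphore'))        # '20'
    for node in root.findall('RoCE/Initiator'):
        print({k.strip(): v.strip() for k, v in node.items()})
        # -> {'Name': 'nqn.host-a', 'TargetIP': '192.168.1.10'}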
+ """ + + def __init__(self, *args, **kwargs): + super(HuaweiROCEDriver, self).__init__(*args, **kwargs) + + def get_volume_stats(self, refresh=False): + """Get volume status.""" + data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or self.__class__.__name__ + data['storage_protocol'] = 'nvmeof' + data['driver_version'] = self.VERSION + data['vendor_name'] = 'Huawei' + return data + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def initialize_connection(self, volume, connector): + """Map a volume to a host and return target RoCE information.""" + self._check_roce_params(volume, connector) + + # Attach local lun. + roce_info = self._initialize_connection(volume, connector) + + # Attach remote lun if exists. + metadata = huawei_utils.get_lun_metadata(volume) + LOG.info("Attach Volume, metadata is: %s.", metadata) + if metadata.get('hypermetro'): + try: + rmt_roce_info = ( + self._initialize_connection(volume, connector, False)) + except Exception: + with excutils.save_and_reraise_exception(): + self._terminate_connection(volume, connector) + + roce_info.get('data').get('target_portals').extend( + rmt_roce_info.get('data').get('target_portals')) + roce_info.get('data').get('target_luns').extend( + rmt_roce_info.get('data').get('target_luns')) + + LOG.info('initialize_common_connection_roce, ' + 'return data is: %s.', roce_info) + return roce_info + + def _initialize_connection(self, volume, connector, local=True): + LOG.info('Initialize RoCE connection for volume %(id)s, ' + 'connector info %(conn)s. array is in %(location)s.', + {'id': volume.id, 'conn': connector, + 'location': 'local' if local else 'remote'}) + + host_nqn = connector.get("host_nqn") + + client = self.client if local else self.rmt_client + + lun_id, lun_type = self.get_lun_id_and_type( + volume, constants.VOLUME_NOT_EXISTS_RAISE, local) + lun_info = client.get_lun_info(lun_id, lun_type) + + target_ips = client.get_roce_params(connector) + + host_id = client.add_host_with_check( + connector.get('host'), self.is_dorado_v6, host_nqn) + + try: + client.ensure_roceini_added(host_nqn, host_id) + except Exception: + with excutils.save_and_reraise_exception(): + self.remove_host_with_check(host_id) + + hostgroup_id = client.add_host_to_hostgroup(host_id) + + metadata = huawei_utils.get_lun_metadata(volume) + hypermetro_lun = metadata.get('hypermetro') + + map_info = client.do_mapping( + lun_info, hostgroup_id, host_id, + lun_type=lun_type, hypermetro_lun=hypermetro_lun) + host_lun_id = client.get_host_lun_id(host_id, lun_info, lun_type) + LOG.info('initialize_connection, host lun id is: %(id)s. 
' + 'View info is %(view)s.', + {'id': host_lun_id, 'view': map_info}) + host_lun_id = int(host_lun_id) + mapping_info = { + 'target_portals': ['%s:4420' % ip for ip in target_ips], + 'target_luns': [host_lun_id] * len(target_ips), + 'transport_type': 'rdma', + 'host_nqn': host_nqn, + 'discard': True, + 'volume_nguid': lun_info.get("NGUID") + } + conn = { + 'driver_volume_type': 'nvmeof', + 'data': mapping_info + } + LOG.info('Initialize RoCE connection successfully: %s.', conn) + return conn + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def terminate_connection(self, volume, connector, **kwargs): + """Delete map between a volume and a host.""" + self._check_roce_params(volume, connector) + + metadata = huawei_utils.get_lun_metadata(volume) + LOG.info("terminate_connection, metadata is: %s.", metadata) + self._terminate_connection(volume, connector) + + if metadata.get('hypermetro'): + self._terminate_connection(volume, connector, False) + + LOG.info('terminate_connection success.') + + def _terminate_connection(self, volume, connector, local=True): + LOG.info('_terminate_connection, detach %(local)s volume.', + {'local': 'local' if local else 'remote'}) + + client = self.client if local else self.rmt_client + + lun_id, lun_type = self.get_lun_id_and_type( + volume, constants.VOLUME_NOT_EXISTS_WARN, local) + + initiator_name = connector.get('host_nqn') + host_name = connector.get('host') + + LOG.info('terminate_connection: initiator name: %(ini)s, LUN ID: %(' + 'lunid)s, lun type: %(lun_type)s, connector: %(' + 'connector)s.', {'ini': initiator_name, 'lunid': lun_id, + 'lun_type': lun_type, + 'connector': connector}) + + lungroup_id = None + portgroup_id = None + view_id = None + + host_id = huawei_utils.get_host_id(client, host_name) + if host_id: + mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id + view_id = client.find_mapping_view(mapping_view_name) + if view_id: + lungroup_id = client.find_lungroup_from_map(view_id) + portgroup_id = client.get_portgroup_by_view(view_id) + + if lun_id and lungroup_id: + lungroup_ids = client.get_lungroupids_by_lunid(lun_id, lun_type) + if lungroup_id in lungroup_ids: + client.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) + else: + LOG.warning("LUN is not in lungroup. LUN ID: %(lun_id)s. 
" + "Lungroup id: %(lungroup_id)s.", + {"lun_id": lun_id, "lungroup_id": lungroup_id}) + if self.configuration.retain_storage_mapping: + return + + mapping_param = {'host_id': host_id, 'initiator_name': initiator_name, + 'lungroup_id': lungroup_id, 'view_id': view_id, + 'portgroup_id': portgroup_id} + self._delete_storage_mapping(client, mapping_param) + + def _delete_storage_mapping(self, client, mapping_param): + left_lun_num = -1 + lungroup_id = mapping_param.get('lungroup_id') + view_id = mapping_param.get('view_id') + portgroup_id = mapping_param.get('portgroup_id') + initiator_name = mapping_param.get('initiator_name') + host_id = mapping_param.get('host_id') + if lungroup_id: + left_lun_num = client.get_obj_count_from_lungroup(lungroup_id) + if view_id and (int(left_lun_num) <= 0): + if portgroup_id and client.is_portgroup_associated_to_view( + view_id, portgroup_id): + client.delete_portgroup_mapping_view(view_id, portgroup_id) + + if client.lungroup_associated(view_id, lungroup_id): + client.delete_lungroup_mapping_view(view_id, lungroup_id) + + client.delete_lungroup(lungroup_id) + + if client.is_roce_initiator_associated_to_host( + initiator_name, host_id): + client.remove_roce_initiator_from_host(initiator_name, host_id) + + hostgroup_name = constants.HOSTGROUP_PREFIX + host_id + hostgroup_id = client.find_hostgroup(hostgroup_name) + if hostgroup_id: + if client.hostgroup_associated(view_id, hostgroup_id): + client.delete_hostgoup_mapping_view(view_id, hostgroup_id) + client.remove_host_from_hostgroup(hostgroup_id, host_id) + client.delete_hostgroup(hostgroup_id) + client.remove_host(host_id) + + client.delete_mapping_view(view_id) + + def _check_roce_params(self, volume, connector): + if not volume or not connector: + msg = _( + '%(param)s is none.' + % {'param': 'volume' if not volume else 'connector'}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not volume.id: + msg = _( + 'volume param is error. volume is %(volume)s.' + % {'volume': volume}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not connector.get('host_nqn') or not connector.get('host'): + msg = _( + 'connector param is error. connector is %(connector)s.' 
+ % {'connector': connector}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not self.is_dorado_v6: + msg = _("Current storage doesn't support RoCE.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) diff --git a/Cinder/Queens/rest_client.py b/Cinder/Queens/rest_client.py index 93a44ab..4df29e2 100644 --- a/Cinder/Queens/rest_client.py +++ b/Cinder/Queens/rest_client.py @@ -58,13 +58,14 @@ def __init__(self, configuration, san_address, san_user, san_password, self.iscsi_info = kwargs.get('iscsi_info', self.configuration.iscsi_info) self.fc_info = kwargs.get('fc_info', self.configuration.fc_info) + self.roce_info = kwargs.get('roce_info', self.configuration.roce_info) self.iscsi_default_target_ip = kwargs.get( 'iscsi_default_target_ip', self.configuration.iscsi_default_target_ip) self.metro_domain = kwargs.get('metro_domain', None) self.metro_sync_completed = strutils.bool_from_string( kwargs.get('metro_sync_completed')) - self.semaphore = threading.Semaphore(20) + self.semaphore = threading.Semaphore(self.configuration.semaphore) self.call_lock = lockutils.ReaderWriterLock() self.session = None self.url = None @@ -154,15 +155,14 @@ def do_call(self, url=None, data=None, method=None, } res_json = res.json() + response_time = res.elapsed.total_seconds() if not filter_flag: - LOG.info('\nRequest URL: %(url)s\n' - 'Call Method: %(method)s\n' - 'Request Data: %(data)s\n' - 'Response Data:%(res)s', - {'url': url, - 'method': method, - 'data': data, - 'res': res_json}) + LOG.info('Request URL: %(url)s, Call Method: %(method)s,' + 'Request Data: %(data)s, Response Data:%(res)s,' + 'Response Time:%(res_time)s', + {'url': url, 'method': method, + 'data': data, 'res': res_json, + 'res_time': response_time}) return res_json @@ -840,7 +840,7 @@ def add_host_with_check(self, host_name, is_dorado_v6, initiator): host_id = huawei_utils.get_host_id(self, host_name) new_alua_info = {} if self.is_dorado_v6: - info = self.iscsi_info or self.fc_info + info = self.iscsi_info or self.fc_info or self.roce_info new_alua_info = self._find_new_alua_info( info, host_name, initiator) if host_id: @@ -2992,3 +2992,190 @@ def cancel_rollback_snapshot(self, snapshot_id): result = self.call(url, data, "PUT") self._assert_rest_result(result, 'Cancel rollback snapshot %s error.' % snapshot_id) + + def ensure_roceini_added(self, initiator_name, host_id): + # Check and associate RoCE initiator to host on array + initiator = self._get_roceini_by_id(initiator_name) + + if not initiator: + self._add_roceini_to_array(initiator_name) + self._associate_roceini_to_host(initiator_name, host_id) + return + + if initiator.get('ISFREE') == "true": + self._associate_roceini_to_host(initiator_name, host_id) + return + + # if initiator was associated to another host + if initiator.get("PARENTID") != host_id: + msg = (_("Initiator %(ini)s has been added to another host " + "%(host)s.") % {"ini": initiator_name, + "host": initiator.get('PARENTNAME')}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _get_roceini_by_id(self, nqn): + """Get RoCE initiator from array.""" + url = '/NVMe_over_RoCE_initiator/%s' % nqn + result = self.call(url, None, "GET") + + if result.get('error', {}).get('code') == constants.FC_INITIATOR_NOT_EXIST: + LOG.warning('RoCE NQN %s not exist.', nqn) + return {} + self._assert_rest_result(result, 'get RoCE NQN %s error.' 
% nqn) + + return result.get("data", {}) + + def _add_roceini_to_array(self, nqn): + """Add a new RoCE initiator to storage device.""" + url = "/NVMe_over_RoCE_initiator" + data = {"ID": nqn} + result = self.call(url, data, "POST") + if result.get('error', {}).get('code') == constants.OBJECT_ALREADY_EXIST: + LOG.warning('RoCE NQN %s has already exist in array.', nqn) + else: + self._assert_rest_result( + result, _('Add RoCE initiator %s to array error.' % nqn)) + + def _associate_roceini_to_host(self, nqn, host_id): + """Associate RoCE initiator with the host.""" + url = "/host/create_associate" + data = {"ASSOCIATEOBJTYPE": constants.NVME_ROCE_INITIATOR_TYPE, + "ID": host_id, + "ASSOCIATEOBJID": nqn} + result = self.call(url, data, "PUT") + self._assert_rest_result( + result, _("Associate RoCE initiator %(ini)s to host %(host)s " + "error." % {"ini": nqn, "host": host_id})) + + def is_roce_initiator_associated_to_host(self, initiator_name, host_id): + initiator = self._get_roceini_by_id(initiator_name) + if not initiator or initiator.get('ISFREE') == "true": + return False + + if initiator.get('PARENTID') == host_id: + return True + else: + msg = _("Initiator %(ini)s has been added to host " + "%(host)s.") % {"ini": initiator_name, + "host": initiator.get('PARENTID')} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def remove_roce_initiator_from_host(self, initiator_name, host_id): + url = "/host/remove_associate" + data = {"ID": host_id, + "ASSOCIATEOBJTYPE": constants.NVME_ROCE_INITIATOR_TYPE, + "ASSOCIATEOBJID": initiator_name} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, + _('Remove RoCE initiator from host error.')) + + def get_roce_params(self, connector): + """Get target ROCE params, including IP.""" + host_nqn = connector.get('host_nqn') + host_name = connector.get('host') + target_ips = self._get_roce_target_ips(host_nqn, host_name) + + logic_ports = self.get_roce_logical_ports() + result = [] + for ip in target_ips: + if self._is_roce_target_ip_in_array(ip, logic_ports): + format_ip = netaddr.IPAddress(ip) + if format_ip.version == 6: + ip = str(format_ip.format(dialect=netaddr.ipv6_compact)) + ip = '[' + ip + ']' + result.append(ip) + + if not result: + err_msg = _('There is no any logic ips exist on array of the ' + 'configured target_ip %s in conf file' % target_ips) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + return result + + def _get_roce_target_ips(self, initiator, host_name): + target_ips = self._get_target_ips_by_initiator_name(initiator) + + if not target_ips: + target_ips = self._get_target_ips_by_host_name(host_name) + + if not target_ips: + msg = (_( + 'get_roce_params: Failed to get target IP ' + 'for host %(host)s, please check config file.') + % {'host': host_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.info('Get the default ip: %s.', target_ips) + return target_ips + + def _get_roce_logic_ports(self, start, end, params): + url = ("/lif?range=[%(start)s-%(end)s]" + % {"start": six.text_type(start), "end": six.text_type(end)}) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('get RoCE Logic Ports error.')) + return result.get('data', []) + + def get_roce_logical_ports(self): + all_logic_ports = self._get_info_by_range( + self._get_roce_logic_ports) + return all_logic_ports + + def _is_roce_target_ip_in_array(self, ip, logic_ports): + for logic_port in logic_ports: + if logic_port.get('ADDRESSFAMILY') == 
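get_roce_logical_ports() hands the per-range fetch to the pre-existing _get_info_by_range() helper, which is not part of this patch. A sketch of the presumed paging pattern; the page size of 100 is illustrative, not taken from the driver:

    def get_all(fetch_range, page=100):
        # fetch_range(start, end, params) -> list of dicts, like
        # _get_roce_logic_ports() above
        start, items = 0, []
        while True:
            batch = fetch_range(start, start + page, None)
            items.extend(batch)
            if len(batch) < page:
                return items
            start += page

    print(get_all(lambda s, e, p: []))   # [] when the array has no LIFs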
constants.ADDRESS_FAMILY_IPV4: + if ip == logic_port.get('IPV4ADDR'): + return True + else: + if self._is_same_ipv6(ip, logic_port.get('IPV6ADDR')): + return True + + return False + + @staticmethod + def _is_same_ipv6(left_ip, right_ip): + format_left_ip = str( + netaddr.IPAddress(left_ip).format(dialect=netaddr.ipv6_compact)) + format_right_ip = str( + netaddr.IPAddress(right_ip).format(dialect=netaddr.ipv6_compact)) + if format_left_ip == format_right_ip: + return True + + return False + + @staticmethod + def _get_target_ip_list(roce_info, target_ips): + for target_ip in roce_info.get('TargetIP').split(): + if target_ip.strip(): + target_ips.append(target_ip) + + def _get_target_ips_by_initiator_name(self, initiator): + target_ips = [] + for info in self.roce_info: + config_initiator = info.get('Name') + if not config_initiator: + continue + if config_initiator == initiator: + self._get_target_ip_list(info, target_ips) + return target_ips + + def _get_target_ips_by_host_name(self, host_name): + target_ips = [] + temp_target_ips = [] + for info in self.roce_info: + config_host_name = info.get('HostName') + if not config_host_name: + continue + if config_host_name == '*': + self._get_target_ip_list(info, temp_target_ips) + elif re.search(config_host_name, host_name): + self._get_target_ip_list(info, target_ips) + break + + if not target_ips and temp_target_ips: + target_ips = temp_target_ips + + return target_ips diff --git a/Cinder/Rocky/__init__.py b/Cinder/Rocky/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Rocky/__init__.py +++ b/Cinder/Rocky/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Rocky/constants.py b/Cinder/Rocky/constants.py index 756c839..8b54853 100644 --- a/Cinder/Rocky/constants.py +++ b/Cinder/Rocky/constants.py @@ -37,7 +37,8 @@ DEFAULT_WAIT_INTERVAL = 5 MAX_NAME_LENGTH = 31 SOCKET_TIMEOUT = 52 -LOGIN_SOCKET_TIMEOUT = 4 +LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 PWD_EXPIRED_OR_INITIAL = (3, 4) LUN_STATUS = (LUN_ONLINE, LUN_INITIALIZING, LUN_OFFLINE) = ('27', '53', '28') diff --git a/Cinder/Rocky/huawei_base_driver.py b/Cinder/Rocky/huawei_base_driver.py index cd6926f..3ff5fc1 100644 --- a/Cinder/Rocky/huawei_base_driver.py +++ b/Cinder/Rocky/huawei_base_driver.py @@ -55,7 +55,7 @@ class HuaweiBaseDriver(object): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -86,7 +86,8 @@ def do_setup(self, context): 'ssl_cert_verify': self.configuration.ssl_cert_verify, 'ssl_cert_path': self.configuration.ssl_cert_path, 'in_band_or_not': self.configuration.in_band_or_not, - 'storage_sn': self.configuration.storage_sn + 'storage_sn': self.configuration.storage_sn, + 'semaphore': self.configuration.semaphore } self.local_cli = rest_client.RestClient(config_dict) self.local_cli.login() @@ -97,11 +98,17 @@ def do_setup(self, context): self.support_capability[c] = False if self.configuration.hypermetro: + self.configuration.hypermetro.update( + {'semaphore': self.configuration.semaphore} + ) self.hypermetro_rmt_cli = rest_client.RestClient( self.configuration.hypermetro) self.hypermetro_rmt_cli.login() if self.configuration.replication: + self.configuration.replication.update( + {'semaphore': self.configuration.semaphore} + ) self.replication_rmt_cli = rest_client.RestClient( self.configuration.replication) self.replication_rmt_cli.login() diff --git a/Cinder/Rocky/huawei_conf.py b/Cinder/Rocky/huawei_conf.py index b76e6db..d93775c 100644 --- 
a/Cinder/Rocky/huawei_conf.py +++ b/Cinder/Rocky/huawei_conf.py @@ -84,6 +84,7 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._set_qos_ignored_param, + self._get_rest_client_semaphore, ) for f in attr_funcs: @@ -640,3 +641,15 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) diff --git a/Cinder/Rocky/rest_client.py b/Cinder/Rocky/rest_client.py index 9d5c923..8932395 100644 --- a/Cinder/Rocky/rest_client.py +++ b/Cinder/Rocky/rest_client.py @@ -38,10 +38,6 @@ def _error_code(result): return result['error']['code'] -# To limit the requests concurrently sent to array -_semaphore = threading.Semaphore(20) - - def obj_operation_wrapper(func): @functools.wraps(func) def wrapped(self, url_format=None, **kwargs): @@ -49,7 +45,7 @@ def wrapped(self, url_format=None, **kwargs): if url_format: url += url_format % kwargs - _semaphore.acquire() + self.semaphore.acquire() try: result = func(self, url, **kwargs) @@ -57,7 +53,7 @@ def wrapped(self, url_format=None, **kwargs): return {"error": {"code": exc.response.status_code, "description": six.text_type(exc)}} finally: - _semaphore.release() + self.semaphore.release() return result @@ -67,6 +63,7 @@ def wrapped(self, url_format=None, **kwargs): class CommonObject(object): def __init__(self, client): self.client = client + self.semaphore = client.semaphore @obj_operation_wrapper def post(self, url, **kwargs): @@ -1403,12 +1400,9 @@ def wrapped(self, url, **kwargs): need_relogin = False if not kwargs.get('log_filter'): - LOG.info('\nURL: %(url)s\n' - 'Method: %(method)s\n' - 'Data: %(data)s\n', + LOG.info('URL: %(url)s, Method: %(method)s, Data: %(data)s,', {'url': (self._login_url or '') + url, - 'method': func.__name__, - 'data': kwargs.get('data')}) + 'method': func.__name__, 'data': kwargs.get('data')}) with self._session_lock.read_lock(): if self._login_url: @@ -1451,8 +1445,10 @@ def wrapped(self, url, **kwargs): r.raise_for_status() result = r.json() + response_time = r.elapsed.total_seconds() if not kwargs.get('log_filter'): - LOG.info('Response: %s', result) + LOG.info('Response: %s, Response duration time is %s', + result, response_time) return result return wrapped @@ -1468,6 +1464,9 @@ def __init__(self, config_dict): self.cert_path = config_dict.get('ssl_cert_path') self.in_band_or_not = config_dict.get('in_band_or_not') self.storage_sn = config_dict.get('storage_sn') + # To limit the requests concurrently sent to array + self.semaphore = threading.Semaphore( + config_dict.get('semaphore', constants.DEFAULT_SEMAPHORE)) self._login_url = None self._login_device_id = None diff --git a/Cinder/Stein/__init__.py b/Cinder/Stein/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Stein/__init__.py +++ b/Cinder/Stein/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Stein/constants.py 
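The module-level semaphore deleted in these rest_client.py hunks was shared by every RestClient in the process; an instance-level semaphore gives each backend (local array, hypermetro remote, replication remote) its own concurrency budget. The pattern reduced to essentials:

    import threading

    class Client(object):
        def __init__(self, limit=20):        # constants.DEFAULT_SEMAPHORE
            self.semaphore = threading.Semaphore(limit)

        def call(self, func, *args):
            self.semaphore.acquire()         # block if 'limit' calls in flight
            try:
                return func(*args)
            finally:
                self.semaphore.release()

    local, remote = Client(20), Client(50)   # throttled independently
    print(local.call(lambda x: x * 2, 21))   # 42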
b/Cinder/Stein/constants.py index 756c839..8b54853 100644 --- a/Cinder/Stein/constants.py +++ b/Cinder/Stein/constants.py @@ -37,7 +37,8 @@ DEFAULT_WAIT_INTERVAL = 5 MAX_NAME_LENGTH = 31 SOCKET_TIMEOUT = 52 -LOGIN_SOCKET_TIMEOUT = 4 +LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 PWD_EXPIRED_OR_INITIAL = (3, 4) LUN_STATUS = (LUN_ONLINE, LUN_INITIALIZING, LUN_OFFLINE) = ('27', '53', '28') diff --git a/Cinder/Stein/huawei_base_driver.py b/Cinder/Stein/huawei_base_driver.py index cd6926f..3ff5fc1 100644 --- a/Cinder/Stein/huawei_base_driver.py +++ b/Cinder/Stein/huawei_base_driver.py @@ -55,7 +55,7 @@ class HuaweiBaseDriver(object): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -86,7 +86,8 @@ def do_setup(self, context): 'ssl_cert_verify': self.configuration.ssl_cert_verify, 'ssl_cert_path': self.configuration.ssl_cert_path, 'in_band_or_not': self.configuration.in_band_or_not, - 'storage_sn': self.configuration.storage_sn + 'storage_sn': self.configuration.storage_sn, + 'semaphore': self.configuration.semaphore } self.local_cli = rest_client.RestClient(config_dict) self.local_cli.login() @@ -97,11 +98,17 @@ def do_setup(self, context): self.support_capability[c] = False if self.configuration.hypermetro: + self.configuration.hypermetro.update( + {'semaphore': self.configuration.semaphore} + ) self.hypermetro_rmt_cli = rest_client.RestClient( self.configuration.hypermetro) self.hypermetro_rmt_cli.login() if self.configuration.replication: + self.configuration.replication.update( + {'semaphore': self.configuration.semaphore} + ) self.replication_rmt_cli = rest_client.RestClient( self.configuration.replication) self.replication_rmt_cli.login() diff --git a/Cinder/Stein/huawei_conf.py b/Cinder/Stein/huawei_conf.py index b76e6db..d93775c 100644 --- a/Cinder/Stein/huawei_conf.py +++ b/Cinder/Stein/huawei_conf.py @@ -84,6 +84,7 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._set_qos_ignored_param, + self._get_rest_client_semaphore, ) for f in attr_funcs: @@ -640,3 +641,15 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. 
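How _get_rest_client_semaphore() treats the <Storage><Semaphore> value, mirrored in a standalone function:

    DEFAULT_SEMAPHORE = 20

    def parse_semaphore(text):
        if not text or not text.strip():
            return DEFAULT_SEMAPHORE          # absent or blank -> default
        if text.isdigit() and int(text) > 0:
            return int(text)
        raise ValueError('Semaphore must be an integer greater than zero')

    print(parse_semaphore(None))   # 20
    print(parse_semaphore('30'))   # 30
    # '0', '-5' and '1.5' all raise: isdigit() rejects signs and dots,
    # and zero is not greater than zero.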
The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) diff --git a/Cinder/Stein/rest_client.py b/Cinder/Stein/rest_client.py index 9d5c923..8932395 100644 --- a/Cinder/Stein/rest_client.py +++ b/Cinder/Stein/rest_client.py @@ -38,10 +38,6 @@ def _error_code(result): return result['error']['code'] -# To limit the requests concurrently sent to array -_semaphore = threading.Semaphore(20) - - def obj_operation_wrapper(func): @functools.wraps(func) def wrapped(self, url_format=None, **kwargs): @@ -49,7 +45,7 @@ def wrapped(self, url_format=None, **kwargs): if url_format: url += url_format % kwargs - _semaphore.acquire() + self.semaphore.acquire() try: result = func(self, url, **kwargs) @@ -57,7 +53,7 @@ def wrapped(self, url_format=None, **kwargs): return {"error": {"code": exc.response.status_code, "description": six.text_type(exc)}} finally: - _semaphore.release() + self.semaphore.release() return result @@ -67,6 +63,7 @@ def wrapped(self, url_format=None, **kwargs): class CommonObject(object): def __init__(self, client): self.client = client + self.semaphore = client.semaphore @obj_operation_wrapper def post(self, url, **kwargs): @@ -1403,12 +1400,9 @@ def wrapped(self, url, **kwargs): need_relogin = False if not kwargs.get('log_filter'): - LOG.info('\nURL: %(url)s\n' - 'Method: %(method)s\n' - 'Data: %(data)s\n', + LOG.info('URL: %(url)s, Method: %(method)s, Data: %(data)s,', {'url': (self._login_url or '') + url, - 'method': func.__name__, - 'data': kwargs.get('data')}) + 'method': func.__name__, 'data': kwargs.get('data')}) with self._session_lock.read_lock(): if self._login_url: @@ -1451,8 +1445,10 @@ def wrapped(self, url, **kwargs): r.raise_for_status() result = r.json() + response_time = r.elapsed.total_seconds() if not kwargs.get('log_filter'): - LOG.info('Response: %s', result) + LOG.info('Response: %s, Response duration time is %s', + result, response_time) return result return wrapped @@ -1468,6 +1464,9 @@ def __init__(self, config_dict): self.cert_path = config_dict.get('ssl_cert_path') self.in_band_or_not = config_dict.get('in_band_or_not') self.storage_sn = config_dict.get('storage_sn') + # To limit the requests concurrently sent to array + self.semaphore = threading.Semaphore( + config_dict.get('semaphore', constants.DEFAULT_SEMAPHORE)) self._login_url = None self._login_device_id = None diff --git a/Cinder/Train/__init__.py b/Cinder/Train/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Train/__init__.py +++ b/Cinder/Train/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Train/constants.py b/Cinder/Train/constants.py index 756c839..8b54853 100644 --- a/Cinder/Train/constants.py +++ b/Cinder/Train/constants.py @@ -37,7 +37,8 @@ DEFAULT_WAIT_INTERVAL = 5 MAX_NAME_LENGTH = 31 SOCKET_TIMEOUT = 52 -LOGIN_SOCKET_TIMEOUT = 4 +LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 PWD_EXPIRED_OR_INITIAL = (3, 4) LUN_STATUS = (LUN_ONLINE, LUN_INITIALIZING, LUN_OFFLINE) = ('27', '53', '28') diff --git a/Cinder/Train/huawei_base_driver.py b/Cinder/Train/huawei_base_driver.py index cd6926f..3ff5fc1 100644 --- a/Cinder/Train/huawei_base_driver.py +++ b/Cinder/Train/huawei_base_driver.py @@ -55,7 +55,7 @@ class HuaweiBaseDriver(object): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -86,7 +86,8 @@ def do_setup(self, context): 'ssl_cert_verify': self.configuration.ssl_cert_verify, 
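The response time appended to the REST logs comes from requests' Response.elapsed, which measures the interval from sending the request until the response headers are parsed; it does not include streaming the body. A minimal sketch against a placeholder endpoint:

    import requests

    # array.example.com is a placeholder, not a real device address.
    r = requests.get(
        'https://array.example.com:8088/deviceManager/rest/xx/lun',
        timeout=52)                              # SOCKET_TIMEOUT
    result = r.json()
    response_time = r.elapsed.total_seconds()    # float seconds
    print('Response: %s, Response duration time is %s'
          % (result, response_time))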
'ssl_cert_path': self.configuration.ssl_cert_path, 'in_band_or_not': self.configuration.in_band_or_not, - 'storage_sn': self.configuration.storage_sn + 'storage_sn': self.configuration.storage_sn, + 'semaphore': self.configuration.semaphore } self.local_cli = rest_client.RestClient(config_dict) self.local_cli.login() @@ -97,11 +98,17 @@ def do_setup(self, context): self.support_capability[c] = False if self.configuration.hypermetro: + self.configuration.hypermetro.update( + {'semaphore': self.configuration.semaphore} + ) self.hypermetro_rmt_cli = rest_client.RestClient( self.configuration.hypermetro) self.hypermetro_rmt_cli.login() if self.configuration.replication: + self.configuration.replication.update( + {'semaphore': self.configuration.semaphore} + ) self.replication_rmt_cli = rest_client.RestClient( self.configuration.replication) self.replication_rmt_cli.login() diff --git a/Cinder/Train/huawei_conf.py b/Cinder/Train/huawei_conf.py index b76e6db..d93775c 100644 --- a/Cinder/Train/huawei_conf.py +++ b/Cinder/Train/huawei_conf.py @@ -84,6 +84,7 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._set_qos_ignored_param, + self._get_rest_client_semaphore, ) for f in attr_funcs: @@ -640,3 +641,15 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. 
The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) diff --git a/Cinder/Train/rest_client.py b/Cinder/Train/rest_client.py index 9d5c923..8932395 100644 --- a/Cinder/Train/rest_client.py +++ b/Cinder/Train/rest_client.py @@ -38,10 +38,6 @@ def _error_code(result): return result['error']['code'] -# To limit the requests concurrently sent to array -_semaphore = threading.Semaphore(20) - - def obj_operation_wrapper(func): @functools.wraps(func) def wrapped(self, url_format=None, **kwargs): @@ -49,7 +45,7 @@ def wrapped(self, url_format=None, **kwargs): if url_format: url += url_format % kwargs - _semaphore.acquire() + self.semaphore.acquire() try: result = func(self, url, **kwargs) @@ -57,7 +53,7 @@ def wrapped(self, url_format=None, **kwargs): return {"error": {"code": exc.response.status_code, "description": six.text_type(exc)}} finally: - _semaphore.release() + self.semaphore.release() return result @@ -67,6 +63,7 @@ def wrapped(self, url_format=None, **kwargs): class CommonObject(object): def __init__(self, client): self.client = client + self.semaphore = client.semaphore @obj_operation_wrapper def post(self, url, **kwargs): @@ -1403,12 +1400,9 @@ def wrapped(self, url, **kwargs): need_relogin = False if not kwargs.get('log_filter'): - LOG.info('\nURL: %(url)s\n' - 'Method: %(method)s\n' - 'Data: %(data)s\n', + LOG.info('URL: %(url)s, Method: %(method)s, Data: %(data)s,', {'url': (self._login_url or '') + url, - 'method': func.__name__, - 'data': kwargs.get('data')}) + 'method': func.__name__, 'data': kwargs.get('data')}) with self._session_lock.read_lock(): if self._login_url: @@ -1451,8 +1445,10 @@ def wrapped(self, url, **kwargs): r.raise_for_status() result = r.json() + response_time = r.elapsed.total_seconds() if not kwargs.get('log_filter'): - LOG.info('Response: %s', result) + LOG.info('Response: %s, Response duration time is %s', + result, response_time) return result return wrapped @@ -1468,6 +1464,9 @@ def __init__(self, config_dict): self.cert_path = config_dict.get('ssl_cert_path') self.in_band_or_not = config_dict.get('in_band_or_not') self.storage_sn = config_dict.get('storage_sn') + # To limit the requests concurrently sent to array + self.semaphore = threading.Semaphore( + config_dict.get('semaphore', constants.DEFAULT_SEMAPHORE)) self._login_url = None self._login_device_id = None diff --git a/Cinder/Ussuri/__init__.py b/Cinder/Ussuri/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Ussuri/__init__.py +++ b/Cinder/Ussuri/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Ussuri/constants.py b/Cinder/Ussuri/constants.py index 756c839..8b54853 100644 --- a/Cinder/Ussuri/constants.py +++ b/Cinder/Ussuri/constants.py @@ -37,7 +37,8 @@ DEFAULT_WAIT_INTERVAL = 5 MAX_NAME_LENGTH = 31 SOCKET_TIMEOUT = 52 -LOGIN_SOCKET_TIMEOUT = 4 +LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 PWD_EXPIRED_OR_INITIAL = (3, 4) LUN_STATUS = (LUN_ONLINE, LUN_INITIALIZING, LUN_OFFLINE) = ('27', '53', '28') diff --git a/Cinder/Ussuri/huawei_base_driver.py b/Cinder/Ussuri/huawei_base_driver.py index cd6926f..3ff5fc1 100644 --- a/Cinder/Ussuri/huawei_base_driver.py +++ b/Cinder/Ussuri/huawei_base_driver.py @@ -55,7 +55,7 @@ class HuaweiBaseDriver(object): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -86,7 +86,8 @@ def do_setup(self, context): 'ssl_cert_verify': 
self.configuration.ssl_cert_verify, 'ssl_cert_path': self.configuration.ssl_cert_path, 'in_band_or_not': self.configuration.in_band_or_not, - 'storage_sn': self.configuration.storage_sn + 'storage_sn': self.configuration.storage_sn, + 'semaphore': self.configuration.semaphore } self.local_cli = rest_client.RestClient(config_dict) self.local_cli.login() @@ -97,11 +98,17 @@ def do_setup(self, context): self.support_capability[c] = False if self.configuration.hypermetro: + self.configuration.hypermetro.update( + {'semaphore': self.configuration.semaphore} + ) self.hypermetro_rmt_cli = rest_client.RestClient( self.configuration.hypermetro) self.hypermetro_rmt_cli.login() if self.configuration.replication: + self.configuration.replication.update( + {'semaphore': self.configuration.semaphore} + ) self.replication_rmt_cli = rest_client.RestClient( self.configuration.replication) self.replication_rmt_cli.login() diff --git a/Cinder/Ussuri/huawei_conf.py b/Cinder/Ussuri/huawei_conf.py index b76e6db..d93775c 100644 --- a/Cinder/Ussuri/huawei_conf.py +++ b/Cinder/Ussuri/huawei_conf.py @@ -84,6 +84,7 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._set_qos_ignored_param, + self._get_rest_client_semaphore, ) for f in attr_funcs: @@ -640,3 +641,15 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. 
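Every branch also raises LOGIN_SOCKET_TIMEOUT from 4 to 32 seconds, so logins to slow or busy arrays are no longer cut off before the array can answer. The constant is presumably passed straight through as the requests timeout on the login call; that call site is outside this patch, so the sketch below is an assumption:

    import requests

    LOGIN_SOCKET_TIMEOUT = 32    # seconds; was 4

    def login(session, url, payload):
        # url and payload are placeholders for the array's login endpoint.
        return session.post(url, json=payload, timeout=LOGIN_SOCKET_TIMEOUT)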
The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) diff --git a/Cinder/Ussuri/rest_client.py b/Cinder/Ussuri/rest_client.py index 9d5c923..8932395 100644 --- a/Cinder/Ussuri/rest_client.py +++ b/Cinder/Ussuri/rest_client.py @@ -38,10 +38,6 @@ def _error_code(result): return result['error']['code'] -# To limit the requests concurrently sent to array -_semaphore = threading.Semaphore(20) - - def obj_operation_wrapper(func): @functools.wraps(func) def wrapped(self, url_format=None, **kwargs): @@ -49,7 +45,7 @@ def wrapped(self, url_format=None, **kwargs): if url_format: url += url_format % kwargs - _semaphore.acquire() + self.semaphore.acquire() try: result = func(self, url, **kwargs) @@ -57,7 +53,7 @@ def wrapped(self, url_format=None, **kwargs): return {"error": {"code": exc.response.status_code, "description": six.text_type(exc)}} finally: - _semaphore.release() + self.semaphore.release() return result @@ -67,6 +63,7 @@ def wrapped(self, url_format=None, **kwargs): class CommonObject(object): def __init__(self, client): self.client = client + self.semaphore = client.semaphore @obj_operation_wrapper def post(self, url, **kwargs): @@ -1403,12 +1400,9 @@ def wrapped(self, url, **kwargs): need_relogin = False if not kwargs.get('log_filter'): - LOG.info('\nURL: %(url)s\n' - 'Method: %(method)s\n' - 'Data: %(data)s\n', + LOG.info('URL: %(url)s, Method: %(method)s, Data: %(data)s,', {'url': (self._login_url or '') + url, - 'method': func.__name__, - 'data': kwargs.get('data')}) + 'method': func.__name__, 'data': kwargs.get('data')}) with self._session_lock.read_lock(): if self._login_url: @@ -1451,8 +1445,10 @@ def wrapped(self, url, **kwargs): r.raise_for_status() result = r.json() + response_time = r.elapsed.total_seconds() if not kwargs.get('log_filter'): - LOG.info('Response: %s', result) + LOG.info('Response: %s, Response duration time is %s', + result, response_time) return result return wrapped @@ -1468,6 +1464,9 @@ def __init__(self, config_dict): self.cert_path = config_dict.get('ssl_cert_path') self.in_band_or_not = config_dict.get('in_band_or_not') self.storage_sn = config_dict.get('storage_sn') + # To limit the requests concurrently sent to array + self.semaphore = threading.Semaphore( + config_dict.get('semaphore', constants.DEFAULT_SEMAPHORE)) self._login_url = None self._login_device_id = None diff --git a/Cinder/Victoria/__init__.py b/Cinder/Victoria/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Victoria/__init__.py +++ b/Cinder/Victoria/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Victoria/constants.py b/Cinder/Victoria/constants.py index 756c839..8b54853 100644 --- a/Cinder/Victoria/constants.py +++ b/Cinder/Victoria/constants.py @@ -37,7 +37,8 @@ DEFAULT_WAIT_INTERVAL = 5 MAX_NAME_LENGTH = 31 SOCKET_TIMEOUT = 52 -LOGIN_SOCKET_TIMEOUT = 4 +LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 PWD_EXPIRED_OR_INITIAL = (3, 4) LUN_STATUS = (LUN_ONLINE, LUN_INITIALIZING, LUN_OFFLINE) = ('27', '53', '28') diff --git a/Cinder/Victoria/huawei_base_driver.py b/Cinder/Victoria/huawei_base_driver.py index cd6926f..3ff5fc1 100644 --- a/Cinder/Victoria/huawei_base_driver.py +++ b/Cinder/Victoria/huawei_base_driver.py @@ -55,7 +55,7 @@ class HuaweiBaseDriver(object): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -86,7 +86,8 @@ def do_setup(self, context): 'ssl_cert_verify': 
self.configuration.ssl_cert_verify, 'ssl_cert_path': self.configuration.ssl_cert_path, 'in_band_or_not': self.configuration.in_band_or_not, - 'storage_sn': self.configuration.storage_sn + 'storage_sn': self.configuration.storage_sn, + 'semaphore': self.configuration.semaphore } self.local_cli = rest_client.RestClient(config_dict) self.local_cli.login() @@ -97,11 +98,17 @@ def do_setup(self, context): self.support_capability[c] = False if self.configuration.hypermetro: + self.configuration.hypermetro.update( + {'semaphore': self.configuration.semaphore} + ) self.hypermetro_rmt_cli = rest_client.RestClient( self.configuration.hypermetro) self.hypermetro_rmt_cli.login() if self.configuration.replication: + self.configuration.replication.update( + {'semaphore': self.configuration.semaphore} + ) self.replication_rmt_cli = rest_client.RestClient( self.configuration.replication) self.replication_rmt_cli.login() diff --git a/Cinder/Victoria/huawei_conf.py b/Cinder/Victoria/huawei_conf.py index b76e6db..d93775c 100644 --- a/Cinder/Victoria/huawei_conf.py +++ b/Cinder/Victoria/huawei_conf.py @@ -84,6 +84,7 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._set_qos_ignored_param, + self._get_rest_client_semaphore, ) for f in attr_funcs: @@ -640,3 +641,15 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. 
The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) diff --git a/Cinder/Victoria/rest_client.py b/Cinder/Victoria/rest_client.py index 9d5c923..8932395 100644 --- a/Cinder/Victoria/rest_client.py +++ b/Cinder/Victoria/rest_client.py @@ -38,10 +38,6 @@ def _error_code(result): return result['error']['code'] -# To limit the requests concurrently sent to array -_semaphore = threading.Semaphore(20) - - def obj_operation_wrapper(func): @functools.wraps(func) def wrapped(self, url_format=None, **kwargs): @@ -49,7 +45,7 @@ def wrapped(self, url_format=None, **kwargs): if url_format: url += url_format % kwargs - _semaphore.acquire() + self.semaphore.acquire() try: result = func(self, url, **kwargs) @@ -57,7 +53,7 @@ def wrapped(self, url_format=None, **kwargs): return {"error": {"code": exc.response.status_code, "description": six.text_type(exc)}} finally: - _semaphore.release() + self.semaphore.release() return result @@ -67,6 +63,7 @@ def wrapped(self, url_format=None, **kwargs): class CommonObject(object): def __init__(self, client): self.client = client + self.semaphore = client.semaphore @obj_operation_wrapper def post(self, url, **kwargs): @@ -1403,12 +1400,9 @@ def wrapped(self, url, **kwargs): need_relogin = False if not kwargs.get('log_filter'): - LOG.info('\nURL: %(url)s\n' - 'Method: %(method)s\n' - 'Data: %(data)s\n', + LOG.info('URL: %(url)s, Method: %(method)s, Data: %(data)s,', {'url': (self._login_url or '') + url, - 'method': func.__name__, - 'data': kwargs.get('data')}) + 'method': func.__name__, 'data': kwargs.get('data')}) with self._session_lock.read_lock(): if self._login_url: @@ -1451,8 +1445,10 @@ def wrapped(self, url, **kwargs): r.raise_for_status() result = r.json() + response_time = r.elapsed.total_seconds() if not kwargs.get('log_filter'): - LOG.info('Response: %s', result) + LOG.info('Response: %s, Response duration time is %s', + result, response_time) return result return wrapped @@ -1468,6 +1464,9 @@ def __init__(self, config_dict): self.cert_path = config_dict.get('ssl_cert_path') self.in_band_or_not = config_dict.get('in_band_or_not') self.storage_sn = config_dict.get('storage_sn') + # To limit the requests concurrently sent to array + self.semaphore = threading.Semaphore( + config_dict.get('semaphore', constants.DEFAULT_SEMAPHORE)) self._login_url = None self._login_device_id = None diff --git a/Cinder/Wallaby/__init__.py b/Cinder/Wallaby/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Wallaby/__init__.py +++ b/Cinder/Wallaby/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Wallaby/constants.py b/Cinder/Wallaby/constants.py index 756c839..8b54853 100644 --- a/Cinder/Wallaby/constants.py +++ b/Cinder/Wallaby/constants.py @@ -37,7 +37,8 @@ DEFAULT_WAIT_INTERVAL = 5 MAX_NAME_LENGTH = 31 SOCKET_TIMEOUT = 52 -LOGIN_SOCKET_TIMEOUT = 4 +LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 PWD_EXPIRED_OR_INITIAL = (3, 4) LUN_STATUS = (LUN_ONLINE, LUN_INITIALIZING, LUN_OFFLINE) = ('27', '53', '28') diff --git a/Cinder/Wallaby/huawei_base_driver.py b/Cinder/Wallaby/huawei_base_driver.py index cd6926f..3ff5fc1 100644 --- a/Cinder/Wallaby/huawei_base_driver.py +++ b/Cinder/Wallaby/huawei_base_driver.py @@ -55,7 +55,7 @@ class HuaweiBaseDriver(object): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -86,7 +86,8 @@ def do_setup(self, context): 'ssl_cert_verify': 
self.configuration.ssl_cert_verify, 'ssl_cert_path': self.configuration.ssl_cert_path, 'in_band_or_not': self.configuration.in_band_or_not, - 'storage_sn': self.configuration.storage_sn + 'storage_sn': self.configuration.storage_sn, + 'semaphore': self.configuration.semaphore } self.local_cli = rest_client.RestClient(config_dict) self.local_cli.login() @@ -97,11 +98,17 @@ def do_setup(self, context): self.support_capability[c] = False if self.configuration.hypermetro: + self.configuration.hypermetro.update( + {'semaphore': self.configuration.semaphore} + ) self.hypermetro_rmt_cli = rest_client.RestClient( self.configuration.hypermetro) self.hypermetro_rmt_cli.login() if self.configuration.replication: + self.configuration.replication.update( + {'semaphore': self.configuration.semaphore} + ) self.replication_rmt_cli = rest_client.RestClient( self.configuration.replication) self.replication_rmt_cli.login() diff --git a/Cinder/Wallaby/huawei_conf.py b/Cinder/Wallaby/huawei_conf.py index b76e6db..d93775c 100644 --- a/Cinder/Wallaby/huawei_conf.py +++ b/Cinder/Wallaby/huawei_conf.py @@ -84,6 +84,7 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._set_qos_ignored_param, + self._get_rest_client_semaphore, ) for f in attr_funcs: @@ -640,3 +641,15 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. 
The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) diff --git a/Cinder/Wallaby/rest_client.py b/Cinder/Wallaby/rest_client.py index 9d5c923..8932395 100644 --- a/Cinder/Wallaby/rest_client.py +++ b/Cinder/Wallaby/rest_client.py @@ -38,10 +38,6 @@ def _error_code(result): return result['error']['code'] -# To limit the requests concurrently sent to array -_semaphore = threading.Semaphore(20) - - def obj_operation_wrapper(func): @functools.wraps(func) def wrapped(self, url_format=None, **kwargs): @@ -49,7 +45,7 @@ def wrapped(self, url_format=None, **kwargs): if url_format: url += url_format % kwargs - _semaphore.acquire() + self.semaphore.acquire() try: result = func(self, url, **kwargs) @@ -57,7 +53,7 @@ def wrapped(self, url_format=None, **kwargs): return {"error": {"code": exc.response.status_code, "description": six.text_type(exc)}} finally: - _semaphore.release() + self.semaphore.release() return result @@ -67,6 +63,7 @@ def wrapped(self, url_format=None, **kwargs): class CommonObject(object): def __init__(self, client): self.client = client + self.semaphore = client.semaphore @obj_operation_wrapper def post(self, url, **kwargs): @@ -1403,12 +1400,9 @@ def wrapped(self, url, **kwargs): need_relogin = False if not kwargs.get('log_filter'): - LOG.info('\nURL: %(url)s\n' - 'Method: %(method)s\n' - 'Data: %(data)s\n', + LOG.info('URL: %(url)s, Method: %(method)s, Data: %(data)s,', {'url': (self._login_url or '') + url, - 'method': func.__name__, - 'data': kwargs.get('data')}) + 'method': func.__name__, 'data': kwargs.get('data')}) with self._session_lock.read_lock(): if self._login_url: @@ -1451,8 +1445,10 @@ def wrapped(self, url, **kwargs): r.raise_for_status() result = r.json() + response_time = r.elapsed.total_seconds() if not kwargs.get('log_filter'): - LOG.info('Response: %s', result) + LOG.info('Response: %s, Response duration time is %s', + result, response_time) return result return wrapped @@ -1468,6 +1464,9 @@ def __init__(self, config_dict): self.cert_path = config_dict.get('ssl_cert_path') self.in_band_or_not = config_dict.get('in_band_or_not') self.storage_sn = config_dict.get('storage_sn') + # To limit the requests concurrently sent to array + self.semaphore = threading.Semaphore( + config_dict.get('semaphore', constants.DEFAULT_SEMAPHORE)) self._login_url = None self._login_device_id = None diff --git a/Cinder/Xena/__init__.py b/Cinder/Xena/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Xena/__init__.py +++ b/Cinder/Xena/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Xena/constants.py b/Cinder/Xena/constants.py index 756c839..8b54853 100644 --- a/Cinder/Xena/constants.py +++ b/Cinder/Xena/constants.py @@ -37,7 +37,8 @@ DEFAULT_WAIT_INTERVAL = 5 MAX_NAME_LENGTH = 31 SOCKET_TIMEOUT = 52 -LOGIN_SOCKET_TIMEOUT = 4 +LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 PWD_EXPIRED_OR_INITIAL = (3, 4) LUN_STATUS = (LUN_ONLINE, LUN_INITIALIZING, LUN_OFFLINE) = ('27', '53', '28') diff --git a/Cinder/Xena/huawei_base_driver.py b/Cinder/Xena/huawei_base_driver.py index cd6926f..3ff5fc1 100644 --- a/Cinder/Xena/huawei_base_driver.py +++ b/Cinder/Xena/huawei_base_driver.py @@ -55,7 +55,7 @@ class HuaweiBaseDriver(object): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -86,7 +86,8 @@ def do_setup(self, context): 'ssl_cert_verify': self.configuration.ssl_cert_verify, 
'ssl_cert_path': self.configuration.ssl_cert_path, 'in_band_or_not': self.configuration.in_band_or_not, - 'storage_sn': self.configuration.storage_sn + 'storage_sn': self.configuration.storage_sn, + 'semaphore': self.configuration.semaphore } self.local_cli = rest_client.RestClient(config_dict) self.local_cli.login() @@ -97,11 +98,17 @@ def do_setup(self, context): self.support_capability[c] = False if self.configuration.hypermetro: + self.configuration.hypermetro.update( + {'semaphore': self.configuration.semaphore} + ) self.hypermetro_rmt_cli = rest_client.RestClient( self.configuration.hypermetro) self.hypermetro_rmt_cli.login() if self.configuration.replication: + self.configuration.replication.update( + {'semaphore': self.configuration.semaphore} + ) self.replication_rmt_cli = rest_client.RestClient( self.configuration.replication) self.replication_rmt_cli.login() diff --git a/Cinder/Xena/huawei_conf.py b/Cinder/Xena/huawei_conf.py index b76e6db..d93775c 100644 --- a/Cinder/Xena/huawei_conf.py +++ b/Cinder/Xena/huawei_conf.py @@ -84,6 +84,7 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._set_qos_ignored_param, + self._get_rest_client_semaphore, ) for f in attr_funcs: @@ -640,3 +641,15 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. 
The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) diff --git a/Cinder/Xena/rest_client.py b/Cinder/Xena/rest_client.py index 9d5c923..8932395 100644 --- a/Cinder/Xena/rest_client.py +++ b/Cinder/Xena/rest_client.py @@ -38,10 +38,6 @@ def _error_code(result): return result['error']['code'] -# To limit the requests concurrently sent to array -_semaphore = threading.Semaphore(20) - - def obj_operation_wrapper(func): @functools.wraps(func) def wrapped(self, url_format=None, **kwargs): @@ -49,7 +45,7 @@ def wrapped(self, url_format=None, **kwargs): if url_format: url += url_format % kwargs - _semaphore.acquire() + self.semaphore.acquire() try: result = func(self, url, **kwargs) @@ -57,7 +53,7 @@ def wrapped(self, url_format=None, **kwargs): return {"error": {"code": exc.response.status_code, "description": six.text_type(exc)}} finally: - _semaphore.release() + self.semaphore.release() return result @@ -67,6 +63,7 @@ def wrapped(self, url_format=None, **kwargs): class CommonObject(object): def __init__(self, client): self.client = client + self.semaphore = client.semaphore @obj_operation_wrapper def post(self, url, **kwargs): @@ -1403,12 +1400,9 @@ def wrapped(self, url, **kwargs): need_relogin = False if not kwargs.get('log_filter'): - LOG.info('\nURL: %(url)s\n' - 'Method: %(method)s\n' - 'Data: %(data)s\n', + LOG.info('URL: %(url)s, Method: %(method)s, Data: %(data)s,', {'url': (self._login_url or '') + url, - 'method': func.__name__, - 'data': kwargs.get('data')}) + 'method': func.__name__, 'data': kwargs.get('data')}) with self._session_lock.read_lock(): if self._login_url: @@ -1451,8 +1445,10 @@ def wrapped(self, url, **kwargs): r.raise_for_status() result = r.json() + response_time = r.elapsed.total_seconds() if not kwargs.get('log_filter'): - LOG.info('Response: %s', result) + LOG.info('Response: %s, Response duration time is %s', + result, response_time) return result return wrapped @@ -1468,6 +1464,9 @@ def __init__(self, config_dict): self.cert_path = config_dict.get('ssl_cert_path') self.in_band_or_not = config_dict.get('in_band_or_not') self.storage_sn = config_dict.get('storage_sn') + # To limit the requests concurrently sent to array + self.semaphore = threading.Semaphore( + config_dict.get('semaphore', constants.DEFAULT_SEMAPHORE)) self._login_url = None self._login_device_id = None diff --git a/Cinder/Yoga/__init__.py b/Cinder/Yoga/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Yoga/__init__.py +++ b/Cinder/Yoga/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Yoga/constants.py b/Cinder/Yoga/constants.py index 756c839..8b54853 100644 --- a/Cinder/Yoga/constants.py +++ b/Cinder/Yoga/constants.py @@ -37,7 +37,8 @@ DEFAULT_WAIT_INTERVAL = 5 MAX_NAME_LENGTH = 31 SOCKET_TIMEOUT = 52 -LOGIN_SOCKET_TIMEOUT = 4 +LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 PWD_EXPIRED_OR_INITIAL = (3, 4) LUN_STATUS = (LUN_ONLINE, LUN_INITIALIZING, LUN_OFFLINE) = ('27', '53', '28') diff --git a/Cinder/Yoga/huawei_base_driver.py b/Cinder/Yoga/huawei_base_driver.py index cd6926f..3ff5fc1 100644 --- a/Cinder/Yoga/huawei_base_driver.py +++ b/Cinder/Yoga/huawei_base_driver.py @@ -55,7 +55,7 @@ class HuaweiBaseDriver(object): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -86,7 +86,8 @@ def do_setup(self, context): 'ssl_cert_verify': self.configuration.ssl_cert_verify, 'ssl_cert_path': 
self.configuration.ssl_cert_path, 'in_band_or_not': self.configuration.in_band_or_not, - 'storage_sn': self.configuration.storage_sn + 'storage_sn': self.configuration.storage_sn, + 'semaphore': self.configuration.semaphore } self.local_cli = rest_client.RestClient(config_dict) self.local_cli.login() @@ -97,11 +98,17 @@ def do_setup(self, context): self.support_capability[c] = False if self.configuration.hypermetro: + self.configuration.hypermetro.update( + {'semaphore': self.configuration.semaphore} + ) self.hypermetro_rmt_cli = rest_client.RestClient( self.configuration.hypermetro) self.hypermetro_rmt_cli.login() if self.configuration.replication: + self.configuration.replication.update( + {'semaphore': self.configuration.semaphore} + ) self.replication_rmt_cli = rest_client.RestClient( self.configuration.replication) self.replication_rmt_cli.login() diff --git a/Cinder/Yoga/huawei_conf.py b/Cinder/Yoga/huawei_conf.py index b76e6db..d93775c 100644 --- a/Cinder/Yoga/huawei_conf.py +++ b/Cinder/Yoga/huawei_conf.py @@ -84,6 +84,7 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._set_qos_ignored_param, + self._get_rest_client_semaphore, ) for f in attr_funcs: @@ -640,3 +641,15 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. 
The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) diff --git a/Cinder/Yoga/rest_client.py b/Cinder/Yoga/rest_client.py index 9d5c923..8932395 100644 --- a/Cinder/Yoga/rest_client.py +++ b/Cinder/Yoga/rest_client.py @@ -38,10 +38,6 @@ def _error_code(result): return result['error']['code'] -# To limit the requests concurrently sent to array -_semaphore = threading.Semaphore(20) - - def obj_operation_wrapper(func): @functools.wraps(func) def wrapped(self, url_format=None, **kwargs): @@ -49,7 +45,7 @@ def wrapped(self, url_format=None, **kwargs): if url_format: url += url_format % kwargs - _semaphore.acquire() + self.semaphore.acquire() try: result = func(self, url, **kwargs) @@ -57,7 +53,7 @@ def wrapped(self, url_format=None, **kwargs): return {"error": {"code": exc.response.status_code, "description": six.text_type(exc)}} finally: - _semaphore.release() + self.semaphore.release() return result @@ -67,6 +63,7 @@ def wrapped(self, url_format=None, **kwargs): class CommonObject(object): def __init__(self, client): self.client = client + self.semaphore = client.semaphore @obj_operation_wrapper def post(self, url, **kwargs): @@ -1403,12 +1400,9 @@ def wrapped(self, url, **kwargs): need_relogin = False if not kwargs.get('log_filter'): - LOG.info('\nURL: %(url)s\n' - 'Method: %(method)s\n' - 'Data: %(data)s\n', + LOG.info('URL: %(url)s, Method: %(method)s, Data: %(data)s,', {'url': (self._login_url or '') + url, - 'method': func.__name__, - 'data': kwargs.get('data')}) + 'method': func.__name__, 'data': kwargs.get('data')}) with self._session_lock.read_lock(): if self._login_url: @@ -1451,8 +1445,10 @@ def wrapped(self, url, **kwargs): r.raise_for_status() result = r.json() + response_time = r.elapsed.total_seconds() if not kwargs.get('log_filter'): - LOG.info('Response: %s', result) + LOG.info('Response: %s, Response duration time is %s', + result, response_time) return result return wrapped @@ -1468,6 +1464,9 @@ def __init__(self, config_dict): self.cert_path = config_dict.get('ssl_cert_path') self.in_band_or_not = config_dict.get('in_band_or_not') self.storage_sn = config_dict.get('storage_sn') + # To limit the requests concurrently sent to array + self.semaphore = threading.Semaphore( + config_dict.get('semaphore', constants.DEFAULT_SEMAPHORE)) self._login_url = None self._login_device_id = None diff --git a/Cinder/Zed/__init__.py b/Cinder/Zed/__init__.py index ea50481..7da7786 100644 --- a/Cinder/Zed/__init__.py +++ b/Cinder/Zed/__init__.py @@ -1 +1 @@ -"""Version: 2.6.3""" +"""Version: 2.6.4""" diff --git a/Cinder/Zed/constants.py b/Cinder/Zed/constants.py index 756c839..8b54853 100644 --- a/Cinder/Zed/constants.py +++ b/Cinder/Zed/constants.py @@ -37,7 +37,8 @@ DEFAULT_WAIT_INTERVAL = 5 MAX_NAME_LENGTH = 31 SOCKET_TIMEOUT = 52 -LOGIN_SOCKET_TIMEOUT = 4 +LOGIN_SOCKET_TIMEOUT = 32 +DEFAULT_SEMAPHORE = 20 PWD_EXPIRED_OR_INITIAL = (3, 4) LUN_STATUS = (LUN_ONLINE, LUN_INITIALIZING, LUN_OFFLINE) = ('27', '53', '28') diff --git a/Cinder/Zed/huawei_base_driver.py b/Cinder/Zed/huawei_base_driver.py index cd6926f..3ff5fc1 100644 --- a/Cinder/Zed/huawei_base_driver.py +++ b/Cinder/Zed/huawei_base_driver.py @@ -55,7 +55,7 @@ class HuaweiBaseDriver(object): - VERSION = "2.6.3" + VERSION = "2.6.4" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) @@ -86,7 +86,8 @@ def do_setup(self, context): 'ssl_cert_verify': self.configuration.ssl_cert_verify, 'ssl_cert_path': 
self.configuration.ssl_cert_path, 'in_band_or_not': self.configuration.in_band_or_not, - 'storage_sn': self.configuration.storage_sn + 'storage_sn': self.configuration.storage_sn, + 'semaphore': self.configuration.semaphore } self.local_cli = rest_client.RestClient(config_dict) self.local_cli.login() @@ -97,11 +98,17 @@ def do_setup(self, context): self.support_capability[c] = False if self.configuration.hypermetro: + self.configuration.hypermetro.update( + {'semaphore': self.configuration.semaphore} + ) self.hypermetro_rmt_cli = rest_client.RestClient( self.configuration.hypermetro) self.hypermetro_rmt_cli.login() if self.configuration.replication: + self.configuration.replication.update( + {'semaphore': self.configuration.semaphore} + ) self.replication_rmt_cli = rest_client.RestClient( self.configuration.replication) self.replication_rmt_cli.login() diff --git a/Cinder/Zed/huawei_conf.py b/Cinder/Zed/huawei_conf.py index b76e6db..d93775c 100644 --- a/Cinder/Zed/huawei_conf.py +++ b/Cinder/Zed/huawei_conf.py @@ -84,6 +84,7 @@ def update_config_value(self): self._get_local_in_band_or_not, self._get_local_storage_sn, self._set_qos_ignored_param, + self._get_rest_client_semaphore, ) for f in attr_funcs: @@ -640,3 +641,15 @@ def _set_qos_ignored_param(xml_root): qos_ignored_params = text.split(';') qos_ignored_params = list(set(x.strip() for x in qos_ignored_params if x.strip())) setattr(constants, 'QOS_IGNORED_PARAMS', qos_ignored_params) + + def _get_rest_client_semaphore(self, xml_root): + semaphore = xml_root.findtext('Storage/Semaphore') + if not semaphore or not semaphore.strip(): + setattr(self.conf, 'semaphore', constants.DEFAULT_SEMAPHORE) + elif semaphore.isdigit() and int(semaphore) > 0: + setattr(self.conf, 'semaphore', int(semaphore)) + else: + msg = _("Semaphore configured error. 
The semaphore must be an " + "integer and must be greater than zero") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) diff --git a/Cinder/Zed/rest_client.py b/Cinder/Zed/rest_client.py index 9d5c923..8932395 100644 --- a/Cinder/Zed/rest_client.py +++ b/Cinder/Zed/rest_client.py @@ -38,10 +38,6 @@ def _error_code(result): return result['error']['code'] -# To limit the requests concurrently sent to array -_semaphore = threading.Semaphore(20) - - def obj_operation_wrapper(func): @functools.wraps(func) def wrapped(self, url_format=None, **kwargs): @@ -49,7 +45,7 @@ def wrapped(self, url_format=None, **kwargs): if url_format: url += url_format % kwargs - _semaphore.acquire() + self.semaphore.acquire() try: result = func(self, url, **kwargs) @@ -57,7 +53,7 @@ def wrapped(self, url_format=None, **kwargs): return {"error": {"code": exc.response.status_code, "description": six.text_type(exc)}} finally: - _semaphore.release() + self.semaphore.release() return result @@ -67,6 +63,7 @@ def wrapped(self, url_format=None, **kwargs): class CommonObject(object): def __init__(self, client): self.client = client + self.semaphore = client.semaphore @obj_operation_wrapper def post(self, url, **kwargs): @@ -1403,12 +1400,9 @@ def wrapped(self, url, **kwargs): need_relogin = False if not kwargs.get('log_filter'): - LOG.info('\nURL: %(url)s\n' - 'Method: %(method)s\n' - 'Data: %(data)s\n', + LOG.info('URL: %(url)s, Method: %(method)s, Data: %(data)s,', {'url': (self._login_url or '') + url, - 'method': func.__name__, - 'data': kwargs.get('data')}) + 'method': func.__name__, 'data': kwargs.get('data')}) with self._session_lock.read_lock(): if self._login_url: @@ -1451,8 +1445,10 @@ def wrapped(self, url, **kwargs): r.raise_for_status() result = r.json() + response_time = r.elapsed.total_seconds() if not kwargs.get('log_filter'): - LOG.info('Response: %s', result) + LOG.info('Response: %s, Response duration time is %s', + result, response_time) return result return wrapped @@ -1468,6 +1464,9 @@ def __init__(self, config_dict): self.cert_path = config_dict.get('ssl_cert_path') self.in_band_or_not = config_dict.get('in_band_or_not') self.storage_sn = config_dict.get('storage_sn') + # To limit the requests concurrently sent to array + self.semaphore = threading.Semaphore( + config_dict.get('semaphore', constants.DEFAULT_SEMAPHORE)) self._login_url = None self._login_device_id = None diff --git a/ConfigDoc/en/OpenStack Cinder Driver Configuration Guide.pdf b/ConfigDoc/en/OpenStack Cinder Driver Configuration Guide.pdf index e0ef30d..badf42c 100644 Binary files a/ConfigDoc/en/OpenStack Cinder Driver Configuration Guide.pdf and b/ConfigDoc/en/OpenStack Cinder Driver Configuration Guide.pdf differ diff --git a/ConfigDoc/en/OpenStack Cinder Driver For PowerVC Configuration Guide.pdf b/ConfigDoc/en/OpenStack Cinder Driver For PowerVC Configuration Guide.pdf new file mode 100644 index 0000000..e2f6189 Binary files /dev/null and b/ConfigDoc/en/OpenStack Cinder Driver For PowerVC Configuration Guide.pdf differ diff --git "a/ConfigDoc/zh/OpenStack Cinder Driver For PowerVC\351\205\215\347\275\256\346\214\207\345\215\227.pdf" "b/ConfigDoc/zh/OpenStack Cinder Driver For PowerVC\351\205\215\347\275\256\346\214\207\345\215\227.pdf" new file mode 100644 index 0000000..587ba57 Binary files /dev/null and "b/ConfigDoc/zh/OpenStack Cinder Driver For PowerVC\351\205\215\347\275\256\346\214\207\345\215\227.pdf" differ diff --git "a/ConfigDoc/zh/OpenStack Cinder Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" 
"b/ConfigDoc/zh/OpenStack Cinder Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" index 25c5859..dee884c 100644 Binary files "a/ConfigDoc/zh/OpenStack Cinder Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" and "b/ConfigDoc/zh/OpenStack Cinder Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" differ diff --git a/PowerVC/__init__.py b/PowerVC/__init__.py new file mode 100644 index 0000000..7da7786 --- /dev/null +++ b/PowerVC/__init__.py @@ -0,0 +1 @@ +"""Version: 2.6.4""" diff --git a/PowerVC/constants.py b/PowerVC/constants.py new file mode 100644 index 0000000..6d857b1 --- /dev/null +++ b/PowerVC/constants.py @@ -0,0 +1,203 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +STATUS_HEALTH = '1' +STATUS_ACTIVE = '43' +STATUS_RUNNING = '10' +STATUS_VOLUME_READY = '27' +STATUS_LUNCOPY_READY = '40' +STATUS_QOS_ACTIVE = '2' +STATUS_QOS_INACTIVATED = '45' +STATUS_SNAPSHOT_INACTIVE = '45' +STATUS_SNAPSHOT_ACTIVE = '43' +LUN_TYPE = '11' +SNAPSHOT_TYPE = '27' + +BLOCK_STORAGE_POOL_TYPE = '1' +DORADO_V6_POOL_TYPE = '0' +FILE_SYSTEM_POOL_TYPE = '2' + +HOSTGROUP_PREFIX = 'OpenStack_HostGroup_' +LUNGROUP_PREFIX = 'OpenStack_LunGroup_' +MAPPING_VIEW_PREFIX = 'OpenStack_Mapping_View_' +PORTGROUP_PREFIX = 'OpenStack_PortGroup_' +QOS_NAME_PREFIX = 'OpenStack_' +PORTGROUP_DESCRIP_PREFIX = "Please do NOT modify this. 
Engine ID: " +FC_PORT_CONNECTED = '10' +FC_INIT_ONLINE = '27' +FC_PORT_MODE_FABRIC = '0' +CAPACITY_UNIT = 1024 * 1024 * 2 +DEFAULT_WAIT_TIMEOUT = 3600 * 24 * 30 +DEFAULT_WAIT_INTERVAL = 5 + +MIGRATION_WAIT_INTERVAL = 5 +MIGRATION_FAULT = '74' +MIGRATION_COMPLETE = '76' + +ERROR_CONNECT_TO_SERVER = -403 +ERROR_UNAUTHORIZED_TO_SERVER = -401 +ERROR_BAD_STATUS_LINE = -400 +HTTP_ERROR_NOT_FOUND = 404 +SOCKET_TIMEOUT = 52 +ERROR_VOLUME_ALREADY_EXIST = 1077948993 +LOGIN_SOCKET_TIMEOUT = 32 +ERROR_VOLUME_NOT_EXIST = 1077939726 +ERROR_LUN_NOT_EXIST = 1077936859 +ERROR_SNAPSHOT_NOT_EXIST = 1077937880 +FC_INITIATOR_NOT_EXIST = 1077948996 +HYPERMETROPAIR_NOT_EXIST = 1077674242 +REPLICATIONPAIR_NOT_EXIST = 1077937923 +REPLICG_IS_EMPTY = 1077937960 + +RELOGIN_ERROR_PASS = [ERROR_VOLUME_NOT_EXIST] +RUNNING_NORMAL = '1' +RUNNING_SYNC = '23' +RUNNING_STOP = '41' +RUNNING_TO_BE_SYNC = '100' +METRO_SYNC_NORMAL = (RUNNING_NORMAL, RUNNING_SYNC, RUNNING_TO_BE_SYNC) +HEALTH_NORMAL = '1' + +NO_SPLITMIRROR_LICENSE = 1077950233 +NO_MIGRATION_LICENSE = 1073806606 + +THICK_LUNTYPE = 0 +THIN_LUNTYPE = 1 +MAX_NAME_LENGTH = 31 +MAX_VOL_DESCRIPTION = 170 +PORT_NUM_PER_CONTR = 2 +MAX_QUERY_COUNT = 100 +MAX_QUERY_LUN_COUNT = 200 + +OS_TYPE = {'Linux': '0', + 'Windows': '1', + 'Solaris': '2', + 'HP-UX': '3', + 'AIX': '4', + 'XenServer': '5', + 'Mac OS X': '6', + 'VMware ESX': '7'} + +LOWER_LIMIT_KEYS = ['MINIOPS', 'LATENCY', 'MINBANDWIDTH'] +UPPER_LIMIT_KEYS = ['MAXIOPS', 'MAXBANDWIDTH'] +PWD_EXPIRED_OR_INITIAL = (3, 4) + +DEFAULT_REPLICA_WAIT_INTERVAL = 1 +DEFAULT_REPLICA_WAIT_TIMEOUT = 20 + +REPLICA_SYNC_MODEL = '1' +REPLICA_ASYNC_MODEL = '2' +REPLICA_SPEED = '2' +REPLICA_PERIOD = '3600' +REPLICA_SECOND_RO = '2' +REPLICA_SECOND_RW = '3' +REPLICG_PERIOD = '60' + +REPLICA_RUNNING_STATUS_KEY = 'RUNNINGSTATUS' +REPLICA_RUNNING_STATUS_INITIAL_SYNC = '21' +REPLICA_RUNNING_STATUS_SYNC = '23' +REPLICA_RUNNING_STATUS_SYNCED = '24' +REPLICA_RUNNING_STATUS_NORMAL = '1' +REPLICA_RUNNING_STATUS_SPLIT = '26' +REPLICA_RUNNING_STATUS_ERRUPTED = '34' +REPLICA_RUNNING_STATUS_INVALID = '35' + +REPLICA_HEALTH_STATUS_KEY = 'HEALTHSTATUS' +REPLICA_HEALTH_STATUS_NORMAL = '1' + +REPLICA_LOCAL_DATA_STATUS_KEY = 'PRIRESDATASTATUS' +REPLICA_REMOTE_DATA_STATUS_KEY = 'SECRESDATASTATUS' +REPLICA_DATA_SYNC_KEY = 'ISDATASYNC' +REPLICA_DATA_STATUS_SYNCED = '1' +REPLICA_DATA_STATUS_COMPLETE = '2' +REPLICA_DATA_STATUS_INCOMPLETE = '3' + +SNAPSHOT_NOT_EXISTS_WARN = 'warning' +SNAPSHOT_NOT_EXISTS_RAISE = 'raise' + +LUN_TYPE_MAP = {'Thick': THICK_LUNTYPE, + 'Thin': THIN_LUNTYPE} + +VALID_PRODUCT = ['T', 'TV2', 'V3', 'V5', '18000', 'Dorado', 'V6'] +VALID_PROTOCOL = ['FC', 'iSCSI'] +DORADO_V6_AND_V6_PRODUCT = ('Dorado', 'V6') +VALID_WRITE_TYPE = ['1', '2'] +VOLUME_NOT_EXISTS_WARN = 'warning' +VOLUME_NOT_EXISTS_RAISE = 'raise' + +LUN_COPY_SPEED_TYPES = ( + LUN_COPY_SPEED_LOW, + LUN_COPY_SPEED_MEDIUM, + LUN_COPY_SPEED_HIGH, + LUN_COPY_SPEED_HIGHEST +) = ('1', '2', '3', '4') + +REPLICG_STATUS_NORMAL = '1' +REPLICG_STATUS_SYNCING = '23' +REPLICG_STATUS_TO_BE_RECOVERD = '33' +REPLICG_STATUS_INTERRUPTED = '34' +REPLICG_STATUS_SPLITED = '26' +REPLICG_STATUS_INVALID = '35' +REPLICG_HEALTH_NORMAL = '1' + +OPTIMAL_MULTIPATH_NUM = 16 + +AVAILABLE_FEATURE_STATUS = (1, 2) +DEDUP_FEATURES = ('SmartDedupe (for LUN)', + 'SmartDedupe (for LUNsAndFS)', + 'SmartDedupe & SmartCompression (for LUN)', + 'Effective Capacity') +COMPRESSION_FEATURES = ('SmartCompression (for LUN)', + 'SmartCompression (for LUNsAndFS)', + 'SmartDedupe & SmartCompression (for LUN)', + 'Effective 
Capacity') + +FEATURES_DICTS = { + "SmartPartition": "cachepartition", + "SmartCache": "smartcachepartition", + "SmartQoS": "ioclass", + "HyperCopy": "LUNCOPY", + "SmartThin": None, + "HyperMetro": "HyperMetroPair", +} + +DEFAULT_CLONE_MODE = "luncopy" + +HYPERMETRO_WAIT_INTERVAL = 5 + +FC_PORT_STATUS_NORMAL = "1" +FC_PORT_STATUS_RUNNING = "2" +FC_PORT_STATUS_LINK_UP = "10" +FC_PORT_STATUS_LINK_DOWN = "11" +FC_PORT_RUNNING_NORMAL = (FC_PORT_STATUS_NORMAL, FC_PORT_STATUS_RUNNING, + FC_PORT_STATUS_LINK_UP, FC_PORT_STATUS_LINK_DOWN) +FC_PORT_SPEED_ERROR = "-1" + +CLONE_STATUS_HEALTH = '0' +CLONE_STATUS_COMPLETE = (CLONE_COMPLETE,) = ('2',) +CLONE_PAIR_NOT_EXIST = "1073798147" +SUPPORT_CLONE_PAIR_VERSION = "V600R003C00" + +LUN_HEALTH_STATUS_KEY = 'HEALTHSTATUS' +LUN_HEALTH_STATUS_NORMAL = '1' + +LUN_RUNNING_STATUS_KEY = 'RUNNINGSTATUS' +LUN_RUNNING_STATUS_ONLINE = '27' + +FUNCTION_TYPE_LUN = '1' +FUNCTION_TYPE_SNAP = '2' +FUNCTION_TYPE_CLONE = '3' + +LUN_USAGE_TYPE_TRADITIONAL = '0' + diff --git a/PowerVC/extend/__init__.py b/PowerVC/extend/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/PowerVC/extend/fc_zone_helper.py b/PowerVC/extend/fc_zone_helper.py new file mode 100644 index 0000000..bfc97a3 --- /dev/null +++ b/PowerVC/extend/fc_zone_helper.py @@ -0,0 +1,278 @@ +# Copyright (c) 2015 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import importutils + +from cinder import exception +from cinder.i18n import _ +from cinder.volume import configuration as config +from cinder.zonemanager import utils as fczm_utils + +LOG = logging.getLogger(__name__) + +controller_list = ['A', 'B', 'C', 'D'] + +zone_manager_opts = [ + cfg.StrOpt('zone_driver', + default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver' + '.BrcdFCZoneDriver', + help='FC Zone Driver responsible for zone management') +] + + +class FCZoneHelper(object): + """FC zone helper for Huawei driver.""" + + def __init__(self, zm, client): + self.zm = zm + self.client = client + + def _check_fc_port_and_init(self, wwns, hostid, fabric_map, nsinfos): + """Check that array FC ports and host WWNs are connected to the switch. + + If no FC port on the array or no initiator on the host is + connected to the switch, raise an error.
+ """ + if not fabric_map: + msg = _('No FC port on array is connected to switch.') + LOG.error(msg) + raise exception.CinderException(msg) + + no_wwn_connected_to_switch = True + for wwn in wwns: + formatted_initiator = fczm_utils.get_formatted_wwn(wwn) + for fabric in fabric_map: + nsinfo = nsinfos[fabric] + if formatted_initiator in nsinfo: + no_wwn_connected_to_switch = False + self.client.ensure_fc_initiator_added(wwn, hostid) + break + if no_wwn_connected_to_switch: + msg = _('No wwn on host is connected to switch.') + LOG.error(msg) + raise exception.CinderException(msg) + + def build_ini_tgt_map(self, wwns, host_id, port_list, is_add): + fabric_map = self.zm.get_san_context(port_list) + + nsinfos = {} + cfgmap_from_fabrics = {} + for fabric in fabric_map: + nsinfos[fabric] = self._get_nameserver_info(fabric) + cfgmap_from_fabric = self._get_active_zone_set(fabric) + cfgmap_from_fabrics[fabric] = cfgmap_from_fabric + + self._check_fc_port_and_init(wwns, host_id, fabric_map, nsinfos) + return self._build_ini_tgt_map(wwns, is_add, nsinfos, + cfgmap_from_fabrics) + + def _build_ini_tgt_map(self, wwns, need_add_con, nsinfos, + cfgmap_from_fabrics): + tgt_port_wwns = [] + init_targ_map_total = {} + fabric_maps = {} + for contr in controller_list: + port_list_from_contr = self.client.get_fc_ports_from_contr(contr) + if port_list_from_contr: + fabric_map = self.zm.get_san_context(port_list_from_contr) + fabric_maps[contr] = fabric_map + for wwn in wwns: + init_targ_map = {} + tmp_port_list = [] + tgt_port_for_map = [] + tmp_flag = False + need_new_zone = False + for contr in fabric_maps: + (fc_port_for_zone, tmp_flag) = \ + self._get_one_fc_port_for_zone(wwn, contr, nsinfos, + cfgmap_from_fabrics, + fabric_maps) + if tmp_flag: + need_new_zone = True + if fc_port_for_zone: + tgt_port_wwns.append(fc_port_for_zone) + if not tmp_flag: + tgt_port_for_map.append(fc_port_for_zone) + if tmp_flag: + tmp_port_list.append(fc_port_for_zone) + + init_targ_map[wwn] = tmp_port_list + LOG.debug("tmp_port_list: %s" % tmp_port_list) + init_targ_map_total[wwn] = tgt_port_for_map + if need_new_zone and need_add_con: + LOG.debug("Got init_targ_map to create zone: %s" + % init_targ_map) + self.zm.add_connection(init_targ_map) + + tgt_port_wwns = list(set(tgt_port_wwns)) + + return (tgt_port_wwns, init_targ_map_total) + + def _get_fabric_vendor(self): + zone_config = config.Configuration(zone_manager_opts, + 'fc-zone-manager') + fabric_driver = zone_config.zone_driver + LOG.debug('Using fabric driver: %s' % fabric_driver) + driver_vendor = None + try: + driver_vendor = fabric_driver.split('.')[3] + except Exception: + msg = _('Get fabric driver vendor error.') + LOG.exception(msg) + raise exception.VolumeBackendAPIException(data=msg) + return driver_vendor + + def _get_nameserver_info(self, fabric): + driver_vendor = self._get_fabric_vendor() + if driver_vendor == 'brocade': + nsinfo = self._get_brcd_nsinfo(fabric) + elif driver_vendor == 'cisco': + nsinfo = self._get_cisco_nsinfo(fabric) + else: + msg = ('Unsupported fabric, vendor name: %s.' 
% driver_vendor) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return nsinfo + + def _get_cisco_config(self, fabric): + fabric_ip = self.zm.driver.fabric_configs[fabric].safe_get( + 'cisco_fc_fabric_address') + fabric_user = self.zm.driver.fabric_configs[fabric].safe_get( + 'cisco_fc_fabric_user') + fabric_pwd = self.zm.driver.fabric_configs[fabric].safe_get( + 'cisco_fc_fabric_password') + fabric_port = self.zm.driver.fabric_configs[fabric].safe_get( + 'cisco_fc_fabric_port') + zoning_vsan = self.zm.driver.fabric_configs[fabric].safe_get( + 'cisco_zoning_vsan') + return (fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) + + def _get_brcd_nsinfo(self, fabric): + conn = self.zm.driver._get_cli_client(fabric) + try: + nsinfo = conn.get_nameserver_info() + LOG.debug("name server info from fabric: %s", nsinfo) + conn.cleanup() + except exception.BrocadeZoningCliException: + if not conn.is_supported_firmware(): + msg = _("Unsupported firmware on switch %s. Make sure " + "switch is running firmware v6.4 or higher." + ) % conn.switch_ip + LOG.error(msg) + raise exception.FCZoneDriverException(msg) + with excutils.save_and_reraise_exception(): + LOG.exception("Error getting name server info.") + except Exception: + msg = _("Failed to get name server info.") + LOG.exception(msg) + raise exception.FCZoneDriverException(msg) + return nsinfo + + def _get_cisco_nsinfo(self, fabric): + (fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) = ( + self._get_cisco_config(fabric)) + try: + conn = importutils.import_object( + self.zm.driver.configuration.cisco_sb_connector, + ipaddress=fabric_ip, + username=fabric_user, + password=fabric_pwd, port=fabric_port, + vsan=zoning_vsan) + nsinfo = conn.get_nameserver_info() + LOG.debug("name server info from fabric: %s", + nsinfo) + conn.cleanup() + except exception.CiscoZoningCliException: + with excutils.save_and_reraise_exception(): + LOG.exception("Error getting show fcns database " + "info.") + except Exception: + msg = ("Failed to get show fcns database info.") + LOG.exception(msg) + raise exception.FCZoneDriverException(msg) + return nsinfo + + def _get_one_fc_port_for_zone(self, initiator, contr, nsinfos, + cfgmap_from_fabrics, fabric_maps): + """Get on FC port per one controller. + + task flow: + 1. Get all the FC port from the array. + 2. Filter out ports belonged to the specific controller + and the status is connected. + 3. Filter out ports connected to the fabric configured in cinder.conf. + 4. Get active zones set from switch. + 5. Find a port according to three cases. + """ + LOG.info("Get in function _get_one_fc_port_for_zone. " + "Initiator: %s", initiator) + + formatted_initiator = fczm_utils.get_formatted_wwn(initiator) + fabric_map = fabric_maps[contr] + if not fabric_map: + return (None, False) + + port_zone_number_map = {} + + for fabric in fabric_map: + LOG.info("Dealing with fabric: %s", fabric) + nsinfo = nsinfos[fabric] + if formatted_initiator not in nsinfo: + continue + + final_port_list_per_fabric = fabric_map[fabric] + cfgmap_from_fabric = cfgmap_from_fabrics[fabric] + + zones_members = cfgmap_from_fabric['zones'].values() + + for port in final_port_list_per_fabric: + port_zone_number_map[port] = 0 + formatted_port = fczm_utils.get_formatted_wwn(port) + for zones_member in zones_members: + if formatted_port in zones_member: + # For the second case use. + if formatted_initiator in zones_member: + # First case: found a port in the same + # zone with the given initiator. 
+ return (port, False) + # For the third case use. + port_zone_number_map[port] += 1 + if port_zone_number_map == {}: + return (None, False) + + temp_list = [] + temp_list = sorted(port_zone_number_map.items(), key=lambda d: d[1]) + # Third case: find the port referenced in the fewest zones. + return (temp_list[0][0], True) + + def _get_active_zone_set(self, fabric): + driver_vendor = self._get_fabric_vendor() + if driver_vendor == 'brocade': + conn = self.zm.driver._get_cli_client(fabric) + cfgmap_from_fabric = self.zm.driver._get_active_zone_set(conn) + conn.cleanup() + elif driver_vendor == 'cisco': + (fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) = ( + self._get_cisco_config(fabric)) + cfgmap_from_fabric = self.zm.driver.get_active_zone_set( + fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) + else: + msg = ('Unsupported fabric, vendor name: %s.' % driver_vendor) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return cfgmap_from_fabric diff --git a/PowerVC/fc_zone_helper.py b/PowerVC/fc_zone_helper.py new file mode 100644 index 0000000..c33372c --- /dev/null +++ b/PowerVC/fc_zone_helper.py @@ -0,0 +1,149 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+ +import six + +from oslo_log import log as logging + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.huawei import constants + + +LOG = logging.getLogger(__name__) + + +class FCZoneHelper(object): + """FC zone helper for Huawei driver.""" + + def __init__(self, fcsan_lookup_service, client): + self.fc_san = fcsan_lookup_service + self.client = client + + def _get_online_fc_ports(self): + port_map = {} + + fc_ports = self.client.get_fc_ports() + for port in fc_ports: + if port['RUNNINGSTATUS'] == constants.FC_PORT_CONNECTED: + port_wwn = port['WWN'] + port_map[port_wwn] = { + 'id': port['ID'], + 'runspeed': int(port['RUNSPEED']), + } + + return port_map + + def _get_fabric(self, ini_port_wwns, tgt_port_wwns): + ini_tgt_map = self.fc_san.get_device_mapping_from_network( + ini_port_wwns, tgt_port_wwns) + + def _filter_not_connected_fabric(fabric_name, fabric): + ini_port_wwn_list = fabric.get('initiator_port_wwn_list') + tgt_port_wwn_list = fabric.get('target_port_wwn_list') + + if not ini_port_wwn_list or not tgt_port_wwn_list: + LOG.warning("Fabric %(fabric_name)s doesn't really " + "connect host and array: %(fabric)s.", + {'fabric_name': fabric_name, + 'fabric': fabric}) + return None + + return [ini_port_wwn_list, tgt_port_wwn_list] + + valid_fabrics = [] + for fabric in ini_tgt_map: + pair = _filter_not_connected_fabric(fabric, ini_tgt_map[fabric]) + if pair: + valid_fabrics.append(pair) + + if not valid_fabrics: + msg = _("No valid fabric connection: %s.") % ini_tgt_map + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.info("Got fabric: %s.", valid_fabrics) + return valid_fabrics + + def _get_used_ports(self, host_id): + portgroup_id = self.client.get_tgt_port_group( + constants.PORTGROUP_PREFIX + host_id) + if not portgroup_id: + return [] + + ports = self.client.get_ports_by_portg(portgroup_id) + return ports + + def _get_fc_zone(self, wwns, host_id): + port_map = self._get_online_fc_ports() + + fabrics = self._get_fabric(wwns, list(port_map.keys())) + used_ports = self._get_used_ports(host_id) + + total_ports = [] + ini_tgt_map = {} + for fabric in fabrics: + total_ports = list(set(total_ports) | set(fabric[1])) + new_ports = list(set(fabric[1]) - set(used_ports)) + if not new_ports: + continue + for ini in fabric[0]: + if ini not in ini_tgt_map: + ini_tgt_map[ini] = new_ports + else: + ini_tgt_map[ini].extend(new_ports) + + return ini_tgt_map, total_ports, port_map + + def build_ini_targ_map(self, wwns, host_id): + ini_tgt_map, total_ports, port_map = self._get_fc_zone(wwns, host_id) + + new_ports = set() + for v in six.itervalues(ini_tgt_map): + new_ports |= set(v) + + portgroup_name = constants.PORTGROUP_PREFIX + host_id + portgroup_id = self.client.get_tgt_port_group(portgroup_name) + if portgroup_id: + LOG.info("Got existing portgroup: %s.", portgroup_name) + for port in new_ports: + self.client.add_port_to_portg(portgroup_id, port_map[port]['id']) + + LOG.info("ini_targ_map: %(map)s, target_wwns: %(wwns)s.", + {"map": ini_tgt_map, + "wwns": total_ports}) + return list(total_ports), portgroup_id, ini_tgt_map + + def get_init_targ_map(self, wwns, host_id): + error_ret = ([], {}) + if not host_id: + return error_ret + + view_name = constants.MAPPING_VIEW_PREFIX + host_id + view_id = self.client.find_mapping_view(view_name) + if not view_id: + return error_ret + + portg_id = self.client.get_portgroup_by_view(view_id) + if portg_id: + ports_in_group = self.client.get_fc_ports_by_portgroup(portg_id) + for port_id in 
ports_in_group.values(): + self.client.remove_port_from_portgroup(portg_id, port_id) + ports = list(ports_in_group.keys()) + else: + fc_ports = self.client.get_fc_ports() + ports = [p['WWN'] for p in fc_ports] + + return portg_id, dict.fromkeys(wwns, ports) diff --git a/PowerVC/huawei_conf.py b/PowerVC/huawei_conf.py new file mode 100644 index 0000000..e845b87 --- /dev/null +++ b/PowerVC/huawei_conf.py @@ -0,0 +1,512 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Set Huawei private configuration into Configuration object. + +For conveniently get private configuration. We parse Huawei config file +and set every property into Configuration object as an attribute. +""" + +import base64 +import six +from defusedxml import ElementTree as ET + +from oslo_log import log as logging + +from cinder import exception +from cinder.i18n import _ +from cinder import utils +from cinder.volume.drivers.huawei import constants + +LOG = logging.getLogger(__name__) + + +class HuaweiConf(object): + def __init__(self, conf): + self.conf = conf + + def _encode_authentication(self): + need_encode = False + tree = ET.parse(self.conf.cinder_huawei_conf_file) + xml_root = tree.getroot() + name_node = xml_root.find('Storage/UserName') + pwd_node = xml_root.find('Storage/UserPassword') + vstore_node = xml_root.find('Storage/vStoreName') + if name_node is not None and not name_node.text.startswith('!$$$'): + encoded = base64.b64encode(six.b(name_node.text)).decode() + name_node.text = '!$$$' + encoded + need_encode = True + + if pwd_node is not None and not pwd_node.text.startswith('!$$$'): + encoded = base64.b64encode(six.b(pwd_node.text)).decode() + pwd_node.text = '!$$$' + encoded + need_encode = True + + if vstore_node is not None and not vstore_node.text.startswith('!$$$'): + encoded = base64.b64encode(six.b(vstore_node.text)).decode() + vstore_node.text = '!$$$' + encoded + need_encode = True + + if need_encode: + utils.execute('chmod', + '600', + self.conf.cinder_huawei_conf_file, + run_as_root=True) + tree.write(self.conf.cinder_huawei_conf_file, 'UTF-8') + + def update_config_value(self): + self._encode_authentication() + + set_attr_funcs = (self._san_address, + self._san_user, + self._san_password, + self._vstore_name, + self._san_product, + self._san_protocol, + self._lun_type, + self._lun_ready_wait_interval, + self._lun_copy_wait_interval, + self._lun_timeout, + self._lun_write_type, + self._lun_prefetch, + self._storage_pools, + self._iscsi_default_target_ip, + self._iscsi_info, + self._fc_info, + self._ssl_cert_path, + self._ssl_cert_verify, + self._lun_copy_speed, + self._dedup_license, + self._compression_license,) + + tree = ET.parse(self.conf.cinder_huawei_conf_file) + xml_root = tree.getroot() + for f in set_attr_funcs: + f(xml_root) + + @staticmethod + def _get_ssl_verify(xml_root): + value = False + text = xml_root.findtext('Storage/SSLCertVerify') + if text: + if text.lower() in ('true', 'false'): + value = 
text.lower() == 'true' + else: + msg = _("SSLCertVerify configured error.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + return value + + def _ssl_cert_path(self, xml_root): + text = xml_root.findtext('Storage/SSLCertPath') + ssl_value = self._get_ssl_verify(xml_root) + if text and ssl_value: + setattr(self.conf, 'ssl_cert_path', text) + elif not text and ssl_value: + msg = _("Cert path is necessary if SSLCertVerify is True.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + else: + setattr(self.conf, 'ssl_cert_path', None) + + def _ssl_cert_verify(self, xml_root): + value = self._get_ssl_verify(xml_root) + setattr(self.conf, 'ssl_cert_verify', value) + + def _san_address(self, xml_root): + text = xml_root.findtext('Storage/RestURL') + if not text: + msg = _("RestURL is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + addrs = text.split(';') + addrs = list(set([x.strip() for x in addrs if x.strip()])) + setattr(self.conf, 'san_address', addrs) + + def _san_user(self, xml_root): + text = xml_root.findtext('Storage/UserName') + if not text: + msg = _("UserName is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + user = base64.b64decode(six.b(text[4:])).decode() + setattr(self.conf, 'san_user', user) + + def _san_password(self, xml_root): + text = xml_root.findtext('Storage/UserPassword') + if not text: + msg = _("UserPassword is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + pwd = base64.b64decode(six.b(text[4:])).decode() + setattr(self.conf, 'san_password', pwd) + + def _vstore_name(self, xml_root): + text = xml_root.findtext('Storage/vStoreName') + if text: + vstore_name = base64.b64decode(six.b(text[4:])).decode() + setattr(self.conf, 'vstore_name', vstore_name) + else: + setattr(self.conf, 'vstore_name', None) + + def _set_extra_constants_by_product(self, product): + extra_constants = {} + if product in constants.DORADO_V6_AND_V6_PRODUCT: + extra_constants['QOS_SPEC_KEYS'] = ( + 'maxIOPS', 'maxBandWidth', 'IOType') + extra_constants['QOS_IOTYPES'] = ('2',) + extra_constants['SUPPORT_LUN_TYPES'] = ('Thin',) + extra_constants['DEFAULT_LUN_TYPE'] = 'Thin' + else: + extra_constants['QOS_SPEC_KEYS'] = ( + 'maxIOPS', 'minIOPS', 'minBandWidth', + 'maxBandWidth', 'latency', 'IOType') + extra_constants['QOS_IOTYPES'] = ('0', '1', '2') + extra_constants['SUPPORT_LUN_TYPES'] = ('Thick', 'Thin') + extra_constants['DEFAULT_LUN_TYPE'] = 'Thick' + + for k in extra_constants: + setattr(constants, k, extra_constants[k]) + + def _san_product(self, xml_root): + text = xml_root.findtext('Storage/Product') + if not text: + msg = _("SAN product is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + product = text.strip() + if product not in constants.VALID_PRODUCT: + msg = (_("Invalid SAN product '%(text)s', SAN product must be in " + "%(valid)s.") + % {'text': product, 'valid': constants.VALID_PRODUCT}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + self._set_extra_constants_by_product(product) + setattr(self.conf, 'san_product', product) + + def _san_protocol(self, xml_root): + text = xml_root.findtext('Storage/Protocol') + if not text: + msg = _("SAN protocol is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if text not in constants.VALID_PROTOCOL: + msg = (_("Invalid SAN protocol '%(text)s', SAN protocol must be " + "in %(valid)s.") + % {'text': text, 'valid': constants.VALID_PROTOCOL}) + 
LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + protocol = text.strip() + setattr(self.conf, 'san_protocol', protocol) + + def _lun_type(self, xml_root): + lun_type = constants.DEFAULT_LUN_TYPE + text = xml_root.findtext('LUN/LUNType') + if text: + lun_type = text.strip() + if lun_type not in constants.LUN_TYPE_MAP: + msg = _("Invalid lun type %s is configured.") % lun_type + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if lun_type not in constants.SUPPORT_LUN_TYPES: + msg = _("%(array)s array requires %(valid)s lun type, " + "but %(conf)s is specified." + ) % {'array': self.conf.san_product, + 'valid': constants.SUPPORT_LUN_TYPES, + 'conf': lun_type} + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + setattr(self.conf, 'lun_type', constants.LUN_TYPE_MAP[lun_type]) + + def _lun_ready_wait_interval(self, xml_root): + text = xml_root.findtext('LUN/LUNReadyWaitInterval') + + if text and not text.isdigit(): + msg = (_("Invalid LUN_Ready_Wait_Interval '%s', " + "LUN_Ready_Wait_Interval must be a digit.") + % text) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL + setattr(self.conf, 'lun_ready_wait_interval', int(interval)) + + def _lun_copy_wait_interval(self, xml_root): + text = xml_root.findtext('LUN/LUNcopyWaitInterval') + + if text and not text.isdigit(): + msg = (_("Invalid LUN_Copy_Wait_Interval '%s', " + "LUN_Copy_Wait_Interval must be a digit.") + % text) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL + setattr(self.conf, 'lun_copy_wait_interval', int(interval)) + + def _lun_timeout(self, xml_root): + text = xml_root.findtext('LUN/Timeout') + + if text and not text.isdigit(): + msg = (_("Invalid LUN timeout '%s', LUN timeout must be a digit.") + % text) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + interval = text.strip() if text else constants.DEFAULT_WAIT_TIMEOUT + setattr(self.conf, 'lun_timeout', int(interval)) + + def _lun_write_type(self, xml_root): + text = xml_root.findtext('LUN/WriteType') + write_type = text.strip() if text else '1' + + if write_type not in constants.VALID_WRITE_TYPE: + msg = (_("Invalid LUN WriteType '%(text)s', LUN WriteType must be " + "in %(valid)s.") + % {'text': write_type, 'valid': constants.VALID_WRITE_TYPE}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + setattr(self.conf, 'lun_write_type', write_type) + + def _lun_prefetch(self, xml_root): + prefetch_type = '3' + prefetch_value = '0' + + node = xml_root.find('LUN/Prefetch') + if (node is not None + and node.attrib['Type'] + and node.attrib['Value']): + prefetch_type = node.attrib['Type'].strip() + if prefetch_type not in ['0', '1', '2', '3']: + msg = (_( + "Invalid prefetch type '%s' is configured. 
" + "PrefetchType must be in 0,1,2,3.") % prefetch_type) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + prefetch_value = node.attrib['Value'].strip() + factor = {'1': 2} + factor = int(factor.get(prefetch_type, '1')) + prefetch_value = int(prefetch_value) * factor + prefetch_value = six.text_type(prefetch_value) + + setattr(self.conf, 'lun_prefetch_type', prefetch_type) + setattr(self.conf, 'lun_prefetch_value', prefetch_value) + + def _storage_pools(self, xml_root): + nodes = xml_root.findall('LUN/StoragePool') + if not nodes: + msg = _('Storage pool is not configured.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + texts = [x.text for x in nodes] + merged_text = ';'.join(texts) + pools = set(x.strip() for x in merged_text.split(';') if x.strip()) + if not pools: + msg = _('Invalid storage pool is configured.') + LOG.error(msg) + raise exception.InvalidInput(msg) + + setattr(self.conf, 'storage_pools', list(pools)) + + def _iscsi_default_target_ip(self, xml_root): + text = xml_root.findtext('iSCSI/DefaultTargetIP') + target_ip = text.split() if text else [] + setattr(self.conf, 'iscsi_default_target_ip', target_ip) + + def _iscsi_info(self, xml_root): + nodes = xml_root.findall('iSCSI/Initiator') + if nodes is None: + setattr(self.conf, 'iscsi_info', []) + return + + iscsi_info = [] + for node in nodes: + props = {} + for item in node.items(): + props[item[0].strip()] = item[1].strip() + + iscsi_info.append(props) + + setattr(self.conf, 'iscsi_info', iscsi_info) + + def _fc_info(self, xml_root): + nodes = xml_root.findall('FC/Initiator') + if nodes is None: + setattr(self.conf, 'fc_info', []) + return + + fc_info = [] + for node in nodes: + props = {} + for item in node.items(): + props[item[0].strip()] = item[1].strip() + + fc_info.append(props) + + setattr(self.conf, 'fc_info', fc_info) + + def _parse_rmt_iscsi_info(self, iscsi_info): + if not (iscsi_info and iscsi_info.strip()): + return [] + + # Consider iscsi_info value: + # ' {Name:xxx ;;TargetPortGroup: xxx};\n' + # '{Name:\t\rxxx;CHAPinfo: mm-usr#mm-pwd} ' + + # Step 1, ignore whitespace characters, convert to: + # '{Name:xxx;;TargetPortGroup:xxx};{Name:xxx;CHAPinfo:mm-usr#mm-pwd}' + iscsi_info = ''.join(iscsi_info.split()) + + # Step 2, make initiators configure list, convert to: + # ['Name:xxx;;TargetPortGroup:xxx', 'Name:xxx;CHAPinfo:mm-usr#mm-pwd'] + initiator_infos = iscsi_info[1:-1].split('};{') + + # Step 3, get initiator configure pairs, convert to: + # [['Name:xxx', '', 'TargetPortGroup:xxx'], + # ['Name:xxx', 'CHAPinfo:mm-usr#mm-pwd']] + initiator_infos = map(lambda x: x.split(';'), initiator_infos) + + # Step 4, remove invalid configure pairs, convert to: + # [['Name:xxx', 'TargetPortGroup:xxx'], + # ['Name:xxx', 'CHAPinfo:mm-usr#mm-pwd']] + initiator_infos = map(lambda x: filter(lambda y: y, x), + initiator_infos) + + # Step 5, make initiators configure dict, convert to: + # [{'TargetPortGroup': 'xxx', 'Name': 'xxx'}, + # {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}] + get_opts = lambda x: x.split(':', 1) + initiator_infos = map(lambda x: dict(map(get_opts, x)), + initiator_infos) + # Convert generator to list for py3 compatibility. 
+ initiator_infos = list(initiator_infos) + + # Step 6, replace CHAPinfo 'user#pwd' to 'user;pwd' + key = 'CHAPinfo' + for info in initiator_infos: + if key in info: + info[key] = info[key].replace('#', ';', 1) + + return initiator_infos + + def get_hypermetro_devices(self): + devs = self.conf.safe_get('hypermetro_device') + if not devs: + return [] + + devs_config = [] + for dev in devs: + dev_config = {} + dev_config['san_address'] = dev['san_address'].split(';') + dev_config['san_user'] = dev['san_user'] + dev_config['san_password'] = dev['san_password'] + dev_config['vstore_name'] = dev.get('vstore_name') + dev_config['metro_domain'] = dev['metro_domain'] + dev_config['storage_pools'] = dev['storage_pool'].split(';') + dev_config['iscsi_info'] = self._parse_rmt_iscsi_info( + dev.get('iscsi_info')) + dev_config['fc_info'] = self._parse_rmt_iscsi_info( + dev.get('fc_info')) + dev_config['iscsi_default_target_ip'] = ( + dev['iscsi_default_target_ip'].split(';') + if 'iscsi_default_target_ip' in dev + else []) + devs_config.append(dev_config) + + return devs_config + + def get_replication_devices(self): + devs = self.conf.safe_get('replication_device') + if not devs: + return [] + + devs_config = [] + for dev in devs: + dev_config = {} + dev_config['backend_id'] = dev['backend_id'] + dev_config['san_address'] = dev['san_address'].split(';') + dev_config['san_user'] = dev['san_user'] + dev_config['san_password'] = dev['san_password'] + dev_config['vstore_name'] = dev.get('vstore_name') + dev_config['storage_pools'] = dev['storage_pool'].split(';') + dev_config['iscsi_info'] = self._parse_rmt_iscsi_info( + dev.get('iscsi_info')) + dev_config['fc_info'] = self._parse_rmt_iscsi_info( + dev.get('fc_info')) + dev_config['iscsi_default_target_ip'] = ( + dev['iscsi_default_target_ip'].split(';') + if 'iscsi_default_target_ip' in dev + else []) + devs_config.append(dev_config) + + return devs_config + + def get_local_device(self): + dev_config = { + 'backend_id': "default", + 'san_address': self.conf.san_address, + 'san_user': self.conf.san_user, + 'san_password': self.conf.san_password, + 'vstore_name': self.conf.vstore_name, + 'storage_pools': self.conf.storage_pools, + 'iscsi_info': self.conf.iscsi_info, + 'fc_info': self.conf.fc_info, + 'iscsi_default_target_ip': self.conf.iscsi_default_target_ip, + } + return dev_config + + def _lun_copy_speed(self, xml_root): + text = xml_root.findtext('LUN/LUNCopySpeed') + if text and text.strip() not in constants.LUN_COPY_SPEED_TYPES: + msg = (_("Invalid LUNCopySpeed '%(text)s', LUNCopySpeed must " + "be between %(low)s and %(high)s.") + % {"text": text, "low": constants.LUN_COPY_SPEED_LOW, + "high": constants.LUN_COPY_SPEED_HIGHEST}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if not text: + speed = constants.LUN_COPY_SPEED_MEDIUM + else: + speed = text.strip() + setattr(self.conf, 'lun_copy_speed', int(speed)) + + def _set_license_configuration(self, text, license_type): + license_list = [] + if text: + license_list = list(set(x.strip() for x in text.split(';') if x.strip())) + setattr(self.conf, license_type, license_list) + + def _dedup_license(self, xml_root): + text = xml_root.findtext('LUN/DedupLicense') + + self._set_license_configuration(text, 'dedup_license') + + def _compression_license(self, xml_root): + text = xml_root.findtext('LUN/CompressionLicense') + self._set_license_configuration(text, 'compression_license') diff --git a/PowerVC/huawei_driver.py b/PowerVC/huawei_driver.py new file mode 100644 index 0000000..3612fc0 
--- /dev/null +++ b/PowerVC/huawei_driver.py @@ -0,0 +1,3581 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import json +import re +import uuid + +import six +from oslo_config import cfg +from oslo_config import types +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import strutils +from oslo_utils import units + +from cinder import context +from cinder import coordination +from cinder import exception +from cinder import objects +from cinder.i18n import _ +from cinder.objects import fields +from cinder.volume import driver +from cinder.volume import volume_types +from cinder.volume import volume_utils +from cinder.volume.drivers.huawei import constants +from cinder.volume.drivers.huawei import fc_zone_helper +from cinder.volume.drivers.huawei import huawei_conf +from cinder.volume.drivers.huawei import huawei_utils +from cinder.volume.drivers.huawei import hypermetro +from cinder.volume.drivers.huawei import replication +from cinder.volume.drivers.huawei import rest_client +from cinder.volume.drivers.huawei import smartx +from cinder.zonemanager import utils as fczm_utils +from powervc_cinder.db import api as powervc_db_api +from powervc_cinder.volume import discovery_driver +from powervc_cinder.volume.drivers.ibm_storage.common_driver import \ + RESTRICTED_METADATA_VDISK_ID_KEY +from powervc_cinder.volume.drivers.ibm_storage.common_driver import \ + RESTRICTED_METADATA_VDISK_NAME_KEY +from powervc_cinder.volume.drivers.ibm_storage.common_driver import \ + RESTRICTED_METADATA_VDISK_UID_KEY + +LOG = logging.getLogger(__name__) + +huawei_opts = [ + cfg.StrOpt('cinder_huawei_conf_file', + default='/etc/cinder/cinder_huawei_conf.xml', + help='The configuration file for the Cinder Huawei driver.'), + cfg.MultiOpt('hypermetro_device', + item_type=types.Dict(), + secret=True, + help='Multi opt of dictionaries to represent a hypermetro ' + 'target device. This option may be specified multiple ' + 'times in a single config section to specify multiple ' + 'hypermetro target devices. 
Each entry takes the ' + 'standard dict config form: hypermetro_device = ' + 'key1:value1,key2:value2...'), + cfg.BoolOpt('libvirt_iscsi_use_ultrapath', + default=False, + help='use ultrapath connection of the iSCSI volume'), +] + +CONF = cfg.CONF +CONF.register_opts(huawei_opts) + +snap_attrs = ('id', 'volume_id', 'volume', 'provider_location', 'metadata') +vol_attrs = ('id', 'lun_type', 'provider_location', 'metadata') +Snapshot = collections.namedtuple('Snapshot', snap_attrs) +Volume = collections.namedtuple('Volume', vol_attrs) + + +class HuaweiBaseDriver(driver.VolumeDriver, + discovery_driver.VolumeDiscoveryDriver): + VERSION = "2.6.4" + + def __init__(self, *args, **kwargs): + super(HuaweiBaseDriver, self).__init__(*args, **kwargs) + + if not self.configuration: + msg = _('Configuration is not found.') + raise exception.InvalidInput(reason=msg) + + self.active_backend_id = kwargs.get('active_backend_id') + + self.configuration.append_config_values(huawei_opts) + self.huawei_conf = huawei_conf.HuaweiConf(self.configuration) + self.support_func = None + self.metro_flag = False + self.replica = None + self.use_ultrapath = self.configuration.safe_get( + 'libvirt_iscsi_use_ultrapath') + self.sn = 'NA' + self.client = None + self.rmt_client = None + self.replica_client = None + + def check_local_func_support(self, obj_name): + try: + self.client._get_object_count(obj_name) + return True + except Exception: + return False + + def check_rmt_func_support(self, obj_name): + try: + self.rmt_client._get_object_count(obj_name) + return True + except Exception: + return False + + def check_replica_func_support(self, obj_name): + try: + self.replica_client._get_object_count(obj_name) + return True + except Exception: + return False + + def get_local_and_remote_dev_conf(self): + self.loc_dev_conf = self.huawei_conf.get_local_device() + + # Now just support one replication_devices. + replica_devs = self.huawei_conf.get_replication_devices() + self.replica_dev_conf = replica_devs[0] if replica_devs else {} + + def get_local_and_remote_client_conf(self): + if self.active_backend_id: + return self.replica_dev_conf, self.loc_dev_conf + else: + return self.loc_dev_conf, self.replica_dev_conf + + def do_setup(self, context): + """Instantiate common class and login storage system.""" + # Set huawei private configuration into Configuration object. 
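+        # NOTE: setup proceeds roughly as follows: parse the XML/driver
+        # config, log in to the local array, then optionally create the
+        # hypermetro remote client and the replication pair manager.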
+ self.huawei_conf.update_config_value() + + self.get_local_and_remote_dev_conf() + client_conf, replica_client_conf = ( + self.get_local_and_remote_client_conf()) + + # init local client + if not client_conf: + msg = _('Get active client failed.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.client = rest_client.RestClient(self.configuration, + **client_conf) + self.sn = self.client.login() + self.client.check_storage_pools() + + # init hypermetro remote client + hypermetro_devs = self.huawei_conf.get_hypermetro_devices() + hypermetro_client_conf = hypermetro_devs[0] if hypermetro_devs else {} + if hypermetro_client_conf: + self.rmt_client = rest_client.RestClient(self.configuration, + **hypermetro_client_conf) + self.rmt_client.login() + self.metro_flag = True + + # init replication manager + if replica_client_conf: + self.replica_client = rest_client.RestClient(self.configuration, + **replica_client_conf) + self.replica_client.try_login() + self.replica = replication.ReplicaPairManager(self.client, + self.replica_client, + self.configuration) + + def check_for_setup_error(self): + pass + + def get_volume_stats(self, refresh=False): + """Get volume status and reload huawei config file.""" + self.huawei_conf.update_config_value() + stats = self.client.update_volume_stats() + stats = self.update_support_capability(stats) + + if self.replica: + stats = self.replica.update_replica_capability(stats) + targets = [self.replica_dev_conf['backend_id']] + stats['replication_targets'] = targets + stats['replication_enabled'] = True + + return stats + + @staticmethod + def _is_feature_support(feature_status, default_license, config_license): + feature_tuple = tuple(set(default_license) | set(config_license)) + return huawei_utils.check_feature_available( + feature_status, feature_tuple) + + def update_support_capability(self, stats): + feature_status = self.client.get_license_feature_status() + license_dict = {} + for key in constants.FEATURES_DICTS: + if key in feature_status: + license_dict[key] = (feature_status.get(key) in + constants.AVAILABLE_FEATURE_STATUS) + else: + # In order to adapt the storage array in lower version + if constants.FEATURES_DICTS[key]: + license_dict[key] = self.check_local_func_support( + constants.FEATURES_DICTS[key]) + + feature_dict = { + 'smartcache': license_dict.get('SmartCache', False), + 'smartpartition': license_dict.get('SmartPartition', False), + 'QoS_support': license_dict.get('SmartQoS', False), + 'luncopy': license_dict.get('HyperCopy', False), + 'hypermetro': license_dict.get('HyperMetro', False), + 'thin_provisioning_support': license_dict.get('SmartThin', False), + 'thick_provisioning_support': True, + 'consistencygroup_support': True, + 'multiattach': True, + 'huawei_controller': True, + 'dedup': [self._is_feature_support( + feature_status, constants.DEDUP_FEATURES, + self.configuration.dedup_license), False], + 'compression': [self._is_feature_support( + feature_status, constants.COMPRESSION_FEATURES, + self.configuration.compression_license), False], + 'huawei_application_type': False, + } + + for pool in stats['pools']: + pool.update(feature_dict) + + if self.configuration.san_product in \ + constants.DORADO_V6_AND_V6_PRODUCT: + pool['smarttier'] = False + pool['thick_provisioning_support'] = False + + pool['smarttier'] = (feature_status.get('SmartTier') in + constants.AVAILABLE_FEATURE_STATUS and + pool['smarttier']) + pool['hypermetro'] = (feature_dict['hypermetro'] and + self._get_rmt_license_features( + 
"HyperMetro", "HyperMetroPair")) + # Asign the support function to global paramenter, + # except "smarttier". + self.support_func = pool + + return stats + + def _get_rmt_license_features(self, obj_name, cnt_name): + if self.metro_flag: + rmt_feature_status = self.rmt_client.get_license_feature_status() + if obj_name in rmt_feature_status: + return (rmt_feature_status[obj_name] in + constants.AVAILABLE_FEATURE_STATUS) + else: + # In order to adapt the storage array in lower version + return self.check_rmt_func_support(cnt_name) + else: + return False + + def _get_volume_type(self, volume): + volume_type = None + type_id = volume.volume_type_id + if type_id: + ctxt = context.get_admin_context() + volume_type = volume_types.get_volume_type(ctxt, type_id) + + return volume_type + + def _get_volume_params(self, volume_type): + """Return the parameters for creating the volume.""" + specs = {} + if volume_type: + specs = dict(volume_type).get('extra_specs') + + opts = self._get_volume_params_from_specs(specs) + return opts + + def _get_volume_params_from_specs(self, specs): + """Return the volume parameters from extra specs.""" + opts_capability = { + 'smarttier': False, + 'smartcache': False, + 'smartpartition': False, + 'thin_provisioning_support': False, + 'thick_provisioning_support': False, + 'hypermetro': False, + 'replication_enabled': False, + 'replication_type': 'async', + 'huawei_controller': False, + 'dedup': None, + 'compression': None, + } + + opts_value = { + 'policy': None, + 'partitionname': None, + 'cachename': None, + 'controllername': None, + } + + opts_associate = { + 'smarttier': 'policy', + 'smartcache': 'cachename', + 'smartpartition': 'partitionname', + 'huawei_controller': 'controllername', + } + + opts = self._get_opts_from_specs(opts_capability, + opts_value, + opts_associate, + specs) + + opts = smartx.SmartX(self.client).get_smartx_specs_opts(opts) + opts = replication.get_replication_opts(opts) + LOG.debug('volume opts %(opts)s.', {'opts': opts}) + return opts + + def _get_opts_from_specs(self, opts_capability, opts_value, + opts_associate, specs): + """Get the well defined extra specs.""" + opts = {} + opts.update(opts_capability) + opts.update(opts_value) + + # the analysis of key-value + for key, value in specs.items(): + # Get the scope, if is using scope format. 
+            scope = None
+
+            key_split = key.split(':')
+            if len(key_split) > 2 and key_split[0] != "capabilities":
+                continue
+
+            if len(key_split) == 1:
+                key = key_split[0].lower()
+            else:
+                scope = key_split[0].lower()
+                key = key_split[1].lower()
+
+            if (not scope or scope == 'capabilities'
+                    and key in opts_capability):
+                words = value.split()
+                if words and len(words) == 2 and words[0] in (
+                        '<is>', '<in>'):
+                    opts[key] = words[1].lower()
+                elif key == 'replication_type':
+                    LOG.error("Extra specs must be specified as "
+                              "replication_type='<in> sync' or "
+                              "'<in> async'.")
+                else:
+                    LOG.warning("Extra specs must be specified as "
+                                "capabilities:%s='<is> True'.", key)
+
+            if ((scope in opts_capability)
+                    and (key in opts_value)
+                    and (scope in opts_associate)
+                    and (opts_associate[scope] == key)):
+                opts[key] = value
+
+        return opts
+
+    def _get_lun_params(self, volume, opts, src_size=None):
+        pool_name = volume_utils.extract_host(volume.host, level='pool')
+        params = {
+            'NAME': huawei_utils.encode_name(volume.id),
+            'PARENTID': self.client.get_pool_id(pool_name),
+            'DESCRIPTION': volume.name,
+            'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type),
+            'CAPACITY': int(int(src_size) * constants.CAPACITY_UNIT if src_size
+                            else huawei_utils.get_volume_size(volume)),
+            'WRITEPOLICY': self.configuration.lun_write_type,
+            'PREFETCHPOLICY': self.configuration.lun_prefetch_type,
+            'PREFETCHVALUE': self.configuration.lun_prefetch_value,
+            'DATATRANSFERPOLICY': opts.get('policy', '0'),
+        }
+
+        if opts['controllerid']:
+            params['OWNINGCONTROLLER'] = opts['controllerid']
+
+        if opts.get('dedup'):
+            params['ENABLESMARTDEDUP'] = opts['dedup']
+        elif True not in self.support_func['dedup']:
+            params['ENABLESMARTDEDUP'] = False
+
+        if opts.get('compression'):
+            params['ENABLECOMPRESSION'] = opts['compression']
+        elif True not in self.support_func['compression']:
+            params['ENABLECOMPRESSION'] = False
+
+        LOG.info('volume: %(volume)s, lun params: %(params)s.',
+                 {'volume': volume.id, 'params': params})
+        return params
+
+    def _create_volume(self, lun_params):
+        # Create LUN on the array.
+        lun_info = self.client.create_lun(lun_params)
+        metadata = {'huawei_lun_id': lun_info['ID'],
+                    'huawei_sn': self.sn,
+                    'huawei_lun_wwn': lun_info['WWN']}
+        model_update = {'metadata': metadata}
+
+        return lun_info, model_update
+
+    def _create_base_type_volume(self, opts, volume, volume_type):
+        """Create volume and add some base type.
+
+        Base type is for the services that won't conflict with
+        other services.
+        """
+        lun_params = self._get_lun_params(volume, opts)
+        lun_info, model_update = self._create_volume(lun_params)
+        lun_id = lun_info['ID']
+
+        try:
+            qos = smartx.SmartQos.get_qos_by_volume_type(volume_type)
+            if qos:
+                if not self.support_func.get('QoS_support'):
+                    msg = (_("Can't support qos on the array"))
+                    LOG.error(msg)
+                    raise exception.VolumeBackendAPIException(data=msg)
+                else:
+                    smart_qos = smartx.SmartQos(self.client)
+                    smart_qos.add(qos, lun_id)
+
+            smartpartition = smartx.SmartPartition(self.client)
+            smartpartition.add(opts, lun_id)
+
+            smartcache = smartx.SmartCache(self.client)
+            smartcache.add(opts, lun_id)
+        except Exception as err:
+            self._delete_lun_with_check(lun_id)
+            msg = _('Create volume error. 
Because %s.') % six.text_type(err) + raise exception.VolumeBackendAPIException(data=msg) + + return lun_params, lun_info, model_update + + def _add_extend_type_to_volume(self, volume, volume_type, opts, lun_params, + lun_info, is_sync=False): + lun_id = lun_info['ID'] + lun_params.update({"CAPACITY": huawei_utils.get_volume_size(volume)}) + + qos = smartx.SmartQos.get_qos_by_volume_type(volume_type) + if qos: + smart_qos = smartx.SmartQos(self.client) + smart_qos.add(qos, lun_id) + + smartpartition = smartx.SmartPartition(self.client) + smartpartition.add(opts, lun_id) + + smartcache = smartx.SmartCache(self.client) + smartcache.add(opts, lun_id) + + metro_id = None + if opts.get('hypermetro') == 'true': + metro = hypermetro.HuaweiHyperMetro( + self.client, self.rmt_client, self.configuration) + metro_id = metro.create_hypermetro(lun_id, lun_params, is_sync) + + if volume.group_id: + try: + metro.add_hypermetro_to_consistencygroup( + {'id': volume.group_id}, metro_id) + except Exception: + metro.delete_hypermetro(volume) + raise + + replica_info = {} + if opts.get('replication_enabled') == 'true': + replica_model = opts.get('replication_type') + replica_info = self.replica.create_replica(lun_info, replica_model) + + if volume.group_id: + try: + replicg = replication.ReplicaCG( + self.client, self.replica_client, self.configuration) + replicg.add_replica_to_group( + volume.group_id, + replica_info['replication_driver_data']) + except Exception: + self.replica.delete_replica( + volume, replica_info['replication_driver_data']) + raise + + return metro_id, replica_info + + def _create_volume_from_src(self, volume, src_obj, src_type, lun_params, + clone_pair_flag=None): + metadata = huawei_utils.get_volume_metadata(volume) + if strutils.bool_from_string(metadata.get('fastclone')): + if volume.volume_type_id != src_obj.volume_type_id: + msg = _("Volume type must be the same as source " + "for fast clone.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if src_type == objects.Volume: + src_id = self._check_volume_exist_on_array( + src_obj, constants.VOLUME_NOT_EXISTS_RAISE) + else: + src_id = self._check_snapshot_exist_on_array( + src_obj, constants.SNAPSHOT_NOT_EXISTS_RAISE) + + lun_info = self._create_volume_by_clone(src_id, lun_params) + elif clone_pair_flag: + clone_speed = self.configuration.lun_copy_speed + if src_type == objects.Volume: + src_id = self._check_volume_exist_on_array( + src_obj, constants.VOLUME_NOT_EXISTS_RAISE) + else: + src_id = self._check_snapshot_exist_on_array( + src_obj, constants.SNAPSHOT_NOT_EXISTS_RAISE) + lun_info = self._create_volume_by_clone_pair( + src_id, lun_params, clone_speed) + else: + copyspeed = metadata.get('copyspeed') + if not copyspeed: + copyspeed = self.configuration.lun_copy_speed + elif copyspeed not in constants.LUN_COPY_SPEED_TYPES: + msg = (_("LUN copy speed is: %(speed)s. 
It should be between " + "%(low)s and %(high)s.") + % {"speed": copyspeed, + "low": constants.LUN_COPY_SPEED_LOW, + "high": constants.LUN_COPY_SPEED_HIGHEST}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if src_type == objects.Volume: + vol_kwargs = { + 'id': src_obj.id, + 'provider_location': src_obj.provider_location, + } + snapshot_kwargs = { + 'id': six.text_type(uuid.uuid4()), + 'volume_id': src_obj.id, + 'volume': objects.Volume(**vol_kwargs), + } + + snapshot = objects.Snapshot(**snapshot_kwargs) + src_id = self._create_snapshot(snapshot) + else: + src_id = self._check_snapshot_exist_on_array( + src_obj, constants.SNAPSHOT_NOT_EXISTS_RAISE) + + try: + lun_info = self._create_volume_by_luncopy( + src_id, lun_params, copyspeed) + except Exception as err: + msg = _("Create volume by lun copy error. Reason: %s") % err + LOG.error(msg) + raise exception.VolumeBackendAPIException(msg) + finally: + if src_type == objects.Volume: + self._delete_snapshot(src_id) + + try: + expect_size = int(volume.size) * constants.CAPACITY_UNIT + if int(lun_info['CAPACITY']) < expect_size: + self.client.extend_lun(lun_info["ID"], expect_size) + lun_info = self.client.get_lun_info(lun_info["ID"]) + except Exception as err: + LOG.exception('Extend lun %(lun_id)s error. Reason is %(err)s' % + {"lun_id": lun_info['ID'], "err": err}) + self._delete_lun_with_check(lun_info['ID']) + raise + + return lun_info + + def _create_snapshot(self, snapshot): + snapshot_id = self._create_snapshot_base(snapshot) + + try: + self.client.activate_snapshot(snapshot_id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error("Active snapshot %s failed, now deleting it.", + snapshot_id) + self.client.delete_snapshot(snapshot_id) + return snapshot_id + + def _delete_snapshot(self, snapshot_id): + self.client.stop_snapshot(snapshot_id) + self.client.delete_snapshot(snapshot_id) + + def _create_volume_by_clone(self, src_id, lun_params): + LOG.info('Create volume %s by clone from source %s.', + lun_params['NAME'], src_id) + + lun_info = self.client.create_clone_lun(src_id, lun_params['NAME']) + lun_id = lun_info['ID'] + + try: + expected_size = int(lun_params['CAPACITY']) + if int(lun_info['CAPACITY']) < expected_size: + self.client.extend_lun(lun_id, expected_size) + + self.client.split_clone_lun(lun_id) + except Exception: + LOG.exception('Split clone lun %s error.', lun_id) + self.client.delete_lun(lun_id) + raise + + lun_info = self.client.get_lun_info(lun_id) + return lun_info + + def _create_volume_by_luncopy(self, src_id, lun_params, copyspeed): + LOG.info('Create volume %s by luncopy from source %s.', + lun_params['NAME'], src_id) + + lun_info = self.client.create_lun(lun_params) + tgt_lun_id = lun_info['ID'] + + def _volume_ready(): + result = self.client.get_lun_info(tgt_lun_id) + return (result['HEALTHSTATUS'] == constants.STATUS_HEALTH and + result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY) + + try: + huawei_utils.wait_for_condition( + _volume_ready, self.configuration.lun_ready_wait_interval, + self.configuration.lun_ready_wait_interval * 10) + self._copy_volume(src_id, tgt_lun_id, copyspeed) + except Exception: + LOG.exception('Copy lun from source %s error.', src_id) + self._delete_lun_with_check(tgt_lun_id) + raise + + return lun_info + + def _create_volume_by_clone_pair(self, src_id, lun_params, clone_speed): + LOG.info('Create volume %s by ClonePair from source %s.', + lun_params['NAME'], src_id) + lun_info = self.client.create_lun(lun_params) + tgt_id = 
lun_info['ID'] + + def _volume_ready(): + result = self.client.get_lun_info(tgt_id) + return (result['HEALTHSTATUS'] == constants.STATUS_HEALTH and + result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY) + + try: + huawei_utils.wait_for_condition( + _volume_ready, self.configuration.lun_ready_wait_interval, + self.configuration.lun_ready_wait_interval * 10) + self._create_clone_pair(src_id, tgt_id, clone_speed) + except Exception: + LOG.exception('Copy lun from source %s error.', src_id) + self._delete_lun_with_check(tgt_id) + raise + return lun_info + + def _create_clone_pair(self, source_id, target_id, clone_speed): + clone_pair_id = self.client.create_clone_pair( + source_id, target_id, clone_speed) + + def _pair_sync_completed(): + clone_pair_info = self.client.get_clone_pair_info(clone_pair_id) + if clone_pair_info['copyStatus'] != constants.CLONE_STATUS_HEALTH: + msg = _("ClonePair %s is abnormal.") % clone_pair_id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return (clone_pair_info['syncStatus'] in + constants.CLONE_STATUS_COMPLETE) + + self.client.sync_clone_pair(clone_pair_id) + huawei_utils.wait_for_condition( + _pair_sync_completed, self.configuration.lun_copy_wait_interval, + self.configuration.lun_timeout) + self.client.delete_clone_pair(clone_pair_id) + + def _update_restricted_metadata(self, volume_id, metadata): + """Perform a database update to associate the specified information + + with the specified volume. + """ + ctxt = context.get_admin_context() + powervc_db_api.volume_restricted_metadata_update_or_create( + ctxt, volume_id, metadata) + + def _common_create_volume(self, volume, src_obj=None, src_type=None, + is_sync=False, src_size=None): + volume_type = self._get_volume_type(volume) + opts = self._get_volume_params(volume_type) + if (opts.get('hypermetro') == 'true' + and opts.get('replication_enabled') == 'true'): + err_msg = _("Hypermetro and Replication can not be " + "used in the same volume_type.") + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + clone_pair_flag = huawei_utils.is_support_clone_pair(self.client) + lun_params = self._get_lun_params(volume, opts, src_size) + + if not src_obj: + lun_info = self.client.create_lun(lun_params) + else: + lun_info = self._create_volume_from_src( + volume, src_obj, src_type, lun_params, clone_pair_flag) + + try: + metro_id, replica_info = self._add_extend_type_to_volume( + volume, volume_type, opts, lun_params, lun_info, is_sync) + except Exception: + LOG.exception('Add extend feature to volume %s failed.', volume.id) + self._delete_lun_with_check(lun_info['ID']) + raise + + hypermetro = True if metro_id else False + + provider_location = huawei_utils.to_string( + huawei_lun_id=lun_info['ID'], huawei_sn=self.sn, + huawei_lun_wwn=lun_info['WWN'], hypermetro=hypermetro) + model_update = {'provider_location': provider_location} + model_update.update(replica_info) + metadata = {RESTRICTED_METADATA_VDISK_ID_KEY: lun_info['WWN'], + RESTRICTED_METADATA_VDISK_NAME_KEY: lun_info['WWN'], + RESTRICTED_METADATA_VDISK_UID_KEY: lun_info['WWN'], + } + self._update_restricted_metadata(volume.id, metadata) + + return model_update + + def create_volume(self, volume): + return self._common_create_volume(volume) + + def create_volume_from_snapshot(self, volume, snapshot): + snapshot_id, __ = huawei_utils.get_snapshot_id(self.client, snapshot) + if not snapshot_id: + msg = _('Snapshot %s does not exist.') % snapshot.id + LOG.error(msg) + raise 
exception.VolumeBackendAPIException(data=msg) + + snapshot_info = self.client.get_snapshot_info(snapshot_id) + if snapshot_info.get('RUNNINGSTATUS') != constants.STATUS_ACTIVE: + msg = _("Failed to create volume from snapshot due to " + "snapshot %s is not activated.") % snapshot_id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return self._common_create_volume(volume, snapshot, objects.Snapshot, + is_sync=True, + src_size=snapshot.volume_size) + + def create_cloned_volume(self, volume, src_vref): + self._check_volume_exist_on_array( + src_vref, constants.VOLUME_NOT_EXISTS_RAISE) + + return self._common_create_volume(volume, src_vref, objects.Volume, + is_sync=True, src_size=src_vref.size) + + def _delete_volume(self, volume, lun_id=None): + if not lun_id: + lun_id, lun_wwn = huawei_utils.get_volume_lun_id( + self.client, volume) + if not lun_id: + return + + lun_group_ids = self.client.get_lungroupids_by_lunid(lun_id) + if lun_group_ids and len(lun_group_ids) == 1: + self.client.remove_lun_from_lungroup(lun_group_ids[0], lun_id) + + self.client.delete_lun(lun_id) + + def delete_volume(self, volume): + """Delete a volume. + + Three steps: + Firstly, remove associate from lungroup. + Secondly, remove associate from QoS policy. + Thirdly, remove the lun. + """ + metadata = huawei_utils.get_lun_metadata(volume) + if metadata.get('hypermetro'): + metro = hypermetro.HuaweiHyperMetro( + self.client, self.rmt_client, self.configuration) + try: + metro.delete_hypermetro(volume) + except exception.VolumeBackendAPIException as err: + LOG.error('Delete hypermetro error: %s.', err) + lun_id = self._check_volume_exist_on_array( + volume, constants.VOLUME_NOT_EXISTS_WARN) + if lun_id: + self._delete_volume(volume, lun_id) + raise + + # Delete a replication volume + replica_data = volume.replication_driver_data + if replica_data: + try: + self.replica.delete_replica(volume) + except exception.VolumeBackendAPIException as err: + with excutils.save_and_reraise_exception(): + LOG.exception("Delete replication error.") + lun_id = self._check_volume_exist_on_array( + volume, constants.VOLUME_NOT_EXISTS_WARN) + if lun_id: + self._delete_volume(volume, lun_id) + + lun_id = self._check_volume_exist_on_array( + volume, constants.VOLUME_NOT_EXISTS_WARN) + if not lun_id: + return + + qos_id = self.client.get_qosid_by_lunid(lun_id) + if qos_id: + smart_qos = smartx.SmartQos(self.client) + smart_qos.remove(qos_id, lun_id) + self._delete_volume(volume, lun_id) + + def _delete_lun_with_check(self, lun_id, lun_wwn=None): + if not lun_id: + return + + if self.client.check_lun_exist(lun_id, lun_wwn): + qos_id = self.client.get_qosid_by_lunid(lun_id) + if qos_id: + smart_qos = smartx.SmartQos(self.client) + smart_qos.remove(qos_id, lun_id) + + self.client.delete_lun(lun_id) + + def _is_lun_migration_complete(self, src_id, dst_id): + result = self.client.get_lun_migration_task() + found_migration_task = False + if 'data' not in result: + return False + + for item in result['data']: + if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): + found_migration_task = True + if constants.MIGRATION_COMPLETE == item['RUNNINGSTATUS']: + return True + if constants.MIGRATION_FAULT == item['RUNNINGSTATUS']: + msg = _("Lun migration error.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not found_migration_task: + err_msg = _("Cannot find migration task.") + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + return False + + def 
_is_lun_migration_exist(self, src_id, dst_id): + try: + result = self.client.get_lun_migration_task() + except Exception: + LOG.error("Get LUN migration error.") + return False + + if 'data' in result: + for item in result['data']: + if (src_id == item['PARENTID'] + and dst_id == item['TARGETLUNID']): + return True + return False + + def _migrate_lun(self, src_id, dst_id): + try: + self.client.create_lun_migration(src_id, dst_id) + + def _is_lun_migration_complete(): + return self._is_lun_migration_complete(src_id, dst_id) + + wait_interval = constants.MIGRATION_WAIT_INTERVAL + huawei_utils.wait_for_condition(_is_lun_migration_complete, + wait_interval, + self.configuration.lun_timeout) + # Clean up if migration failed. + except Exception as ex: + raise exception.VolumeBackendAPIException(data=ex) + finally: + if self._is_lun_migration_exist(src_id, dst_id): + self.client.delete_lun_migration(src_id, dst_id) + self._delete_lun_with_check(dst_id) + + LOG.debug("Migrate lun %s successfully.", src_id) + return True + + def _wait_volume_ready(self, lun_id): + wait_interval = self.configuration.lun_ready_wait_interval + + def _volume_ready(): + result = self.client.get_lun_info(lun_id) + if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH + and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): + return True + return False + + huawei_utils.wait_for_condition(_volume_ready, + wait_interval, + wait_interval * 10) + + def _get_original_status(self, volume): + return 'in-use' if volume.volume_attachment else 'available' + + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status=None): + original_name = huawei_utils.get_lun_name(self.client, volume) + new_lun_id, lun_wwn = huawei_utils.get_volume_lun_id( + self.client, new_volume) + description = volume['name'] + + org_metadata = huawei_utils.get_lun_metadata(volume) + new_metadata = huawei_utils.get_lun_metadata(new_volume) + + try: + if org_metadata.get('huawei_sn') == new_metadata.get('huawei_sn'): + src_id, src_wwn = huawei_utils.get_volume_lun_id( + self.client, volume) + src_lun_name = str(uuid.uuid4()) + src_lun_name = huawei_utils.encode_name(src_lun_name) + self.client.rename_lun(src_id, src_lun_name) + self.client.rename_lun(new_lun_id, + original_name, + description=description) + except exception.VolumeBackendAPIException: + LOG.error('Unable to rename lun %s on array.', new_lun_id) + return {'_name_id': new_volume.name_id, + 'provider_location': huawei_utils.to_string(**new_metadata) + } + + LOG.debug("Rename lun %(id)s to %(original_name)s successfully.", + {'id': new_lun_id, + 'original_name': original_name}) + + return {'_name_id': None, + 'provider_location': huawei_utils.to_string(**new_metadata)} + + def migrate_volume(self, ctxt, volume, host): + """Migrate a volume within the same array.""" + self._check_volume_exist_on_array(volume, + constants.VOLUME_NOT_EXISTS_RAISE) + + # NOTE(jlc): Replication volume can't migrate. But retype + # can remove replication relationship first then do migrate. + # So don't add this judgement into _check_migration_valid(). 
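+        # NOTE: returning (False, None) below tells the volume manager
+        # that the driver did not migrate the volume, so Cinder can fall
+        # back to generic host-assisted migration for such volumes.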
+ volume_type = self._get_volume_type(volume) + opts = self._get_volume_params(volume_type) + if (opts.get('hypermetro') == 'true' or + opts.get('replication_enabled') == 'true'): + return False, None + + return self._migrate_volume(volume, host) + + def _check_migration_valid(self, host, volume): + if 'pool_name' not in host['capabilities']: + return False + + target_device = host['capabilities']['location_info'] + + # Source and destination should be on same array. + if target_device != self.client.device_id: + return False + + # Same protocol should be used if volume is in-use. + protocol = self.configuration.san_protocol + if (host['capabilities']['storage_protocol'] != protocol + and self._get_original_status(volume) == 'in-use'): + return False + + pool_name = host['capabilities']['pool_name'] + if len(pool_name) == 0: + return False + + return True + + def _migrate_volume(self, volume, host, new_type=None): + if not self._check_migration_valid(host, volume): + return (False, None) + + type_id = volume.volume_type_id + + volume_type = None + if type_id: + volume_type = volume_types.get_volume_type(None, type_id) + + pool_name = host['capabilities']['pool_name'] + pools = self.client.get_all_pools() + pool_info = self.client.get_pool_info(pool_name, pools) + dst_volume_name = six.text_type(uuid.uuid4()) + + src_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) + opts = None + if new_type: + # If new type exists, use new type. + new_specs = new_type['extra_specs'] + opts = self._get_volume_params_from_specs(new_specs) + if 'LUNType' not in opts: + opts['LUNType'] = self.configuration.lun_type + + if not opts: + opts = self._get_volume_params(volume_type) + + lun_info = self.client.get_lun_info(src_id) + lun_params = { + 'NAME': huawei_utils.encode_name(dst_volume_name), + 'PARENTID': pool_info['ID'], + 'DESCRIPTION': lun_info['DESCRIPTION'], + 'ALLOCTYPE': opts.get('LUNType', lun_info['ALLOCTYPE']), + 'CAPACITY': lun_info['CAPACITY'], + 'WRITEPOLICY': lun_info['WRITEPOLICY'], + } + + if 'DATATRANSFERPOLICY' in lun_info: + lun_params['DATATRANSFERPOLICY'] = opts.get( + 'policy', lun_info['DATATRANSFERPOLICY']) + for k in ('PREFETCHPOLICY', 'PREFETCHVALUE', 'READCACHEPOLICY', + 'WRITECACHEPOLICY', 'OWNINGCONTROLLER'): + if k in lun_info: + lun_params[k] = lun_info[k] + + for key in list(lun_params): + if lun_params.get(key) == '--': + lun_params.pop(key, None) + + lun_info = self.client.create_lun(lun_params) + dst_id = lun_info['ID'] + self._wait_volume_ready(dst_id) + moved = self._migrate_lun(src_id, dst_id) + + return moved, {} + + def _check_volume_exist_on_array(self, volume, action, local=True): + """Check whether the volume exists on the array. + + If the volume exists on the array, return the LUN ID. + If not exists, raise or log warning. + """ + # Determine use which client, local or remote. + client = self.client if local else self.rmt_client + + # For manage scenario remote name would not same with local name + hypermetro_id = huawei_utils.get_hypermetro_id(volume) + if not local and hypermetro_id: + metro_info = self.rmt_client.get_hypermetro_by_id(hypermetro_id) + lun_id = metro_info['LOCALOBJID'] + lun_wwn = None + else: + # try to find LUN ID from volume. + lun_id, lun_wwn = huawei_utils.get_volume_lun_id(client, volume) + + if not lun_id: + msg = _("Volume %s does not exist on the array." 
+ ) % volume.id + if action == constants.VOLUME_NOT_EXISTS_WARN: + LOG.warning(msg) + if action == constants.VOLUME_NOT_EXISTS_RAISE: + raise exception.VolumeBackendAPIException(data=msg) + return + + if not lun_wwn: + LOG.debug("No LUN WWN recorded for volume %s.", volume.id) + + if not client.check_lun_exist(lun_id, lun_wwn): + msg = (_("Volume %s does not exist on the array.") + % volume.id) + if action == constants.VOLUME_NOT_EXISTS_WARN: + LOG.warning(msg) + if action == constants.VOLUME_NOT_EXISTS_RAISE: + raise exception.VolumeBackendAPIException(data=msg) + return + return lun_id + + def _extend_hypermetro_volume(self, volume, new_size): + lun_name = huawei_utils.get_lun_name(self.client, volume, True) + metro_info = self.client.get_hypermetro_by_lun_name(lun_name) + if not metro_info: + msg = _('Volume %s is not in hypermetro pair') % lun_name + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + metro_id = metro_info['ID'] + + metro = hypermetro.HuaweiHyperMetro( + self.client, self.rmt_client, self.configuration) + if metro_info['ISINCG'] == 'true': + metro.stop_consistencygroup(metro_info['CGID']) + else: + metro.check_metro_need_to_stop(metro_id) + + try: + self.rmt_client.extend_lun(metro_info['REMOTEOBJID'], new_size) + self.client.extend_lun(metro_info['LOCALOBJID'], new_size) + finally: + if metro_info['ISINCG'] == 'true': + self.client.sync_metrogroup(metro_info['CGID']) + else: + self.client.sync_hypermetro(metro_id) + + def _extend_replica_volume(self, pair_id, new_size): + replica_info = self.client.get_pair_by_id(pair_id) + if replica_info['ISINCG'] == 'true': + cg_info = self.client.get_replicg_info(replica_info['CGID']) + replica_cg = replication.ReplicaCG( + self.client, self.replica_client, self.configuration) + replica_cg.split_replicg(cg_info) + else: + self.replica.split_replica(pair_id) + + try: + self.replica_client.extend_lun(replica_info['REMOTERESID'], + new_size) + self.client.extend_lun(replica_info['LOCALRESID'], new_size) + finally: + if replica_info['ISINCG'] == 'true': + self.client.sync_replicg(replica_info['CGID']) + else: + self.client.sync_pair(pair_id) + + def extend_volume(self, volume, new_size): + """Extend a volume.""" + lun_id = self._check_volume_exist_on_array( + volume, constants.VOLUME_NOT_EXISTS_RAISE) + lun_info = self.client.get_lun_info(lun_id) + + old_size = int(lun_info.get('CAPACITY')) + new_size = int(new_size) * units.Gi // 512 + if new_size == old_size: + LOG.info("New size is equal to the real size from backend" + " storage, no need to extend." + " realsize: %(oldsize)s, newsize: %(newsize)s.", + {'oldsize': old_size, + 'newsize': new_size}) + return + if new_size < old_size: + msg = (_("New size should be bigger than the real size from " + "backend storage." 
+ " realsize: %(oldsize)s, newsize: %(newsize)s."), + {'oldsize': old_size, + 'newsize': new_size}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.info( + 'Extend volume: %(id)s, ' + 'oldsize: %(oldsize)s, newsize: %(newsize)s.', + {'id': volume.id, + 'oldsize': old_size, + 'newsize': new_size}) + + metadata = huawei_utils.get_lun_metadata(volume) + if metadata.get('hypermetro'): + self._extend_hypermetro_volume(volume, new_size) + elif volume.replication_driver_data: + replica_data = replication.get_replication_driver_data(volume) + self._extend_replica_volume(replica_data['pair_id'], new_size) + else: + self.client.extend_lun(lun_id, new_size) + + def _create_snapshot_base(self, snapshot): + volume = snapshot.volume + if not volume: + msg = (_("Can't get volume id from snapshot, snapshot: %(id)s") + % {"id": snapshot.id}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) + snapshot_name = huawei_utils.encode_name(snapshot.id) + snapshot_description = snapshot.id + try: + snapshot_info = self.client.create_snapshot(lun_id, + snapshot_name, + snapshot_description) + except Exception as err: + with excutils.save_and_reraise_exception(): + LOG.error("Create snapshot %s failed, reason is %s, now " + "deleting it.", snapshot_name, err) + snapshot_id = self.client.get_snapshot_id_by_name( + snapshot_name) + if snapshot_id: + self.client.delete_snapshot(snapshot_id) + + snapshot_id = snapshot_info['ID'] + + def _snapshot_ready(): + result = self.client.get_snapshot_info(snapshot_id) + if result['HEALTHSTATUS'] != constants.STATUS_HEALTH: + err_msg = _("The snapshot created is fault.") + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + if (result['RUNNINGSTATUS'] in (constants.STATUS_SNAPSHOT_INACTIVE, + constants.STATUS_SNAPSHOT_ACTIVE)): + return True + + return False + + huawei_utils.wait_for_condition(_snapshot_ready, + constants.DEFAULT_WAIT_INTERVAL, + constants.DEFAULT_WAIT_INTERVAL * 10) + return snapshot_id + + def create_snapshot(self, snapshot): + snapshot_id = self._create_snapshot_base(snapshot) + try: + self.client.activate_snapshot(snapshot_id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error("Active snapshot %s failed, now deleting it.", + snapshot_id) + self.client.delete_snapshot(snapshot_id) + + snapshot_info = self.client.get_snapshot_info(snapshot_id) + location = huawei_utils.to_string( + huawei_snapshot_id=snapshot_id, + huawei_snapshot_wwn=snapshot_info['WWN']) + return {'provider_location': location} + + def delete_snapshot(self, snapshot): + snapshot_id = self._check_snapshot_exist_on_array( + snapshot, constants.SNAPSHOT_NOT_EXISTS_WARN) + if not snapshot_id: + return + self.client.stop_snapshot(snapshot_id) + self.client.delete_snapshot(snapshot_id) + + def _check_snapshot_exist_on_array(self, snapshot, action): + snapshot_id, snapshot_wwn = huawei_utils.get_snapshot_id( + self.client, snapshot) + if not (snapshot_id and + self.client.check_snapshot_exist(snapshot_id, snapshot_wwn)): + msg = (_("Snapshot %s does not exist on the array.") + % snapshot.id) + if action == constants.SNAPSHOT_NOT_EXISTS_WARN: + LOG.warning(msg) + if action == constants.SNAPSHOT_NOT_EXISTS_RAISE: + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return + return snapshot_id + + def retype(self, ctxt, volume, new_type, diff, host): + """Convert the volume to be of the new type.""" + 
LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, " + "diff=%(diff)s, host=%(host)s.", {'id': volume.id, + 'new_type': new_type, + 'diff': diff, + 'host': host}) + self._check_volume_exist_on_array( + volume, constants.VOLUME_NOT_EXISTS_RAISE) + + # Check what changes are needed + migration, change_opts, lun_id = self.determine_changes_when_retype( + volume, new_type, host) + + model_update = {} + + replica_type_change = change_opts.get('replication_type') + if change_opts.get('delete_replica') == 'true': + self.replica.delete_replica(volume) + model_update.update({'replication_status': 'disabled', + 'replication_driver_data': None}) + elif (replica_type_change and + replica_type_change[0] != replica_type_change[1]): + msg = _("Cannot retype replication model.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if change_opts.get('delete_hypermetro') == 'true': + metro = hypermetro.HuaweiHyperMetro( + self.client, self.rmt_client, self.configuration) + metro.delete_hypermetro(volume) + metadata = huawei_utils.get_lun_metadata(volume) + metadata['hypermetro'] = False + model_update['provider_location'] = huawei_utils.to_string( + **metadata) + + if migration: + LOG.debug("Begin to migrate LUN(id: %(lun_id)s) with " + "change %(change_opts)s.", + {"lun_id": lun_id, "change_opts": change_opts}) + if not self._migrate_volume(volume, host, new_type): + LOG.warning("Storage-assisted migration failed during " + "retype.") + return False, model_update + + # Modify lun to change policy + metro_info, replica_info = self.modify_lun( + lun_id, change_opts, migration) + + if metro_info: + metadata = huawei_utils.get_lun_metadata(volume) + metadata.update(metro_info) + model_update['provider_location'] = huawei_utils.to_string( + **metadata) + if replica_info: + model_update.update(replica_info) + + return True, model_update + + def modify_lun(self, lun_id, change_opts, migration): + data = {} + if change_opts.get('dedup') is not None: + data['ENABLESMARTDEDUP'] = change_opts['dedup'] + if change_opts.get('compression') is not None: + data['ENABLECOMPRESSION'] = change_opts['compression'] + if data: + self.client.update_lun(lun_id, data) + LOG.info("Retype LUN(id: %(lun_id)s) dedup & compression success.", + {'lun_id': lun_id}) + + if change_opts.get('partitionid'): + old, new = change_opts['partitionid'] + old_id = old[0] + old_name = old[1] + new_id = new[0] + new_name = new[1] + if not migration and old_id: + self.client.remove_lun_from_partition(lun_id, old_id) + if new_id: + self.client.add_lun_to_partition(lun_id, new_id) + LOG.info("Retype LUN(id: %(lun_id)s) smartpartition from " + "(name: %(old_name)s, id: %(old_id)s) to " + "(name: %(new_name)s, id: %(new_id)s) success.", + {"lun_id": lun_id, + "old_id": old_id, "old_name": old_name, + "new_id": new_id, "new_name": new_name}) + + if change_opts.get('cacheid'): + old, new = change_opts['cacheid'] + old_id = old[0] + old_name = old[1] + new_id = new[0] + new_name = new[1] + if not migration and old_id: + self.client.remove_lun_from_cache(lun_id, old_id) + if new_id: + self.client.add_lun_to_cache(lun_id, new_id) + LOG.info("Retype LUN(id: %(lun_id)s) smartcache from " + "(name: %(old_name)s, id: %(old_id)s) to " + "(name: %(new_name)s, id: %(new_id)s) successfully.", + {'lun_id': lun_id, + 'old_id': old_id, "old_name": old_name, + 'new_id': new_id, "new_name": new_name}) + + if change_opts.get('policy'): + old_policy, new_policy = change_opts['policy'] + self.client.change_lun_smarttier(lun_id, new_policy) + 
LOG.info("Retype LUN(id: %(lun_id)s) smarttier policy from " + "%(old_policy)s to %(new_policy)s success.", + {'lun_id': lun_id, + 'old_policy': old_policy, + 'new_policy': new_policy}) + + if change_opts.get('qos'): + old_qos, new_qos = change_opts['qos'] + old_qos_id = old_qos[0] + old_qos_value = old_qos[1] + if old_qos_id: + smart_qos = smartx.SmartQos(self.client) + smart_qos.remove(old_qos_id, lun_id) + if new_qos: + smart_qos = smartx.SmartQos(self.client) + smart_qos.add(new_qos, lun_id) + LOG.info("Retype LUN(id: %(lun_id)s) smartqos from " + "%(old_qos_value)s to %(new_qos)s success.", + {'lun_id': lun_id, + 'old_qos_value': old_qos_value, + 'new_qos': new_qos}) + + metro_info = {} + if change_opts.get('add_hypermetro') == 'true': + metro = hypermetro.HuaweiHyperMetro( + self.client, self.rmt_client, self.configuration) + __, lun_params = self.get_lun_specs(lun_id) + metro_info = metro.create_hypermetro(lun_id, lun_params, + is_sync=True) + + replica_info = {} + if change_opts.get('add_replica') == 'true': + lun_info = self.client.get_lun_info(lun_id) + replica_info = self.replica.create_replica( + lun_info, change_opts['replication_type'][1]) + + return metro_info, replica_info + + def get_lun_specs(self, lun_id): + lun_opts = { + 'policy': None, + 'partitionid': None, + 'cacheid': None, + 'LUNType': None, + 'dedup': None, + 'compression': None, + } + + lun_info = self.client.get_lun_info(lun_id) + lun_opts['LUNType'] = int(lun_info['ALLOCTYPE']) + if lun_info.get('DATATRANSFERPOLICY'): + lun_opts['policy'] = lun_info['DATATRANSFERPOLICY'] + if lun_info.get('SMARTCACHEPARTITIONID'): + lun_opts['cacheid'] = lun_info['SMARTCACHEPARTITIONID'] + if lun_info.get('CACHEPARTITIONID'): + lun_opts['partitionid'] = lun_info['CACHEPARTITIONID'] + if lun_info.get('ENABLESMARTDEDUP'): + lun_opts['dedup'] = lun_info['ENABLESMARTDEDUP'] + if lun_info.get('ENABLECOMPRESSION'): + lun_opts['compression'] = lun_info['ENABLECOMPRESSION'] + + lun_params = { + 'NAME': lun_info['NAME'], + 'PARENTID': lun_info['PARENTID'], + 'DESCRIPTION': lun_info['DESCRIPTION'], + 'ALLOCTYPE': lun_info['ALLOCTYPE'], + 'CAPACITY': lun_info['CAPACITY'], + 'WRITEPOLICY': lun_info['WRITEPOLICY'], + 'EXPOSEDTOINITIATOR': lun_info['EXPOSEDTOINITIATOR'], + 'ENABLESMARTDEDUP': lun_info['ENABLESMARTDEDUP'], + 'ENABLECOMPRESSION': lun_info['ENABLECOMPRESSION'], + } + + for k in ('DATATRANSFERPOLICY', 'PREFETCHPOLICY', 'PREFETCHVALUE', + 'READCACHEPOLICY', 'WRITECACHEPOLICY'): + if k in lun_info: + lun_params[k] = lun_info[k] + + # Check whether the LUN exists in a HyperMetroPair. 
+ if self.support_func.get('hypermetro'): + try: + hypermetro_pairs = self.client.get_hypermetro_pairs() + except exception.VolumeBackendAPIException: + hypermetro_pairs = [] + LOG.info("Can't get hypermetro info, pass the check.") + + for pair in hypermetro_pairs: + if pair.get('LOCALOBJID') == lun_id: + lun_opts['hypermetro'] = 'true' + + if 'REMOTEREPLICATIONIDS' in lun_info: + replica_ids = json.loads(lun_info['REMOTEREPLICATIONIDS']) + if replica_ids: + lun_opts['replication_enabled'] = 'true' + + return lun_opts, lun_params + + def _check_capability_support(self, new_opts, new_type): + new_cache_name = new_opts['cachename'] + if new_cache_name: + if not self.support_func.get('smartcache'): + msg = (_( + "Can't support cache on the array, cache name is: " + "%(name)s.") % {'name': new_cache_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + new_partition_name = new_opts['partitionname'] + if new_partition_name: + if not self.support_func.get('smartpartition'): + msg = (_( + "Can't support partition on the array, partition name is: " + "%(name)s.") % {'name': new_partition_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if new_opts['policy']: + if (not self.support_func.get('smarttier') + and new_opts['policy'] != '0'): + msg = (_("Can't support tier on the array.")) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type) + if not self.support_func.get('QoS_support'): + if new_qos: + msg = (_("Can't support qos on the array.")) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _check_needed_changes(self, lun_id, old_opts, new_opts, + change_opts, new_type): + new_cache_id = None + new_cache_name = new_opts['cachename'] + if new_cache_name: + if self.support_func.get('smartcache'): + new_cache_id = self.client.get_cache_id_by_name( + new_cache_name) + if new_cache_id is None: + msg = (_( + "Can't find cache name on the array, cache name is: " + "%(name)s.") % {'name': new_cache_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + new_partition_id = None + new_partition_name = new_opts['partitionname'] + if new_partition_name: + if self.support_func.get('smartpartition'): + new_partition_id = self.client.get_partition_id_by_name( + new_partition_name) + if new_partition_id is None: + msg = (_( + "Can't find partition name on the array, partition name " + "is: %(name)s.") % {'name': new_partition_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # smarttier + if old_opts['policy'] != new_opts['policy']: + if not (old_opts['policy'] == '--' + and new_opts['policy'] is None): + change_opts['policy'] = (old_opts['policy'], + new_opts['policy']) + + # smartcache + old_cache_id = old_opts['cacheid'] + if old_cache_id == '--': + old_cache_id = None + if old_cache_id != new_cache_id: + old_cache_name = None + if self.support_func.get('smartcache'): + if old_cache_id: + cache_info = self.client.get_cache_info_by_id( + old_cache_id) + old_cache_name = cache_info['NAME'] + change_opts['cacheid'] = ([old_cache_id, old_cache_name], + [new_cache_id, new_cache_name]) + + # smartpartition + old_partition_id = old_opts['partitionid'] + if old_partition_id == '--': + old_partition_id = None + if old_partition_id != new_partition_id: + old_partition_name = None + if self.support_func.get('smartpartition'): + if old_partition_id: + partition_info = 
self.client.get_partition_info_by_id( + old_partition_id) + old_partition_name = partition_info['NAME'] + + change_opts['partitionid'] = ([old_partition_id, + old_partition_name], + [new_partition_id, + new_partition_name]) + + # smartqos + new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type) + if not self.support_func.get('QoS_support'): + if new_qos: + msg = (_("Can't support qos on the array.")) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + old_qos_id = self.client.get_qosid_by_lunid(lun_id) + old_qos = self._get_qos_specs_from_array(old_qos_id) + if old_qos != new_qos: + change_opts['qos'] = ([old_qos_id, old_qos], new_qos) + + # hypermetro + if new_opts.get('hypermetro') == 'true': + if old_opts.get('hypermetro') != 'true': + change_opts['add_hypermetro'] = 'true' + else: + if old_opts.get('hypermetro') == 'true': + change_opts['delete_hypermetro'] = 'true' + + if new_opts.get('replication_enabled') == 'true': + if old_opts.get('replication_enabled') != 'true': + change_opts['add_replica'] = 'true' + else: + if old_opts.get('replication_enabled') == 'true': + change_opts['delete_replica'] = 'true' + + # dedup + if new_opts.get('dedup') == 'true': + if old_opts.get('dedup') != 'true': + change_opts['dedup'] = True + else: + if old_opts.get('dedup') == 'true': + change_opts['dedup'] = False + + # compression + if new_opts.get('compression') == 'true': + if old_opts.get('compression') != 'true': + change_opts['compression'] = True + else: + if old_opts.get('compression') == 'true': + change_opts['compression'] = False + + return change_opts + + def determine_changes_when_retype(self, volume, new_type, host): + migration = False + change_opts = { + 'policy': None, + 'partitionid': None, + 'cacheid': None, + 'qos': None, + 'host': None, + 'LUNType': None, + 'replication_enabled': None, + 'replication_type': None, + 'hypermetro': None, + 'dedup': None, + 'compression': None, + } + + lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) + old_opts, lun_params = self.get_lun_specs(lun_id) + + new_specs = new_type['extra_specs'] + new_opts = self._get_volume_params_from_specs(new_specs) + + if 'LUNType' not in new_opts: + new_opts['LUNType'] = self.configuration.lun_type + + if volume.host != host['host']: + migration = True + change_opts['host'] = (volume.host, host['host']) + if old_opts['LUNType'] != new_opts['LUNType']: + migration = True + change_opts['LUNType'] = (old_opts['LUNType'], new_opts['LUNType']) + + volume_type = self._get_volume_type(volume) + volume_opts = self._get_volume_params(volume_type) + if (volume_opts['replication_enabled'] == 'true' + or new_opts['replication_enabled'] == 'true'): + # If replication_enabled changes, + # then replication_type in change_opts will be set. 
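+            # For example, retyping from a non-replicated type to one with
+            # replication_enabled='<is> true' records the (old, new) pair
+            # here, and _check_needed_changes() then sets 'add_replica'.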
+            change_opts['replication_enabled'] = (
+                volume_opts['replication_enabled'],
+                new_opts['replication_enabled'])
+
+            change_opts['replication_type'] = (volume_opts['replication_type'],
+                                               new_opts['replication_type'])
+
+        if (volume_opts.get('hypermetro') == 'true'
+                or new_opts.get('hypermetro') == 'true'):
+            change_opts['hypermetro'] = (
+                volume_opts.get('hypermetro', 'false'),
+                new_opts.get('hypermetro', 'false')
+            )
+
+        change_opts = self._check_needed_changes(lun_id, old_opts, new_opts,
+                                                 change_opts, new_type)
+
+        if (change_opts.get('add_hypermetro') == 'true'
+                or change_opts.get('delete_hypermetro') == 'true'):
+            if lun_params.get('EXPOSEDTOINITIATOR') == 'true':
+                msg = _("Can't add hypermetro to a volume that is in use.")
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+        LOG.debug("Determine changes when retype. Migration: "
+                  "%(migration)s, change_opts: %(change_opts)s.",
+                  {'migration': migration, 'change_opts': change_opts})
+        return migration, change_opts, lun_id
+
+    def _get_qos_specs_from_array(self, qos_id):
+        qos = {}
+        qos_info = {}
+        if qos_id:
+            qos_info = self.client.get_qos_info(qos_id)
+
+        qos_key = [k.upper() for k in constants.QOS_SPEC_KEYS]
+        for key, value in qos_info.items():
+            key = key.upper()
+            if key in qos_key:
+                if key == 'LATENCY' and value == '0':
+                    continue
+                else:
+                    qos[key] = value
+        return qos
+
+    def create_export(self, context, volume, connector=None):
+        """Export a volume."""
+        pass
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreate an export for a volume."""
+        pass
+
+    def remove_export(self, context, volume):
+        """Remove an export for a volume."""
+        pass
+
+    def create_export_snapshot(self, context, snapshot, connector):
+        """Exports the snapshot."""
+        pass
+
+    def remove_export_snapshot(self, context, snapshot):
+        """Removes an export for a snapshot."""
+        pass
+
+    def backup_use_temp_snapshot(self):
+        # This config option defaults to False, so just return it.
+        return self.configuration.safe_get("backup_use_temp_snapshot")
+
+    def _copy_volume(self, src_lun, tgt_lun, copyspeed):
+        luncopy_id = self.client.create_luncopy(src_lun, tgt_lun, copyspeed)
+
+        def _luncopy_complete():
+            luncopy_info = self.client.get_luncopy_info(luncopy_id)
+            if not luncopy_info:
+                msg = (_("Failed to get luncopy %s by luncopy id.")
+                       % luncopy_id)
+                raise exception.VolumeBackendAPIException(data=msg)
+            if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY:
+                # luncopy_info['status'] indicates the running status of
+                # the luncopy. If luncopy_info['status'] is equal to '40',
+                # this luncopy is completely ready.
+                return True
+            elif luncopy_info['state'] != constants.STATUS_HEALTH:
+                # luncopy_info['state'] indicates the health status of the
+                # luncopy. If luncopy_info['state'] is not equal to '1',
+                # this means that an error occurred during the LUNcopy
+                # operation and we should abort it.
+                err_msg = _(
+                    'An error occurred during the LUNcopy operation. '
+                    'LUNcopy name: %(luncopyname)s. '
+                    'LUNcopy status: %(luncopystatus)s. '
+                    'LUNcopy state: %(luncopystate)s.' 
+ ) % {'luncopyname': luncopy_id, + 'luncopystatus': luncopy_info['status'], + 'luncopystate': luncopy_info['state']} + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + try: + self.client.start_luncopy(luncopy_id) + huawei_utils.wait_for_condition( + _luncopy_complete, self.configuration.lun_copy_wait_interval, + self.configuration.lun_timeout) + finally: + self.client.delete_luncopy(luncopy_id) + + def _check_lun_valid_for_manage(self, lun_info, external_ref): + lun_id = lun_info.get('ID') + + # Check whether the LUN is already in LUN group. + if lun_info.get('ISADD2LUNGROUP') == 'true': + msg = (_("Can't import LUN %s to Cinder. Already exists in a LUN " + "group.") % lun_id) + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + # Check whether the LUN is Normal. + if lun_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH: + msg = _("Can't import LUN %s to Cinder. LUN status is not " + "normal.") % lun_id + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + # Check whether the LUN exists in a HyperMetroPair. + if self.support_func.get('hypermetro'): + try: + hypermetro_pairs = self.client.get_hypermetro_pairs() + except exception.VolumeBackendAPIException: + hypermetro_pairs = [] + LOG.debug("Can't get hypermetro info, pass the check.") + + for pair in hypermetro_pairs: + if pair.get('LOCALOBJID') == lun_id: + msg = (_("Can't import LUN %s to Cinder. Already exists " + "in a HyperMetroPair.") % lun_id) + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + # Check whether the LUN exists in a SplitMirror. + if self.support_func.get('splitmirror'): + try: + split_mirrors = self.client.get_split_mirrors() + except exception.VolumeBackendAPIException as ex: + if re.search('License is unavailable', ex.msg): + # Can't check whether the LUN has SplitMirror with it, + # just pass the check and log it. + split_mirrors = [] + LOG.warning('No license for SplitMirror.') + else: + msg = _("Failed to get SplitMirror.") + raise exception.VolumeBackendAPIException(data=msg) + + for mirror in split_mirrors: + try: + target_luns = self.client.get_target_luns(mirror.get('ID')) + except exception.VolumeBackendAPIException: + msg = _("Failed to get target LUN of SplitMirror.") + raise exception.VolumeBackendAPIException(data=msg) + + if ((mirror.get('PRILUNID') == lun_id) + or (lun_id in target_luns)): + msg = (_("Can't import LUN %s to Cinder. Already exists " + "in a SplitMirror.") % lun_id) + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + # Check whether the LUN exists in a migration task. + try: + migration_tasks = self.client.get_migration_task() + except exception.VolumeBackendAPIException as ex: + if re.search('License is unavailable', ex.msg): + # Can't check whether the LUN has migration task with it, + # just pass the check and log it. + migration_tasks = [] + LOG.warning('No license for migration.') + else: + msg = _("Failed to get migration task.") + raise exception.VolumeBackendAPIException(data=msg) + + for migration in migration_tasks: + if lun_id in (migration.get('PARENTID'), + migration.get('TARGETLUNID')): + msg = (_("Can't import LUN %s to Cinder. Already exists in a " + "migration task.") % lun_id) + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + # Check whether the LUN exists in a LUN copy task. 
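+        # NOTE: LUNCOPYIDS and REMOTEREPLICATIONIDS are returned by the
+        # array as bracketed list strings (e.g. '["1"]' or '[]'), so the
+        # [1:-1] slice below is a cheap non-empty test on their content.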
+        if self.support_func.get('luncopy'):
+            lun_copy = lun_info.get('LUNCOPYIDS')
+            if lun_copy and lun_copy[1:-1]:
+                msg = (_("Can't import LUN %s to Cinder. Already exists in "
+                         "a LUN copy task.") % lun_id)
+                raise exception.ManageExistingInvalidReference(
+                    existing_ref=external_ref, reason=msg)
+
+        # Check whether the LUN exists in a remote replication task.
+        rmt_replication = lun_info.get('REMOTEREPLICATIONIDS')
+        if rmt_replication and rmt_replication[1:-1]:
+            msg = (_("Can't import LUN %s to Cinder. Already exists in "
+                     "a remote replication task.") % lun_id)
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=external_ref, reason=msg)
+
+    def manage_existing(self, volume, external_ref):
+        """Manage an existing volume on the backend storage."""
+        # Check whether the LUN belongs to the specified pool.
+        pool = volume_utils.extract_host(volume.host, 'pool')
+        LOG.debug("Pool specified is: %s.", pool)
+        lun_info = self._get_lun_info_by_ref(external_ref)
+        lun_id = lun_info.get('ID')
+        description = lun_info.get('DESCRIPTION', '')
+        if len(description) <= (
+                constants.MAX_VOL_DESCRIPTION - len(volume.name) - 1):
+            description = volume.name + ' ' + description
+
+        lun_pool = lun_info.get('PARENTNAME')
+        LOG.debug("Storage pool of existing LUN %(lun)s is %(pool)s.",
+                  {"lun": lun_id, "pool": lun_pool})
+        if pool != lun_pool:
+            msg = (_("The specified LUN does not belong to the given "
+                     "pool: %s.") % pool)
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=external_ref, reason=msg)
+
+        # Run the other checks to determine whether this LUN can be imported.
+        self._check_lun_valid_for_manage(lun_info, external_ref)
+
+        if volume.volume_type_id:
+            old_opts, __ = self.get_lun_specs(lun_id)
+            volume_type = volume_types.get_volume_type(
+                None, volume.volume_type_id)
+            new_specs = volume_type.get('extra_specs')
+            new_opts = self._get_volume_params_from_specs(new_specs)
+            if ('LUNType' in new_opts and
+                    old_opts['LUNType'] != new_opts['LUNType']):
+                msg = (_("Can't import LUN %s to Cinder. "
+                         "LUN type mismatched.") % lun_id)
+                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
+
+            if (new_opts.get('dedup') and
+                    old_opts['dedup'] != new_opts['dedup']):
+                msg = (_("Can't import LUN %s to Cinder. "
+                         "Dedup function mismatched.") % lun_id)
+                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
+
+            if (new_opts.get('compression') and
+                    old_opts['compression'] != new_opts['compression']):
+                msg = (_("Can't import LUN %s to Cinder. "
+                         "Compression function mismatched.") % lun_id)
+                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
+
+            self._check_capability_support(new_opts, volume_type)
+
+        # Rename the LUN to make it manageable for Cinder.
+        new_name = huawei_utils.encode_name(volume.id)
+        LOG.debug("Rename LUN %(old_name)s to %(new_name)s.",
+                  {'old_name': lun_info.get('NAME'),
+                   'new_name': new_name})
+        self.client.rename_lun(lun_id, new_name, description)
+
+        model_update = {}
+        provider_location = {'huawei_lun_id': lun_id,
+                             'huawei_sn': self.sn,
+                             'huawei_lun_wwn': lun_info['WWN']}
+
+        # Handle volume type if specified.
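+        # Illustrative note, not driver logic: change_opts maps each
+        # tunable to None (no change) or to an (old, new) pair that
+        # _check_needed_changes diffs. A hypothetical filled-in dict
+        # (the replication model values are made up here):
+        #     {'policy': None, 'partitionid': None, 'cacheid': None,
+        #      'qos': None, 'replication_type': ('sync', 'async')}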
+ if volume.volume_type_id: + change_opts = { + 'policy': None, 'partitionid': None, 'cacheid': None, + 'qos': None, + 'replication_type': (constants.REPLICA_SYNC_MODEL, + new_opts['replication_type']), + } + change_opts = self._check_needed_changes( + lun_id, old_opts, new_opts, change_opts, volume_type) + + metro_info, replica_info = self.modify_lun( + lun_id, change_opts, False) + if metro_info: + provider_location.update(metro_info) + if replica_info: + model_update.update(replica_info) + + model_update['provider_location'] = huawei_utils.to_string( + **provider_location) + return model_update + + def _get_lun_info_by_ref(self, external_ref): + LOG.debug("Get external_ref: %s", external_ref) + name = external_ref.get('source-name') + id = external_ref.get('source-id') + if not (name or id): + msg = _('Must specify source-name or source-id.') + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + lun_id = id or self.client.get_lun_id_by_name(name) + if not lun_id: + msg = _("Can't find LUN on the array, please check the " + "source-name or source-id.") + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + lun_info = self.client.get_lun_info(lun_id) + return lun_info + + def unmanage(self, volume): + """Export Huawei volume from Cinder.""" + lun_id = self._check_volume_exist_on_array( + volume, constants.VOLUME_NOT_EXISTS_WARN) + if not lun_id: + return + + # Remove volume uuid from array lun description. + lun_info = self.client.get_lun_info(lun_id) + description = lun_info.get('DESCRIPTION', '') + des_list = description.split(volume.name) + des = ' '.join(des.strip() for des in des_list) + self.client.update_obj_desc(lun_id, des) + + LOG.debug("Unmanage volume: %s.", volume.id) + + def manage_existing_get_size(self, volume, external_ref): + """Get the size of the existing volume.""" + lun_info = self._get_lun_info_by_ref(external_ref) + size = float(lun_info.get('CAPACITY')) // constants.CAPACITY_UNIT + remainder = float(lun_info.get('CAPACITY')) % constants.CAPACITY_UNIT + if int(remainder) > 0: + msg = _("Volume size must be multiple of 1 GB.") + raise exception.VolumeBackendAPIException(data=msg) + return int(size) + + def _check_snapshot_valid_for_manage(self, snapshot_info, external_ref): + snapshot_id = snapshot_info.get('ID') + + # Check whether the snapshot is normal. + if snapshot_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH: + msg = _("Can't import snapshot %s to Cinder. " + "Snapshot status is not normal" + " or running status is not online.") % snapshot_id + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + if snapshot_info.get('EXPOSEDTOINITIATOR') != 'false': + msg = _("Can't import snapshot %s to Cinder. 
" + "Snapshot is exposed to initiator.") % snapshot_id + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + def _get_snapshot_info_by_ref(self, external_ref): + LOG.debug("Get snapshot external_ref: %s.", external_ref) + name = external_ref.get('source-name') + id = external_ref.get('source-id') + if not (name or id): + msg = _('Must specify snapshot source-name or source-id.') + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + snapshot_id = id or self.client.get_snapshot_id_by_name(name) + if not snapshot_id: + msg = _("Can't find snapshot on array, please check the " + "source-name or source-id.") + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + snapshot_info = self.client.get_snapshot_info(snapshot_id) + return snapshot_info + + def manage_existing_snapshot(self, snapshot, existing_ref): + snapshot_info = self._get_snapshot_info_by_ref(existing_ref) + snapshot_id = snapshot_info.get('ID') + + parent_lun_id, lun_wwn = huawei_utils.get_volume_lun_id( + self.client, snapshot.volume) + if parent_lun_id != snapshot_info.get('PARENTID'): + msg = (_("Can't import snapshot %s to Cinder. " + "Snapshot doesn't belong to volume."), snapshot_id) + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=msg) + + # Check whether this snapshot can be imported. + self._check_snapshot_valid_for_manage(snapshot_info, existing_ref) + + # Add snapshot uuid to array snapshot description and + # keep the original array snapshot description. + description = snapshot_info.get('DESCRIPTION', '') + if len(description) <= ( + constants.MAX_VOL_DESCRIPTION - len(snapshot.id) - 1): + description = snapshot.id + ' ' + description + + # Rename the snapshot to make it manageable for Cinder. + snapshot_name = huawei_utils.encode_name(snapshot.id) + self.client.rename_snapshot(snapshot_id, snapshot_name, description) + if snapshot_info.get('RUNNINGSTATUS') != constants.STATUS_ACTIVE: + self.client.activate_snapshot(snapshot_id) + + LOG.debug("Rename snapshot %(old_name)s to %(new_name)s.", + {'old_name': snapshot_info.get('NAME'), + 'new_name': snapshot_name}) + + location = huawei_utils.to_string( + huawei_snapshot_id=snapshot_id, + huawei_snapshot_wwn=snapshot_info['WWN']) + return {'provider_location': location} + + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + """Get the size of the existing snapshot.""" + snapshot_info = self._get_snapshot_info_by_ref(existing_ref) + size = (float(snapshot_info.get('USERCAPACITY')) + // constants.CAPACITY_UNIT) + remainder = (float(snapshot_info.get('USERCAPACITY')) + % constants.CAPACITY_UNIT) + if int(remainder) > 0: + msg = _("Snapshot size must be multiple of 1 GB.") + raise exception.VolumeBackendAPIException(data=msg) + return int(size) + + def unmanage_snapshot(self, snapshot): + """Unmanage the specified snapshot from Cinder management.""" + snapshot_id = self._check_snapshot_exist_on_array( + snapshot, constants.SNAPSHOT_NOT_EXISTS_WARN) + if not snapshot_id: + return + + # Remove snapshot uuid from array lun description. 
+        snapshot_info = self.client.get_snapshot_info(snapshot_id)
+        description = snapshot_info.get('DESCRIPTION', '')
+        des_list = description.split(snapshot.id)
+        des = ' '.join(des.strip() for des in des_list)
+        self.client.update_obj_desc(snapshot_id, des, constants.SNAPSHOT_TYPE)
+
+        LOG.debug("Unmanage snapshot: %s.", snapshot.id)
+
+    def remove_host_with_check(self, host_id, client):
+        wwns_in_host = (
+            client.get_host_fc_initiators(host_id))
+        iqns_in_host = (
+            client.get_host_iscsi_initiators(host_id))
+        if not (wwns_in_host or iqns_in_host or
+                client.is_host_associated_to_hostgroup(host_id)):
+            client.remove_host(host_id)
+
+    def _get_group_type(self, group):
+        opts = []
+        for vol_type in group.volume_types:
+            specs = vol_type.extra_specs
+            opts.append(self._get_volume_params_from_specs(specs))
+
+        return opts
+
+    def _check_group_type_support(self, opts, vol_type):
+        if not opts:
+            return False
+
+        for opt in opts:
+            if opt.get(vol_type) == 'true':
+                return True
+
+        return False
+
+    def _get_group_type_value(self, opts, vol_type):
+        if not opts:
+            return None
+
+        for opt in opts:
+            if vol_type in opt:
+                return opt[vol_type]
+
+    def create_group(self, context, group):
+        """Creates a group."""
+        if not volume_utils.is_group_a_cg_snapshot_type(group):
+            raise NotImplementedError()
+
+        model_update = {'status': fields.GroupStatus.AVAILABLE}
+        opts = self._get_group_type(group)
+        if (self._check_group_type_support(opts, 'hypermetro') and
+                self._check_group_type_support(opts, 'replication_enabled')):
+            err_msg = _("Hypermetro and Replication cannot be "
+                        "used in the same volume_type.")
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+        if self._check_group_type_support(opts, 'hypermetro'):
+            if not self.check_local_func_support("HyperMetro_ConsistentGroup"):
+                msg = (_("Can't create consistency group, the array does "
+                         "not support hypermetro consistent groups, "
+                         "group id: %(group_id)s.")
+                       % {"group_id": group.id})
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            if not self.check_rmt_func_support("HyperMetro_ConsistentGroup"):
+                msg = (_("Can't create consistency group, the remote array "
+                         "does not support hypermetro consistent groups, "
+                         "group id: %(group_id)s.")
+                       % {"group_id": group.id})
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            metro = hypermetro.HuaweiHyperMetro(self.client,
+                                                self.rmt_client,
+                                                self.configuration)
+            metro.create_consistencygroup(group)
+            return model_update
+
+        if self._check_group_type_support(opts, 'replication_enabled'):
+            if not self.check_local_func_support("CONSISTENTGROUP"):
+                msg = (_("Can't create consistency group, the array does "
+                         "not support replication consistent groups, "
+                         "group id: %(group_id)s.")
+                       % {"group_id": group.id})
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            if not self.check_replica_func_support("CONSISTENTGROUP"):
+                msg = (_("Can't create consistency group, the remote array "
+                         "does not support replication consistent groups, "
+                         "group id: %(group_id)s.")
+                       % {"group_id": group.id})
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            replica_model = self._get_group_type_value(
+                opts, 'replication_type')
+            if not replica_model:
+                replica_model = constants.REPLICA_ASYNC_MODEL
+            replicg = replication.ReplicaCG(self.client,
+                                            self.replica_client,
+                                            self.configuration)
+            replicg.create(group, replica_model)
+            return model_update
+
+        # Array will create CG at create_cgsnapshot time.
Cinder will + # maintain the CG and volumes relationship in the db. + return model_update + + def create_group_from_src(self, context, group, volumes, + group_snapshot=None, snapshots=None, + source_group=None, source_vols=None): + if not volume_utils.is_group_a_cg_snapshot_type(group): + raise NotImplementedError() + + model_update = self.create_group(context, group) + volumes_model_update = [] + delete_snapshots = False + + if not snapshots and source_vols: + snapshots = [] + for src_vol in source_vols: + vol_kwargs = { + 'id': src_vol.id, + 'provider_location': src_vol.provider_location, + } + snapshot_kwargs = {'id': six.text_type(uuid.uuid4()), + 'volume': objects.Volume(**vol_kwargs), + 'volume_size': src_vol.size} + snapshot = objects.Snapshot(**snapshot_kwargs) + snapshots.append(snapshot) + + snapshots_model_update = self._create_group_snapshot(snapshots) + for i, model in enumerate(snapshots_model_update): + snapshot = snapshots[i] + snapshot.provider_location = model['provider_location'] + + delete_snapshots = True + + if snapshots: + for i, vol in enumerate(volumes): + snapshot = snapshots[i] + vol_model_update = self.create_volume_from_snapshot( + vol, snapshot) + vol_model_update.update({'id': vol.id}) + volumes_model_update.append(vol_model_update) + + if delete_snapshots: + self._delete_group_snapshot(snapshots) + + return model_update, volumes_model_update + + def delete_group(self, context, group, volumes): + if not volume_utils.is_group_a_cg_snapshot_type(group): + raise NotImplementedError() + + opts = self._get_group_type(group) + model_update = {'status': fields.GroupStatus.DELETED} + volumes_model_update = [] + + if self._check_group_type_support(opts, 'hypermetro'): + metro = hypermetro.HuaweiHyperMetro(self.client, + self.rmt_client, + self.configuration) + metro.delete_consistencygroup(context, group, volumes) + + if self._check_group_type_support(opts, 'replication_enabled'): + replicg = replication.ReplicaCG(self.client, + self.replica_client, + self.configuration) + replicg.delete(group, volumes) + + for volume in volumes: + volume_model_update = {'id': volume.id} + try: + self.delete_volume(volume) + except Exception: + LOG.exception('Delete volume %s failed.', volume) + volume_model_update.update({'status': 'error_deleting'}) + else: + volume_model_update.update({'status': 'deleted'}) + + volumes_model_update.append(volume_model_update) + + return model_update, volumes_model_update + + def update_group(self, context, group, + add_volumes=None, remove_volumes=None): + if not volume_utils.is_group_a_cg_snapshot_type(group): + raise NotImplementedError() + + model_update = {'status': fields.GroupStatus.AVAILABLE} + opts = self._get_group_type(group) + if self._check_group_type_support(opts, 'hypermetro'): + metro = hypermetro.HuaweiHyperMetro(self.client, + self.rmt_client, + self.configuration) + metro.update_consistencygroup(context, group, + add_volumes, + remove_volumes) + return model_update, None, None + + if self._check_group_type_support(opts, 'replication_enabled'): + replica_model = self._get_group_type_value( + opts, 'replication_type') + if not replica_model: + replica_model = constants.REPLICA_ASYNC_MODEL + replicg = replication.ReplicaCG(self.client, + self.replica_client, + self.configuration) + replicg.update(group, add_volumes, remove_volumes, replica_model) + return model_update, None, None + + for volume in add_volumes: + try: + self._check_volume_exist_on_array( + volume, constants.VOLUME_NOT_EXISTS_RAISE) + except Exception as err: + raise 
exception.VolumeDriverException(message=err)
+
+        # Array will create CG at create_cgsnapshot time. Cinder will
+        # maintain the CG and volumes relationship in the db.
+        return model_update, None, None
+
+    def create_group_snapshot(self, context, group_snapshot, snapshots):
+        """Create group snapshot."""
+        if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
+            raise NotImplementedError()
+
+        LOG.info('Create group snapshot for group: %(group_id)s',
+                 {'group_id': group_snapshot.group_id})
+
+        try:
+            snapshots_model_update = self._create_group_snapshot(snapshots)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error("Create group snapshots failed. "
+                          "Group snapshot id: %s.", group_snapshot.id)
+
+        model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE}
+        return model_update, snapshots_model_update
+
+    def _create_group_snapshot(self, snapshots):
+        snapshots_model_update = []
+        added_snapshots_info = []
+
+        try:
+            for snapshot in snapshots:
+                snapshot_id = self._create_snapshot_base(snapshot)
+                info = self.client.get_snapshot_info(snapshot_id)
+                location = huawei_utils.to_string(
+                    huawei_snapshot_id=info['ID'],
+                    huawei_snapshot_wwn=info['WWN'])
+                snapshot_model_update = {
+                    'id': snapshot.id,
+                    'status': fields.SnapshotStatus.AVAILABLE,
+                    'provider_location': location,
+                }
+                snapshots_model_update.append(snapshot_model_update)
+                added_snapshots_info.append(info)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                for added_snapshot in added_snapshots_info:
+                    self.client.delete_snapshot(added_snapshot['ID'])
+
+        snapshot_ids = [added_snapshot['ID']
+                        for added_snapshot in added_snapshots_info]
+        try:
+            self.client.activate_snapshot(snapshot_ids)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error("Failed to activate group snapshots %s.",
+                          snapshot_ids)
+                for snapshot_id in snapshot_ids:
+                    self.client.delete_snapshot(snapshot_id)
+
+        return snapshots_model_update
+
+    def delete_group_snapshot(self, context, group_snapshot, snapshots):
+        """Delete group snapshot."""
+        if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
+            raise NotImplementedError()
+
+        LOG.info('Delete group snapshot %(snap_id)s for group: '
+                 '%(group_id)s',
+                 {'snap_id': group_snapshot.id,
+                  'group_id': group_snapshot.group_id})
+
+        try:
+            snapshots_model_update = self._delete_group_snapshot(snapshots)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error("Delete group snapshots failed. 
" + "Group snapshot id: %s", group_snapshot.id) + + model_update = {'status': fields.GroupSnapshotStatus.DELETED} + return model_update, snapshots_model_update + + def _delete_group_snapshot(self, snapshots): + snapshots_model_update = [] + for snapshot in snapshots: + self.delete_snapshot(snapshot) + snapshot_model_update = { + 'id': snapshot.id, + 'status': fields.SnapshotStatus.DELETED + } + snapshots_model_update.append(snapshot_model_update) + + return snapshots_model_update + + def _classify_volume(self, volumes): + normal_volumes = [] + replica_volumes = [] + + for v in volumes: + volume_type = self._get_volume_type(v) + opts = self._get_volume_params(volume_type) + if opts.get('replication_enabled') == 'true': + replica_volumes.append(v) + else: + normal_volumes.append(v) + + return normal_volumes, replica_volumes + + def _failback_normal_volumes(self, volumes): + volumes_update = [] + for v in volumes: + v_update = {} + v_update['volume_id'] = v.id + metadata = huawei_utils.get_volume_metadata(v) + old_status = 'available' + if 'old_status' in metadata: + old_status = metadata['old_status'] + del metadata['old_status'] + v_update['updates'] = {'status': old_status, + 'metadata': metadata} + volumes_update.append(v_update) + + return volumes_update + + def _failback(self, volumes): + if self.active_backend_id in ('', None): + return 'default', [] + + normal_volumes, replica_volumes = self._classify_volume(volumes) + volumes_update = [] + + replica_volumes_update = self.replica.failback(replica_volumes) + volumes_update.extend(replica_volumes_update) + + normal_volumes_update = self._failback_normal_volumes(normal_volumes) + volumes_update.extend(normal_volumes_update) + + self.active_backend_id = "" + secondary_id = 'default' + + # Switch array connection. + self.client, self.replica_client = self.replica_client, self.client + self.replica = replication.ReplicaPairManager(self.client, + self.replica_client, + self.configuration) + return secondary_id, volumes_update + + def _failover_normal_volumes(self, volumes): + volumes_update = [] + + for v in volumes: + v_update = {} + v_update['volume_id'] = v.id + metadata = huawei_utils.get_volume_metadata(v) + metadata.update({'old_status': v['status']}) + v_update['updates'] = {'status': 'error', + 'metadata': metadata} + volumes_update.append(v_update) + + return volumes_update + + def _failover(self, volumes): + if self.active_backend_id not in ('', None): + return self.replica_dev_conf['backend_id'], [] + + normal_volumes, replica_volumes = self._classify_volume(volumes) + volumes_update = [] + + replica_volumes_update = self.replica.failover(replica_volumes) + volumes_update.extend(replica_volumes_update) + + normal_volumes_update = self._failover_normal_volumes(normal_volumes) + volumes_update.extend(normal_volumes_update) + + self.active_backend_id = self.replica_dev_conf['backend_id'] + secondary_id = self.active_backend_id + + # Switch array connection. 
+ self.client, self.replica_client = self.replica_client, self.client + self.replica = replication.ReplicaPairManager(self.client, + self.replica_client, + self.configuration) + return secondary_id, volumes_update + + def failover_host(self, context, volumes, secondary_id=None, groups=None): + """Failover all volumes to secondary.""" + if secondary_id == 'default': + try: + secondary_id, volumes_update = self._failback(volumes) + except exception.VolumeBackendAPIException: + msg = _("Error encountered during failback.") + LOG.exception(msg) + raise exception.VolumeDriverException(data=msg) + elif (secondary_id == self.replica_dev_conf['backend_id'] + or secondary_id is None): + try: + secondary_id, volumes_update = self._failover(volumes) + except exception.VolumeBackendAPIException: + msg = _("Error encountered during failover.") + LOG.exception(msg) + raise exception.VolumeDriverException(data=msg) + else: + msg = _("Invalid secondary id %s.") % secondary_id + LOG.error(msg) + raise exception.InvalidReplicationTarget(reason=msg) + + return secondary_id, volumes_update, [] + + def initialize_connection_snapshot(self, snapshot, connector, **kwargs): + """Map a snapshot to a host and return target iSCSI information.""" + LOG.info(('initialize_connection_snapshot for snapshot: ' + '%(snapshot_id)s.') + % {'snapshot_id': snapshot.id}) + + # From the volume structure. + volume = Volume(id=snapshot.id, + provider_location=snapshot.provider_location, + lun_type=constants.SNAPSHOT_TYPE, + metadata=None) + + return self.initialize_connection(volume, connector) + + def terminate_connection_snapshot(self, snapshot, connector, **kwargs): + """Delete map between a snapshot and a host.""" + LOG.info(('terminate_connection_snapshot for snapshot: ' + '%(snapshot_id)s.') + % {'snapshot_id': snapshot.id}) + + # From the volume structure. + volume = Volume(id=snapshot.id, + provider_location=snapshot.provider_location, + lun_type=constants.SNAPSHOT_TYPE, + metadata=None) + + return self.terminate_connection(volume, connector) + + def get_lun_id_and_type(self, volume, action, local=True): + if hasattr(volume, 'lun_type'): + metadata = huawei_utils.get_snapshot_metadata(volume) + lun_id = metadata['huawei_snapshot_id'] + lun_type = constants.SNAPSHOT_TYPE + sp_info = self.client.get_snapshot_info(lun_id) + if not self.client.check_snapshot_exist(lun_id): + msg = ("Snapshot %s does not exist on the array." + % volume.id) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if (sp_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH or + sp_info.get('RUNNINGSTATUS') != constants.STATUS_ACTIVE): + msg = ("Snapshot %s status is not normal " + "or running status is not online." 
% lun_id) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + lun_id = self._check_volume_exist_on_array( + volume, action, local) + lun_type = constants.LUN_TYPE + + return lun_id, lun_type + + def _get_same_hostid(self, loc_fc_info, rmt_fc_info): + loc_aval_luns = loc_fc_info['aval_luns'] + loc_aval_luns = json.loads(loc_aval_luns) + + rmt_aval_luns = rmt_fc_info['aval_luns'] + rmt_aval_luns = json.loads(rmt_aval_luns) + same_host_id = None + + for i in range(1, 512): + if i in rmt_aval_luns and i in loc_aval_luns: + same_host_id = i + break + + LOG.info("The same hostid is: %s.", same_host_id) + if not same_host_id: + msg = _("Can't find the same host id from arrays.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return same_host_id + + def _get_all_fc_ports(self): + fc_ports = self.client.get_all_fc_ports() + + if self.rmt_client: + try: + rmt_fc_ports = self.rmt_client.get_all_fc_ports() + fc_ports.extend(rmt_fc_ports) + except Exception: + LOG.warning("Get HyperMetro remote storage FC port failed, " + "ignore it") + + if self.replica_client: + try: + rmt_fc_ports = self.replica_client.get_all_fc_ports() + fc_ports.extend(rmt_fc_ports) + except Exception: + LOG.warning("Get Replication remote storage FC port failed, " + "ignore it") + + return fc_ports + + def discover_storage_ports(self, details=False, fabric_map=False, + all_ports=False): + """Implementation for the discovery_driver method.""" + LOG.debug("Discover storage ports: %(details)s, %(fabric_map)s, " + "%(all_ports)s" + % {"details": details, + "fabric_map": fabric_map, + "all_ports": all_ports}) + ports = {} + fc_ports = self._get_all_fc_ports() + for fc_port in fc_ports: + LOG.debug("Port is: %s" % fc_port) + if (not all_ports and fc_port.get("RUNNINGSTATUS") not in + constants.FC_PORT_RUNNING_NORMAL): + continue + port = {"wwpn": str(fc_port["WWN"]), + "status": "online", + "location_info": str(fc_port["LOCATION"]), + } + if fc_port.get("HEALTHSTATUS") != constants.STATUS_HEALTH: + port["status"] = "down" + if details: + if fc_port["RUNSPEED"] != constants.FC_PORT_SPEED_ERROR: + port["speed"] = "%s Gbs" % ( + int(fc_port["RUNSPEED"]) // 1000) + else: + port["speed"] = "unknown" + port["conf_mode"] = fc_port["FCCONFMODE"] + port["port_switch"] = fc_port["PORTSWITCH"] + if all_ports: + port['host_io_permitted'] = (fc_port.get("RUNNINGSTATUS") == + constants.FC_PORT_STATUS_LINK_UP) + ports[port["wwpn"]] = port + if fabric_map: + ports = self._add_fabric_mapping(ports) + return ports + + def _get_tgt_wwns_from_storage(self, client, source_wwn): + tgt_port_wwns, init_targ_map = client.get_init_targ_map(source_wwn, + is_get_manage_tgt=True) + return tgt_port_wwns + + def _get_volume_connection_info(self, lun_map_host_info, lun_info): + connect_info = {} + itl_list = [] + for host_info in lun_map_host_info: + lun_map_host_id = host_info.get('ID') + lun_map_host_name = host_info.get('NAME') + wwns_in_host = self.client.get_host_fc_initiators( + lun_map_host_id) + LOG.info("the wwns_in_host is %s", wwns_in_host) + + source_wwn = huawei_utils.convert_connector_wwns(wwns_in_host) + LOG.info("the source_wwn is %s", source_wwn) + + tgt_wwns = self._get_tgt_wwns_from_storage(self.client, source_wwn) + tgt_wwns_remote = [] + hypermetro_pair = self.client.get_hypermetro_by_local_lun_id(lun_info["ID"]) + if self._is_hypermetro_enabled(hypermetro_pair): + tgt_wwns_remote = self._get_tgt_wwns_from_storage( + self.rmt_client, source_wwn) + + tgt_wwns += tgt_wwns_remote + + 
host_lun_id = self.client.get_host_lun_id(lun_map_host_id, lun_info["ID"]) + connect_object = { + 'source_wwn': source_wwn, + 'target_lun': host_lun_id, + 'host': lun_map_host_name, + 'target_wwn': tgt_wwns + } + LOG.info("get the connect_object is %s", connect_object) + connect_info[connect_object['host']] = connect_object + itl_obj = discovery_driver.ITLObject( + source_wwn, tgt_wwns, host_lun_id, + vios_host=lun_map_host_name) + itl_list.append(itl_obj) + + return connect_info, itl_list + + def _get_vol_info_for_manage(self, lun_info): + if lun_info.get("ID") is None: + return {} + lun_map_host_info = self.client.get_lun_map_host_info( + lun_info["ID"]) + vol = self._build_volume_info(lun_info) + if not lun_map_host_info: + return vol + connect_info, itl_list = self._get_volume_connection_info( + lun_map_host_info, lun_info) + vol['connection_info'] = connect_info + vol['itl_list'] = itl_list + for key, value in vol['restricted_metadata'].items(): + vol['restricted_metadata'][key] = value.upper() + return vol + + def _get_vols_info_from_vol_refs(self, vol_refs): + vols = [] + lun_id_filter_list = [] + for vol in vol_refs: + wwn = vol.get("uid") + if wwn is None: + continue + lun_info = self.client.get_lun_info_by_wwn(wwn.lower()) + lun_id = lun_info.get("ID") + if lun_id is None or lun_id in lun_id_filter_list: + continue + lun_id_filter_list.append(lun_id) + + vol = self._get_vol_info_for_manage(lun_info) + if not vol: + continue + vols.append(vol) + return vols, lun_id_filter_list + + def _get_vols_from_exist_host_info(self, exist_host_info, lun_id_filter_list): + vols = [] + for host_id in exist_host_info: + lun_in_host = self.client.get_host_lun_info(host_id) + for lun_info in lun_in_host: + lun_id = lun_info.get('ID') + if lun_id is None or lun_id in lun_id_filter_list: + continue + lun_id_filter_list.append(lun_id) + vol = self._get_vol_info_for_manage(lun_info) + if not vol: + continue + vols.append(vol) + return vols + + def _get_vols_from_filter(self, filter_set, vol_refs): + vols = [] + exist_host_info = {} + for wwn in filter_set: + ini = self.client.get_fc_initiator_info(wwn) + if ini and ini['ISFREE'] == 'false': + exist_host_info[ini['PARENTID']] = ini['PARENTNAME'] + + if not exist_host_info and not vol_refs: + return vols + + lun_id_filter_list = [] + if vol_refs: + vols_from_vol_refs, lun_id_filter_list = ( + self._get_vols_info_from_vol_refs(vol_refs)) + vols += vols_from_vol_refs + + vols_from_exist_host = self._get_vols_from_exist_host_info( + exist_host_info, lun_id_filter_list) + vols += vols_from_exist_host + return vols + + def get_volume_info(self, vol_refs, filter_set): + """ + Return volume information from the backend. + If filter set is passed get host info for each volume. 
+        For each volume the driver needs to return a dictionary containing
+        the following attributes:
+            name: The Name of the Volume defined on the Storage Provider
+            storage_pool: The Storage pool of the volume
+            status: The Status of the Volume, matching the status definition
+            uuid: The UUID of the VM when created through OS (Optional)
+            size: The Size of the Volume in GB
+            itl_list: A dictionary containing vscsi itl information
+            connection_info: Host and storage wwpns for volume connections
+            k2udid: Passed in from HMC for vscsi volumes
+            pg83NAA: Optional in cases where the unique identifier on the
+                storage is not same as pg83NAA
+            restricted_metadata: The Additional Meta-data from the Driver
+                vdisk_id: The Identifier for the Volume on the Back-end
+                vdisk_name: The Name of the Volume on the Back-end
+                vdisk_uid: The unique identifier in the storage backend
+                naa: The pg83 naa
+            support: Dictionary stating whether the Volume can be managed
+                status: Whether or not it is "supported" or "not_supported"
+                reasons: List of Text Strings as to why it isn't supported
+        """
+        if vol_refs or filter_set:
+            LOG.info("Filter Set %(filter_set)s "
+                     "vol_refs : %(vol_rf)s",
+                     {"filter_set": filter_set,
+                      "vol_rf": vol_refs})
+            vols = self._get_vols_from_filter(filter_set, vol_refs)
+            return vols
+
+        vols = []
+        pools = self.client.get_inband_pools()
+        for pool in pools:
+            for lun in self.client.get_all_luns(pool["ID"]):
+                vol = self._build_volume_info(lun)
+                vols.append(vol)
+        return vols
+
+    def _build_volume_info(self, lun):
+        size = int(lun["CAPACITY"]) // constants.CAPACITY_UNIT
+
+        volume = {
+            'name': lun["NAME"],
+            'storage_pool': lun["PARENTNAME"],
+            'status': fields.VolumeStatus.AVAILABLE,
+            'size': size,
+            'description': lun['DESCRIPTION'],
+            'is_mapped': strutils.bool_from_string(lun['EXPOSEDTOINITIATOR']),
+            'restricted_metadata': {
+                'vdisk_id': lun['WWN'],
+                'vdisk_name': lun['WWN'],
+                'vdisk_uid': lun['WWN'],
+                'volume_wwn': lun['WWN']
+            },
+            'metadata': {
+                'storage_pool': lun["PARENTNAME"],
+                'volume_wwn': lun['WWN']
+            },
+            'provider_location': lun['ID'],
+        }
+
+        self._check_lun_info(volume, lun)
+        self._check_volume_status(volume)
+        self._check_in_use(volume)
+        return volume
+
+    def _check_lun_info(self, volume, lun):
+
+        def check_health_status(lun):
+            return lun["HEALTHSTATUS"] == constants.LUN_HEALTH_STATUS_NORMAL
+
+        def check_running_status(lun):
+            return lun['RUNNINGSTATUS'] == constants.LUN_RUNNING_STATUS_ONLINE
+
+        def check_snapshot(lun):
+            # A volume with snapshots is not permitted to be managed.
+            snap_ids = lun.get('SNAPSHOTIDS')
+            return not snap_ids or snap_ids == '[]'
+
+        def check_function_type(lun):
+            # Dorado V6 scenario: only volumes can be managed.
+            return lun.get('functionType', '1') == constants.FUNCTION_TYPE_LUN
+
+        def check_usage_type(lun):
+            # Only a traditional LUN can be managed.
+            return lun['USAGETYPE'] == constants.LUN_USAGE_TYPE_TRADITIONAL
+
+        def check_has_rss_object(lun):
+            if 'HASRSSOBJECT' not in lun:
+                return True
+            hasrssobject = json.loads(lun['HASRSSOBJECT'])
+            hypermetro = hasrssobject.pop('HyperMetro', 'FALSE')
+            if hypermetro.lower() == 'true' and not self.rmt_client:
+                return False
+
+            for key, value in hasrssobject.items():
+                if value.lower() == "true":
+                    LOG.debug("%s is enabled, manage is not supported", key)
+                    return False
+            return True
+
+        funcs = (check_health_status, check_running_status, check_snapshot,
+                 check_function_type, check_usage_type, check_has_rss_object)
+        for func in
 funcs:
+            if not func(lun):
+                volume['status'] = fields.VolumeStatus.ERROR
+                LOG.debug("LUN(%s) info check: %s failed",
+                          lun["NAME"], func.__name__)
+                break
+
+    def _post_process_volume_info(self, volume_copy):
+        """
+        Additional information to be added to the volume before
+        returning from query_volumes
+        """
+        super(HuaweiBaseDriver, self)._post_process_volume_info(volume_copy)
+        LOG.info("Enter: _post_process_volume_info, volumes: %s", volume_copy)
+        lun_id = volume_copy['provider_location']
+        lun = self.client.get_lun_info(lun_id)
+
+        provider_location_update = self._build_provider_location(lun)
+        volume_copy.update(provider_location_update)
+
+        replication_update = self._build_replication_info(lun)
+        volume_copy.update(replication_update)
+        LOG.info("Exit: _post_process_volume_info, volumes: %s", volume_copy)
+
+    def _build_provider_location(self, lun):
+        hypermetro_pair = self.client.get_hypermetro_by_lun_name(lun['NAME'])
+        hypermetro_enabled = self._is_hypermetro_enabled(hypermetro_pair)
+        provider_location = {
+            'huawei_lun_id': lun['ID'],
+            'huawei_sn': self.sn,
+            'huawei_lun_wwn': lun['WWN'],
+            'hypermetro': hypermetro_enabled
+        }
+        if hypermetro_enabled:
+            provider_location['hypermetro_id'] = hypermetro_pair['ID']
+
+        provider_location_json = huawei_utils.to_string(**provider_location)
+        return {'provider_location': provider_location_json}
+
+    def _is_hypermetro_enabled(self, hypermetro_pair):
+        hypermetro_enabled = bool(hypermetro_pair)
+        if not hypermetro_enabled:
+            return False
+
+        # Raises an error here; the GUI will not complain about it.
+        if not self.rmt_client:
+            msg = _("Volume has HyperMetro enabled but the remote client "
+                    "is not configured in the backend")
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        if hypermetro_pair.get("CGID"):
+            msg = _("HyperMetro in a consistency group cannot be managed")
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        return True
+
+    def _build_replication_info(self, lun):
+        pair = self.client.get_pair_info_by_lun_id(lun['ID'])
+        replication_enabled = bool(pair)
+        if not replication_enabled:
+            return {}
+
+        msg = _("Volume with RemoteReplication is not supported")
+        raise exception.VolumeBackendAPIException(data=msg)
+
+    def check_for_deleted_volumes(self, context, volumes, tagged_vols=None):
+        """
+        Determines which of the Volumes no longer exist on the Provider.
+        The driver implementing this interface can override this method to
+        tell the manager which volumes are no longer found. If not
+        overridden, volumes will not be marked deleted out-of-band. A
+        volume must be reported as deleted on two subsequent calls for the
+        manager to mark it as deleted out-of-band.
+
+        :@param context: The security context to use.
+        :@param volumes: The volume objects to check for existence.
+        :@param tagged_vols: Optional param. If specified, it is a list
+                             of volumes that are already marked deleted
+                             out-of-band. The implementer has the option of
+                             "un-marking" these volumes if they are found
+                             again. This would not necessarily be applicable
+                             for providers that re-use unique device IDs.
+        @return: A list of volumes that have not been found on the back end.
+                 If any error occurs while compiling the list, the empty list
+                 is returned.
+        """
+        LOG.debug("Enter: check_for_deleted_volumes, volumes: %s, "
+                  "tagged_vols: %s", volumes, tagged_vols)
+        changed_vols = list()
+        # nothing to do if we didn't get any volumes passed in,
just return + if len(volumes) == 0: + return changed_vols + for volume in volumes: + if volume.get('replication_status') == 'failed-over': + return changed_vols + + wwns = self._get_all_lun_wwws() + for volume in volumes: + metadata = volume.get('volume_restricted_metadata', list()) + metadata = {m['key']: m['value'] for m in metadata} + wwn = metadata.get(RESTRICTED_METADATA_VDISK_UID_KEY, "") + if wwn.lower() not in wwns: + self._handle_out_of_band_deleted(metadata, volume) + changed_vols.append(volume) + + if changed_vols: + names = [vol["name"] for vol in changed_vols] + LOG.info("The following volumes do not exist: %s", names) + + return changed_vols + + def _handle_out_of_band_deleted(self, metadata, volume): + metadata.update(out_of_band_deleted="True") + self._update_restricted_metadata(volume['id'], metadata) + + def _get_all_lun_wwws(self): + pools = self.client.get_inband_pools() + return {lun['WWN'] for pool in pools for lun in + self.client.get_all_luns(pool["ID"])} + + +class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): + """ISCSI driver for Huawei storage arrays. + + Version history: + 1.0.0 - Initial driver + 1.1.0 - Provide Huawei OceanStor storage 18000 driver + 1.1.1 - Code refactor + CHAP support + Multiple pools support + ISCSI multipath support + SmartX support + Volume migration support + Volume retype support + 2.0.0 - Rename to HuaweiISCSIDriver + 2.0.1 - Manage/unmanage volume support + 2.0.2 - Refactor HuaweiISCSIDriver + 2.0.3 - Manage/unmanage snapshot support + 2.0.5 - Replication V2 support + 2.0.6 - Support iSCSI configuration in Replication + 2.0.7 - Hypermetro support + Hypermetro consistency group support + Consistency group support + Cgsnapshot support + 2.0.8 - Backup snapshot optimal path support + 2.0.9 - Support reporting disk type of pool + """ + + def __init__(self, *args, **kwargs): + super(HuaweiISCSIDriver, self).__init__(*args, **kwargs) + + def get_volume_stats(self, refresh=False): + """Get volume status.""" + data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or self.__class__.__name__ + data['storage_protocol'] = 'iSCSI' + data['driver_version'] = self.VERSION + data['vendor_name'] = 'Huawei' + return data + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def initialize_connection(self, volume, connector): + """Map a volume to a host and return target iSCSI information.""" + # Attach local lun. + iscsi_info = self._initialize_connection(volume, connector) + + # Attach remote lun if exists. 
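+        # Illustrative note, not driver logic: for a hypermetro volume
+        # the local and remote target lists are concatenated below so
+        # the initiator sees one multipath device, e.g. (made-up
+        # values):
+        #     target_iqns:    [local_iqn, remote_iqn]
+        #     target_portals: ['1.1.1.1:3260', '2.2.2.2:3260']
+        #     target_luns:    [1, 1]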
+ metadata = huawei_utils.get_lun_metadata(volume) + LOG.info("Attach Volume, metadata is: %s.", metadata) + if metadata.get('hypermetro'): + try: + rmt_iscsi_info = ( + self._initialize_connection(volume, connector, False)) + except Exception: + with excutils.save_and_reraise_exception(): + self._terminate_connection(volume, connector) + + multipath = connector.get('multipath', False) + if multipath: + target_iqn = [] + target_iqn.extend(iscsi_info['data']['target_iqns']) + target_iqn.extend(rmt_iscsi_info['data']['target_iqns']) + iscsi_info['data']['target_iqns'] = target_iqn + + target_portal = [] + target_portal.extend(iscsi_info['data']['target_portals']) + target_portal.extend(rmt_iscsi_info['data']['target_portals']) + iscsi_info['data']['target_portals'] = target_portal + iscsi_info['data']['target_luns'].extend( + rmt_iscsi_info['data']['target_luns']) + elif self.use_ultrapath: + target_iqn = [] + target_iqn.extend(iscsi_info['data']['target_iqns']) + target_iqn.extend(rmt_iscsi_info['data']['target_iqns']) + iscsi_info['data']['target_iqns'] = target_iqn + target_portal = [] + target_portal.extend(iscsi_info['data']['target_portals']) + target_portal.extend(rmt_iscsi_info['data']['target_portals']) + iscsi_info['data']['target_portals'] = target_portal + iscsi_info['data']['target_luns'].extend( + rmt_iscsi_info['data']['target_luns']) + else: + msg = (_("Hypermetro must use multipath or ultrapath.")) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.info('initialize_common_connection_iscsi, ' + 'return data is: %s.', iscsi_info) + return iscsi_info + + def _initialize_connection(self, volume, connector, local=True): + LOG.info('_initialize_connection, attach %(local)s volume.', + {'local': 'local' if local else 'remote'}) + + # Determine use which client, local or remote. + client = self.client if local else self.rmt_client + + lun_id, lun_type = self.get_lun_id_and_type( + volume, constants.VOLUME_NOT_EXISTS_RAISE, local) + lun_info = client.get_lun_info(lun_id, lun_type) + + initiator_name = connector['initiator'] + LOG.info( + 'initiator name: %(initiator_name)s, ' + 'LUN ID: %(lun_id)s, lun type: %(lun_type)s.', + {'initiator_name': initiator_name, + 'lun_id': lun_id, + 'lun_type': lun_type}) + + (iscsi_iqns, + target_ips, + portgroup_id) = client.get_iscsi_params(connector) + LOG.info('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, ' + 'target_ip: %(target_ip)s, ' + 'portgroup_id: %(portgroup_id)s.', + {'iscsi_iqn': iscsi_iqns, + 'target_ip': target_ips, + 'portgroup_id': portgroup_id}) + + # Create hostgroup if not exist. + host_id = client.add_host_with_check(connector['host']) + try: + client.ensure_initiator_added(initiator_name, host_id) + except Exception: + with excutils.save_and_reraise_exception(): + self.remove_host_with_check(host_id, client) + + hostgroup_id = client.add_host_to_hostgroup(host_id) + + metadata = huawei_utils.get_lun_metadata(volume) + hypermetro_lun = metadata.get('hypermetro') + + # Mapping lungroup and hostgroup to view. 
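+        # Illustrative note, not driver logic: a mapping view on the
+        # array ties together roughly three objects:
+        #     mapping view -> hostgroup (the host's initiators)
+        #                  -> lungroup  (the LUNs exported to the host)
+        #                  -> portgroup (optional, restricts the ports)
+        # do_mapping() below assembles this association.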
+        map_info = client.do_mapping(lun_id, hostgroup_id, host_id,
+                                     portgroup_id, lun_type, hypermetro_lun)
+
+        hostlun_id = client.get_host_lun_id(host_id, lun_id, lun_type)
+
+        LOG.info("initialize_connection, host lun id is: %s.",
+                 hostlun_id)
+
+        chapinfo = client.find_chap_info(client.iscsi_info, initiator_name)
+        if (not chapinfo
+                and client.is_initiator_used_chap(initiator_name)):
+            msg = (_("CHAP is not configured but initiator %s uses CHAP on "
+                     "the array, please check and remove CHAP for this "
+                     "initiator.")
+                   % initiator_name)
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        # Return iSCSI properties.
+        properties = {}
+        properties['map_info'] = map_info
+        properties['target_discovered'] = False
+        properties['volume_id'] = volume.id
+        multipath = connector.get('multipath', False)
+        hostlun_id = int(hostlun_id)
+        if multipath:
+            properties['target_iqns'] = [iqn for iqn in iscsi_iqns]
+            properties['target_portals'] = [
+                '%s:3260' % ip for ip in target_ips]
+            properties['target_luns'] = [hostlun_id] * len(target_ips)
+        elif self.use_ultrapath:
+            properties['target_iqns'] = [iqn for iqn in iscsi_iqns]
+            properties['target_portals'] = [
+                '%s:3260' % ip for ip in target_ips]
+            properties['target_luns'] = [hostlun_id] * len(target_ips)
+            properties['target_num'] = len(target_ips)
+            properties['libvirt_iscsi_use_ultrapath'] = self.use_ultrapath
+            properties['lun_wwn'] = lun_info['WWN']
+        else:
+            properties['target_portal'] = ('%s:3260' % target_ips[0])
+            properties['target_iqn'] = iscsi_iqns[0]
+            properties['target_lun'] = hostlun_id
+
+        LOG.info("initialize_connection success. Return data: %s.",
+                 properties)
+
+        # If CHAP is used, return the CHAP info.
+        if chapinfo:
+            chap_username, chap_password = chapinfo.split(';')
+            properties['auth_method'] = 'CHAP'
+            properties['auth_username'] = chap_username
+            properties['auth_password'] = chap_password
+
+        return {'driver_volume_type': 'iscsi', 'data': properties}
+
+    @coordination.synchronized('huawei-mapping-{connector[host]}')
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Delete map between a volume and a host."""
+        metadata = huawei_utils.get_lun_metadata(volume)
+        LOG.info("terminate_connection, metadata is: %s.", metadata)
+        self._terminate_connection(volume, connector)
+
+        if metadata.get('hypermetro'):
+            self._terminate_connection(volume, connector, False)
+
+        LOG.info('terminate_connection success.')
+
+    def _terminate_connection(self, volume, connector, local=True):
+        LOG.info('_terminate_connection, detach %(local)s volume.',
+                 {'local': 'local' if local else 'remote'})
+
+        # Determine which client to use, local or remote.
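+        # Illustrative note, not driver logic: teardown below runs
+        # roughly in reverse order of setup: remove the LUN from the
+        # lungroup, detach portgroup/lungroup/hostgroup from the
+        # mapping view once the lungroup is empty, then delete the
+        # view and, when nothing else references it, the host itself.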
+ client = self.client if local else self.rmt_client + + lun_id, lun_type = self.get_lun_id_and_type( + volume, constants.VOLUME_NOT_EXISTS_WARN, local) + + initiator_name = connector['initiator'] + host_name = connector['host'] + lungroup_id = None + + LOG.info( + 'terminate_connection: initiator name: %(ini)s, ' + 'LUN ID: %(lunid)s, lun type: %(lun_type)s.', + {'ini': initiator_name, + 'lunid': lun_id, + 'lun_type': lun_type}) + + portgroup = None + portgroup_id = None + view_id = None + left_lunnum = -1 + for ini in client.iscsi_info: + if ini['Name'] == initiator_name: + for key in ini: + if key == 'TargetPortGroup': + portgroup = ini['TargetPortGroup'] + break + + if portgroup: + portgroup_id = client.get_tgt_port_group(portgroup) + host_id = huawei_utils.get_host_id(client, host_name) + if host_id: + mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id + view_id = client.find_mapping_view(mapping_view_name) + if view_id: + lungroup_id = client.find_lungroup_from_map(view_id) + + # Remove lun from lungroup. + if lun_id and lungroup_id: + lungroup_ids = client.get_lungroupids_by_lunid(lun_id, lun_type) + if lungroup_id in lungroup_ids: + client.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) + else: + LOG.warning("LUN is not in lungroup. " + "LUN ID: %(lun_id)s. " + "Lungroup id: %(lungroup_id)s.", + {"lun_id": lun_id, + "lungroup_id": lungroup_id}) + + # Remove portgroup from mapping view if no lun left in lungroup. + if lungroup_id: + left_lunnum = client.get_obj_count_from_lungroup(lungroup_id) + + if portgroup_id and view_id and (int(left_lunnum) <= 0): + if client.is_portgroup_associated_to_view(view_id, portgroup_id): + client.delete_portgroup_mapping_view(view_id, portgroup_id) + if view_id and (int(left_lunnum) <= 0): + client.remove_chap(initiator_name) + + if client.lungroup_associated(view_id, lungroup_id): + client.delete_lungroup_mapping_view(view_id, lungroup_id) + client.delete_lungroup(lungroup_id) + if client.is_initiator_associated_to_host(initiator_name, host_id): + client.remove_iscsi_from_host(initiator_name) + hostgroup_name = constants.HOSTGROUP_PREFIX + host_id + hostgroup_id = client.find_hostgroup(hostgroup_name) + if hostgroup_id: + if client.hostgroup_associated(view_id, hostgroup_id): + client.delete_hostgoup_mapping_view(view_id, hostgroup_id) + client.remove_host_from_hostgroup(hostgroup_id, host_id) + client.delete_hostgroup(hostgroup_id) + client.remove_host(host_id) + client.delete_mapping_view(view_id) + + +class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): + """FC driver for Huawei OceanStor storage arrays. 
+ + Version history: + 1.0.0 - Initial driver + 1.1.0 - Provide Huawei OceanStor 18000 storage volume driver + 1.1.1 - Code refactor + Multiple pools support + SmartX support + Volume migration support + Volume retype support + FC zone enhancement + Volume hypermetro support + 2.0.0 - Rename to HuaweiFCDriver + 2.0.1 - Manage/unmanage volume support + 2.0.2 - Refactor HuaweiFCDriver + 2.0.3 - Manage/unmanage snapshot support + 2.0.4 - Balanced FC port selection + 2.0.5 - Replication V2 support + 2.0.7 - Hypermetro support + Hypermetro consistency group support + Consistency group support + Cgsnapshot support + 2.0.8 - Backup snapshot optimal path support + 2.0.9 - Support reporting disk type of pool + """ + + def __init__(self, *args, **kwargs): + super(HuaweiFCDriver, self).__init__(*args, **kwargs) + self.fcsan = None + + def get_volume_stats(self, refresh=False): + """Get volume status.""" + data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or self.__class__.__name__ + data['storage_protocol'] = 'FC' + data['driver_version'] = self.VERSION + data['vendor_name'] = 'Huawei' + return data + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def initialize_connection(self, volume, connector): + metadata = huawei_utils.get_lun_metadata(volume) + LOG.info("initialize_connection, metadata is: %s.", metadata) + fc_info = self._initialize_connection(volume, connector) + if metadata.get('hypermetro'): + rmt_fc_info = self._initialize_connection(volume, connector, False) + fc_info = self._construct_fc_initialize_info(fc_info, rmt_fc_info) + + LOG.info("Return FC info is: %s.", fc_info) + fczm_utils.add_fc_zone(fc_info) + return fc_info + + def _get_hypermetro_rmt_lun_id(self, volume): + hypermetro_id = huawei_utils.get_hypermetro_id(volume) + if hypermetro_id: + metro_info = self.rmt_client.get_hypermetro_by_id(hypermetro_id) + else: + lun_name = huawei_utils.get_lun_name(self.rmt_client, volume, True) + metro_info = self.rmt_client.get_hypermetro_by_lun_name(lun_name) + + if not metro_info: + msg = _('Volume %s is not in hypermetro pair') % volume.id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return metro_info['LOCALOBJID'] + + def _initialize_connection(self, volume, connector, local=True): + LOG.info('FC _initialize_connection, attach %(local)s volume.', + {'local': 'local' if local else 'remote'}) + # Determine use which client, local or remote. 
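+        # Illustrative note, not driver logic: for the remote leg of a
+        # hypermetro volume the LUN id differs per array, so it is
+        # resolved through the hypermetro pair, e.g. (made-up fields):
+        #     pair = {'LOCALOBJID': '11', 'REMOTEOBJID': '42'}
+        # where LOCALOBJID is the LUN id as seen by the array that was
+        # queried, here the remote client.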
+ client = self.client if local else self.rmt_client + + if not local: + lun_type = constants.LUN_TYPE + lun_id = self._get_hypermetro_rmt_lun_id(volume) + else: + lun_id, lun_type = self.get_lun_id_and_type( + volume, constants.VOLUME_NOT_EXISTS_RAISE) + + lun_info = client.get_lun_info(lun_id, lun_type) + wwns = huawei_utils.convert_connector_wwns(connector['wwpns']) + LOG.info( + 'initialize_connection, initiator: %(wwpns)s,' + ' LUN ID: %(lun_id)s, lun type: %(lun_type)s.', + {'wwpns': wwns, + 'lun_id': lun_id, + 'lun_type': lun_type}) + + self_create_host_name = huawei_utils.encode_host_name( + 'O_s_' + connector['host']) + host_id, exist_host_name = huawei_utils.get_exist_host(client, wwns) + portg_id = None + + if not host_id: + host_id = client.add_host_with_check(self_create_host_name) + + if not self.fcsan: + self.fcsan = fczm_utils.create_lookup_service() + + if exist_host_name and exist_host_name != self_create_host_name: + tgt_port_wwns, init_targ_map = client.get_init_targ_map(wwns) + elif self.fcsan: + # Use FC switch. + zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan, client) + try: + (tgt_port_wwns, portg_id, init_targ_map) = ( + zone_helper.build_ini_targ_map(wwns, host_id)) + except Exception as err: + self.remove_host_with_check(host_id, client) + msg = _('build_ini_targ_map fails. %s') % err + raise exception.VolumeBackendAPIException(data=msg) + + for ini in init_targ_map: + client.ensure_fc_initiator_added(ini, host_id) + else: + # Not use FC switch. + online_wwns_in_host = ( + client.get_host_online_fc_initiators(host_id)) + online_free_wwns = client.get_online_free_wwns() + fc_initiators_on_array = client.get_fc_initiator_on_array() + wwns = [i for i in wwns if i in fc_initiators_on_array] + LOG.info("initialize_connection, " + "online initiators on the array: %s.", wwns) + + for wwn in wwns: + if (wwn not in online_wwns_in_host + and wwn not in online_free_wwns): + wwns_in_host = client.get_host_fc_initiators(host_id) + iqns_in_host = client.get_host_iscsi_initiators(host_id) + if not (wwns_in_host or iqns_in_host or + client.is_host_associated_to_hostgroup(host_id)): + client.remove_host(host_id) + + msg = (("Can't add FC initiator %(wwn)s to host %(host)s," + " please check if this initiator has been added " + "to other host or isn't present on array.") + % {"wwn": wwn, "host": host_id}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + for wwn in wwns: + client.ensure_fc_initiator_added(wwn, host_id) + + (tgt_port_wwns, init_targ_map) = ( + client.get_init_targ_map(wwns)) + + # Add host into hostgroup. + hostgroup_id = client.add_host_to_hostgroup(host_id) + + metadata = huawei_utils.get_lun_metadata(volume) + LOG.info("initialize_connection, metadata is: %s.", metadata) + hypermetro_lun = metadata.get('hypermetro') + + map_info = client.do_mapping(lun_id, hostgroup_id, + host_id, portg_id, + lun_type, hypermetro_lun) + host_lun_id = client.get_host_lun_id(host_id, lun_id, + lun_type) + + # Return FC properties. + fc_info = {'driver_volume_type': 'fibre_channel', + 'data': {'target_lun': int(host_lun_id), + 'target_discovered': True, + 'target_wwn': tgt_port_wwns, + 'volume_id': volume.id, + 'map_info': map_info, + 'lun_wwn': lun_info['WWN'], + 'libvirt_iscsi_use_ultrapath': + self.use_ultrapath}, } + if init_targ_map: + fc_info['data']['initiator_target_map'] = init_targ_map + + LOG.info("Return %(local)s FC info is: %(info)s." 
% + {'local': 'local' if local else 'remote', "info": fc_info}) + return fc_info + + def _construct_fc_initialize_info(self, loc_fc_info, rmt_fc_info): + loc_tgt_wwn = loc_fc_info['data']['target_wwn'] + rmt_tgt_wwn = rmt_fc_info['data']['target_wwn'] + + local_ini_tgt_map = loc_fc_info['data']['initiator_target_map'] \ + if loc_fc_info['data'].get('initiator_target_map') else {} + rmt_ini_tgt_map = rmt_fc_info['data']['initiator_target_map'] \ + if rmt_fc_info['data'].get('initiator_target_map') else {} + + loc_fc_info['data']['target_wwn'] = (loc_tgt_wwn + rmt_tgt_wwn) + for k in rmt_ini_tgt_map: + local_ini_tgt_map[k] = (local_ini_tgt_map.get(k, []) + + rmt_ini_tgt_map[k]) + + loc_map_info = loc_fc_info['data']['map_info'] + rmt_map_info = rmt_fc_info['data']['map_info'] + same_host_id = self._get_same_hostid(loc_map_info, + rmt_map_info) + self.client.change_hostlun_id(loc_map_info, same_host_id) + self.rmt_client.change_hostlun_id(rmt_map_info, same_host_id) + + loc_fc_info['data']['target_lun'] = same_host_id + + return loc_fc_info + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def terminate_connection(self, volume, connector, **kwargs): + """Delete map between a volume and a host.""" + metadata = huawei_utils.get_lun_metadata(volume) + LOG.info("Detach Volume, metadata is: %s.", metadata) + fc_info = self._terminate_connection(volume, connector) + if metadata.get('hypermetro'): + rmt_fc_info = self._terminate_connection(volume, connector, False) + if self.fcsan: + fc_info = self._construct_fc_terminate_info( + fc_info, rmt_fc_info) + LOG.info("terminate_connection, return data is: %s.", + fc_info) + fczm_utils.remove_fc_zone(fc_info) + + return fc_info + + def _construct_fc_terminate_info(self, loc_fc_info, rmt_fc_info): + local_ini_tgt_map = loc_fc_info['data']['initiator_target_map'] \ + if loc_fc_info['data'].get('initiator_target_map') else {} + rmt_ini_tgt_map = rmt_fc_info['data']['initiator_target_map'] \ + if rmt_fc_info['data'].get('initiator_target_map') else {} + + for k in rmt_ini_tgt_map: + local_ini_tgt_map[k] = (local_ini_tgt_map.get(k, []) + + rmt_ini_tgt_map[k]) + return loc_fc_info + + def _terminate_connection(self, volume, connector, local=True): + LOG.info('FC _terminate_connection, detach %(local)s volume.', + {'local': 'local' if local else 'remote'}) + # Determine use which client, local or remote. 
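+        # Illustrative note, not driver logic: initiator_target_map,
+        # merged in _construct_fc_terminate_info above for hypermetro,
+        # maps each initiator WWPN to the target WWPNs it is zoned
+        # with, e.g. (made-up WWPNs):
+        #     {'100000051e000001': ['2000643e8c000001',
+        #                           '2000643e8c000002']}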
+ client = self.client if local else self.rmt_client + + lun_id, lun_type = self.get_lun_id_and_type( + volume, constants.VOLUME_NOT_EXISTS_WARN, local) + wwns = huawei_utils.convert_connector_wwns(connector['wwpns']) + + left_lunnum = -1 + lungroup_id = None + view_id = None + LOG.info('terminate_connection: wwpns: %(wwns)s, ' + 'LUN ID: %(lun_id)s, lun type: %(lun_type)s.', + {'wwns': wwns, 'lun_id': lun_id, 'lun_type': lun_type}) + + self_create_host_name = huawei_utils.encode_host_name( + 'O_s_' + connector['host']) + host_id, host_name = huawei_utils.get_exist_host(client, wwns) + if not host_id: + host_id = huawei_utils.get_host_id(client, + self_create_host_name) + is_self_create_host = True + else: + is_self_create_host = host_name == self_create_host_name + + if host_id: + mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id + view_id = client.find_mapping_view(mapping_view_name) + if view_id: + lungroup_id = client.find_lungroup_from_map(view_id) + + if lun_id and lungroup_id: + lungroup_ids = client.get_lungroupids_by_lunid(lun_id, + lun_type) + if lungroup_id in lungroup_ids: + client.remove_lun_from_lungroup(lungroup_id, + lun_id, + lun_type) + else: + LOG.warning("LUN is not in lungroup. " + "LUN ID: %(lun_id)s. " + "Lungroup id: %(lungroup_id)s.", + {"lun_id": lun_id, + "lungroup_id": lungroup_id}) + + else: + LOG.warning("Can't find lun on the array.") + + if lungroup_id: + left_lunnum = client.get_obj_count_from_lungroup(lungroup_id) + + if int(left_lunnum) > 0: + fc_info = {'driver_volume_type': 'fibre_channel', + 'data': {}} + else: + fc_info, portg_id = self._delete_zone_and_remove_fc_initiators( + wwns, host_id, is_self_create_host, client) + if lungroup_id: + if view_id and client.lungroup_associated( + view_id, lungroup_id): + client.delete_lungroup_mapping_view(view_id, + lungroup_id) + client.delete_lungroup(lungroup_id) + if portg_id: + if view_id and client.is_portgroup_associated_to_view( + view_id, portg_id): + client.delete_portgroup_mapping_view(view_id, + portg_id) + client.delete_portgroup(portg_id) + + if host_id: + hostgroup_name = constants.HOSTGROUP_PREFIX + host_id + hostgroup_id = client.find_hostgroup(hostgroup_name) + if hostgroup_id: + if view_id and client.hostgroup_associated( + view_id, hostgroup_id): + client.delete_hostgoup_mapping_view( + view_id, hostgroup_id) + client.remove_host_from_hostgroup( + hostgroup_id, host_id) + client.delete_hostgroup(hostgroup_id) + + if not client.check_fc_initiators_exist_in_host( + host_id): + client.remove_host(host_id) + + if view_id: + client.delete_mapping_view(view_id) + return fc_info + + def _delete_zone_and_remove_fc_initiators(self, wwns, host_id, + is_self_create_host, client): + # Get tgt_port_wwns and init_targ_map to remove zone. + portg_id = None + init_targ_map = {} + + if not self.fcsan: + self.fcsan = fczm_utils.create_lookup_service() + + if self.fcsan: + zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan, client) + portg_id, init_targ_map = zone_helper.get_init_targ_map(wwns, + host_id) + + # Remove the initiators from host if need. 
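+ # Only strip initiators from a host this driver created itself
+ # (is_self_create_host); initiators on a pre-existing host may be
+ # managed outside of Cinder and are left in place.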
+ if host_id and is_self_create_host: + fc_initiators = client.get_host_fc_initiators(host_id) + for wwn in wwns: + if wwn in fc_initiators: + client.remove_fc_from_host(wwn) + + info = {'driver_volume_type': 'fibre_channel', + 'data': {'initiator_target_map': init_targ_map}} + return info, portg_id diff --git a/PowerVC/huawei_t.py b/PowerVC/huawei_t.py new file mode 100644 index 0000000..a31c55b --- /dev/null +++ b/PowerVC/huawei_t.py @@ -0,0 +1,602 @@ +# Copyright (c) 2013 Huawei Technologies Co., Ltd. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Volume Drivers for Huawei OceanStor T series storage arrays. +""" + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils + +from cinder import exception +from cinder.i18n import _ +from cinder import utils +from cinder.volume import driver +from cinder.volume.drivers.huawei.extend import fc_zone_helper +from cinder.volume.drivers.huawei import ssh_client +from cinder.zonemanager import utils as fczm_utils + +LOG = logging.getLogger(__name__) + +FC_PORT_CONNECTED = '10' +contrs = ['A', 'B'] + +zone_manager_opts = [ + cfg.StrOpt('zone_driver', + default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver' + '.BrcdFCZoneDriver', + help='FC Zone Driver responsible for zone management') +] +huawei_opts = [ + cfg.StrOpt('cinder_huawei_conf_file', + default='/etc/cinder/cinder_huawei_conf.xml', + help='The configuration file for the Cinder Huawei driver.') +] + +CONF = cfg.CONF +CONF.register_opts(huawei_opts) + + +class HuaweiTISCSIDriver(driver.ISCSIDriver): + """ISCSI driver for Huawei OceanStor T series storage arrays.""" + + VERSION = '1.1.0' + + def __init__(self, *args, **kwargs): + super(HuaweiTISCSIDriver, self).__init__(*args, **kwargs) + self.configuration = kwargs.get('configuration', None) + if not self.configuration: + msg = (_('_instantiate_driver: configuration not found.')) + raise exception.InvalidInput(reason=msg) + + self.configuration.append_config_values(huawei_opts) + + def do_setup(self, context): + """Instantiate common class.""" + self.sshclient = ssh_client.TseriesClient( + configuration=self.configuration) + self.sshclient.do_setup(context) + self.sshclient.check_storage_pools() + + def check_for_setup_error(self): + """Check something while starting.""" + self.sshclient.check_for_setup_error() + + def create_volume(self, volume): + """Create a new volume.""" + return self.sshclient.create_volume(volume) + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot.""" + return self.sshclient.create_volume_from_snapshot(volume, snapshot) + + def create_cloned_volume(self, volume, src_vref): + """Create a clone of the specified volume.""" + return self.sshclient.create_cloned_volume(volume, src_vref) + + def extend_volume(self, volume, new_size): + """Extend a volume.""" + self.sshclient.extend_volume(volume, new_size) + + def delete_volume(self, volume): + """Delete a 
volume.""" + self.sshclient.delete_volume(volume) + + def create_export(self, context, volume, connector=None): + """Export the volume.""" + pass + + def ensure_export(self, context, volume): + """Synchronously recreate an export for a volume.""" + pass + + def remove_export(self, context, volume): + """Remove an export for a volume.""" + pass + + def create_snapshot(self, snapshot): + """Create a snapshot.""" + snapshot_id = self.sshclient.create_snapshot(snapshot) + return {'provider_location': snapshot_id} + + def delete_snapshot(self, snapshot): + """Delete a snapshot.""" + self.sshclient.delete_snapshot(snapshot) + + def initialize_ultrapath_connection(self, volume, connector): + """Map a volume to a host and return target iSCSI information.""" + def get_targets_ips_info(initiator): + iscsi_conf = self._get_iscsi_conf(self.configuration) + target_ip = [] + + if iscsi_conf['DefaultTargetIP']: + for ip in iscsi_conf['DefaultTargetIP'].split(','): + target_ip.append(ip) + + if not target_ip: + msg = (_('get_targets_ips_info: Failed to get target IP ' + 'for initiator %(ini)s, please check config file.') + % {'ini': initiator}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + return self.sshclient.get_tgt_iqn_ultrapath( + map(lambda x: x.strip(), target_ip)) + + msg = (_('initialize_multipath_connection: volume name: %(vol)s, ' + 'host: %(host)s, initiator: %(ini)s') + % {'vol': volume['name'], + 'host': connector['host'], + 'ini': connector['initiator']}) + LOG.debug(msg) + self.sshclient.update_login_info() + ips_info = get_targets_ips_info(connector['initiator']) + + # First, add a host if not added before. + host_id = self.sshclient.add_host(connector['host'], connector['ip'], + connector['initiator']) + + iscsi_conf = self._get_iscsi_conf(self.configuration) + chapinfo = self.sshclient.find_chap_info(iscsi_conf, + connector['initiator']) + used = self.sshclient.is_initiator_used_chap(connector['initiator']) + if not chapinfo and used: + msg = (_("Chap is not configed but initiator %s used chap on " + "array, please cheak and remove chap for this initiator.") + % connector['initiator']) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # Then, add the iSCSI port to the host. + self.sshclient.add_iscsi_port_to_host(host_id, connector, chapinfo) + + # Finally, map the volume to the host. + lun_id = self.sshclient.check_volume_exist_on_array(volume) + if not lun_id: + msg = _("Volume %s not exists on the array.") % volume['id'] + raise exception.VolumeBackendAPIException(data=msg) + + hostlun_id = self.sshclient.map_volume(host_id, lun_id) + + # Change LUN ctr for better performance, just for single path. 
+ lun_details = self.sshclient.get_lun_details(lun_id)
+
+ target_portal_list = []
+ target_iqn_list = []
+ for info in ips_info:
+ target_portal_list.append('%s:%s' % (info[1], '3260'))
+ target_iqn_list.append(info[0])
+ properties = {}
+ properties['target_discovered'] = False
+ properties['target_portal'] = target_portal_list
+ properties['target_iqn'] = target_iqn_list
+ properties['target_lun'] = int(hostlun_id)
+ properties['volume_id'] = volume['id']
+ properties['lun_wwn'] = lun_details['LUNWWN']
+ properties['target_num'] = len(ips_info)
+ properties['description'] = 'huawei'
+
+ if chapinfo:
+ properties['auth_method'] = 'CHAP'
+ properties['auth_username'] = chapinfo[0]
+ properties['auth_password'] = chapinfo[1]
+
+ return {'driver_volume_type': 'iscsi', 'data': properties}
+
+ def initialize_common_connection(self, volume, connector):
+ """Map a volume to a host and return target iSCSI information."""
+ msg = (_('initialize_common_connection: volume name: %(vol)s, '
+ 'host: %(host)s, initiator: %(ini)s')
+ % {'vol': volume['name'],
+ 'host': connector['host'],
+ 'ini': connector['initiator']})
+ LOG.debug(msg)
+ self.sshclient.update_login_info()
+ (iscsi_iqn, target_ip, port_ctr) = (
+ self._get_iscsi_params(connector['initiator']))
+
+ # First, add a host if not added before.
+ host_id = self.sshclient.add_host(connector['host'], connector['ip'],
+ connector['initiator'])
+
+ iscsi_conf = self._get_iscsi_conf(self.configuration)
+ chapinfo = self.sshclient.find_chap_info(iscsi_conf,
+ connector['initiator'])
+ used = self.sshclient.is_initiator_used_chap(connector['initiator'])
+ if not chapinfo and used:
+ msg = (_("CHAP is not configured, but initiator %s uses CHAP on "
+ "the array, please check and remove CHAP for this "
+ "initiator.")
+ % connector['initiator'])
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ # Then, add the iSCSI port to the host.
+ self.sshclient.add_iscsi_port_to_host(host_id, connector, chapinfo)
+
+ # Finally, map the volume to the host.
+ lun_id = self.sshclient.check_volume_exist_on_array(volume)
+ if not lun_id:
+ msg = _("Volume %s does not exist on the array.") % volume['id']
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ hostlun_id = self.sshclient.map_volume(host_id, lun_id)
+
+ # Change LUN controller for better performance, just for single path.
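+ # Illustrative shape of the single-path connection info returned
+ # below (all values are made up for illustration):
+ # {'driver_volume_type': 'iscsi',
+ # 'data': {'target_portal': '192.168.1.10:3260',
+ # 'target_iqn': 'iqn.2006-08.com.example:target0',
+ # 'target_lun': 1,
+ # 'auth_method': 'CHAP'}}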
+ lun_details = self.sshclient.get_lun_details(lun_id) + + properties = {} + properties['target_discovered'] = False + properties['target_portal'] = ('%s:%s' % (target_ip, '3260')) + properties['target_iqn'] = iscsi_iqn + properties['target_lun'] = int(hostlun_id) + properties['volume_id'] = volume['id'] + properties['lun_wwn'] = lun_details['LUNWWN'] + properties['description'] = 'huawei' + + if chapinfo: + properties['auth_method'] = 'CHAP' + properties['auth_username'] = chapinfo[0] + properties['auth_password'] = chapinfo[1] + + return {'driver_volume_type': 'iscsi', 'data': properties} + + @utils.synchronized('huawei_t_mount', external=False) + def initialize_connection(self, volume, connector): + """Map a volume to a host and return target iSCSI information.""" + if 'nova_use_ultrapath' in connector: + if connector.get('nova_use_ultrapath'): + return self.initialize_ultrapath_connection(volume, connector) + else: + return self.initialize_common_connection(volume, connector) + + if self.configuration.safe_get("use_ultrapath_for_image_xfer"): + return self.initialize_ultrapath_connection(volume, connector) + return self.initialize_common_connection(volume, connector) + + def _get_iscsi_params(self, initiator): + """Get target iSCSI params, including iqn and IP.""" + iscsi_conf = self._get_iscsi_conf(self.configuration) + target_ip = None + for ini in iscsi_conf['Initiator']: + if ini['Name'] == initiator: + target_ip = ini['TargetIP'] + break + # If didn't specify target IP for some initiator, use default IP. + if not target_ip: + if iscsi_conf['DefaultTargetIP']: + target_ip = iscsi_conf['DefaultTargetIP'] + + else: + msg = (_('_get_iscsi_params: Failed to get target IP ' + 'for initiator %(ini)s, please check config file.') + % {'ini': initiator}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + (target_iqn, port_ctr) = self.sshclient.get_tgt_iqn(target_ip) + return (target_iqn, target_ip, port_ctr) + + def _get_iscsi_conf(self, configuration): + """Get iSCSI info from config file. + + This function returns a dict: + {'DefaultTargetIP': '11.11.11.11', + 'Initiator': [{'Name': 'iqn.xxxxxx.1', 'TargetIP': '11.11.11.12'}, + {'Name': 'iqn.xxxxxx.2', 'TargetIP': '11.11.11.13'} + ] + } + + """ + + iscsiinfo = {} + config_file = configuration.cinder_huawei_conf_file + root = self.sshclient.parse_xml_file(config_file) + + default_ip = root.findtext('iSCSI/DefaultTargetIP') + if default_ip: + iscsiinfo['DefaultTargetIP'] = default_ip.strip() + else: + iscsiinfo['DefaultTargetIP'] = None + initiator_list = [] + tmp_dic = {} + for dic in root.findall('iSCSI/Initiator'): + # Strip the values of dict. 
+ # Use a fresh dict for each initiator so the appended entries
+ # don't alias one another.
+ tmp_dic = {}
+ for k, v in dic.items():
+ tmp_dic[k] = v.strip()
+ initiator_list.append(tmp_dic)
+ iscsiinfo['Initiator'] = initiator_list
+ return iscsiinfo
+
+ @utils.synchronized('huawei_t_mount', external=False)
+ def terminate_connection(self, volume, connector, **kwargs):
+ """Terminate the map."""
+ # Check the connector, as we can't get the initiator name during
+ # local_delete, nor the whole info in force-delete.
+ if ('host' not in connector) or ('initiator' not in connector):
+ LOG.info("terminate_connection: delete or force delete.")
+ return
+
+ host_name = connector['host']
+ iqn = connector['initiator']
+ LOG.debug('terminate_connection: volume: %(vol)s, host: %(host)s, '
+ 'connector: %(initiator)s',
+ {'vol': volume['name'],
+ 'host': host_name,
+ 'initiator': iqn})
+
+ self.sshclient.update_login_info()
+ lun_id = self.sshclient.check_volume_exist_on_array(volume)
+ iscsi_conf = self._get_iscsi_conf(self.configuration)
+ chapinfo = self.sshclient.find_chap_info(iscsi_conf,
+ connector['initiator'])
+
+ if not lun_id:
+ LOG.warning("Volume %s does not exist on the array.",
+ volume['id'])
+ host_id = self.sshclient.get_host_id(host_name, iqn)
+ self.sshclient.remove_map(lun_id, host_id)
+
+ if (host_id is not None
+ and not self.sshclient.get_host_map_info(host_id)):
+ if (chapinfo and self.sshclient._chapuser_added_to_initiator(
+ connector['initiator'], chapinfo[0])):
+ self.sshclient._remove_chap(connector['initiator'], chapinfo)
+
+ info = {'driver_volume_type': 'iSCSI',
+ 'data': {'iqn': iqn}}
+ LOG.info('terminate_connection, return data is: %s.', info)
+ return info
+
+ def _remove_iscsi_port(self, hostid, initiator):
+ """Remove iSCSI ports and delete host."""
+ # Delete the host initiator if no LUN is mapped to it.
+ port_num = 0
+ port_info = self.sshclient.get_host_port_info(hostid)
+ if port_info:
+ port_num = len(port_info)
+ for port in port_info:
+ if port[2] == initiator:
+ self.sshclient.delete_hostport(port[0])
+ port_num -= 1
+ break
+ else:
+ LOG.warning('_remove_iscsi_port: iSCSI port was not found '
+ 'on host %(hostid)s.', {'hostid': hostid})
+
+ # Delete host if no initiator is added to it.
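+ # A host object with no remaining ports is an empty shell; deleting
+ # it keeps stale hosts from accumulating on the array.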
+ if port_num == 0: + self.sshclient.delete_host(hostid) + + def get_volume_stats(self, refresh=False): + """Get volume stats.""" + self._stats = self.sshclient.get_volume_stats(refresh) + self._stats['storage_protocol'] = 'iSCSI' + self._stats['driver_version'] = self.VERSION + backend_name = self.configuration.safe_get('volume_backend_name') + self._stats['volume_backend_name'] = (backend_name or + self.__class__.__name__) + return self._stats + + +class HuaweiTFCDriver(driver.FibreChannelDriver): + """FC driver for Huawei OceanStor T series storage arrays.""" + + VERSION = '1.0.0' + + def __init__(self, *args, **kwargs): + super(HuaweiTFCDriver, self).__init__(*args, **kwargs) + self.configuration = kwargs.get('configuration', None) + if not self.configuration: + msg = (_('_instantiate_driver: configuration not found.')) + raise exception.InvalidInput(reason=msg) + # zone manager + self.zm = None + + self.configuration.append_config_values(huawei_opts) + + def do_setup(self, context): + """Instantiate common class.""" + self.sshclient = ssh_client.TseriesClient( + configuration=self.configuration) + self.sshclient.do_setup(context) + self.sshclient.check_storage_pools() + + def check_for_setup_error(self): + """Check something while starting.""" + self.sshclient.check_for_setup_error() + + def create_volume(self, volume): + """Create a new volume.""" + return self.sshclient.create_volume(volume) + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot.""" + return self.sshclient.create_volume_from_snapshot(volume, snapshot) + + def create_cloned_volume(self, volume, src_vref): + """Create a clone of the specified volume.""" + return self.sshclient.create_cloned_volume(volume, src_vref) + + def extend_volume(self, volume, new_size): + """Extend a volume.""" + self.sshclient.extend_volume(volume, new_size) + + def delete_volume(self, volume): + """Delete a volume.""" + self.sshclient.delete_volume(volume) + + def create_export(self, context, volume, connector=None): + """Export the volume.""" + pass + + def ensure_export(self, context, volume): + """Synchronously recreate an export for a volume.""" + pass + + def remove_export(self, context, volume): + """Remove an export for a volume.""" + pass + + def create_snapshot(self, snapshot): + """Create a snapshot.""" + snapshot_id = self.sshclient.create_snapshot(snapshot) + return {'provider_location': snapshot_id} + + def delete_snapshot(self, snapshot): + """Delete a snapshot.""" + self.sshclient.delete_snapshot(snapshot) + + def validate_connector(self, connector): + """Check for wwpns in connector.""" + if 'wwpns' not in connector: + err_msg = _('validate_connector: The FC driver requires the' + ' wwpns in the connector.') + LOG.error(err_msg) + raise exception.InvalidConnectorException(missing='wwpns') + + @utils.synchronized('huawei_t_mount', external=False) + def initialize_connection(self, volume, connector): + """Create FC connection between a volume and a host.""" + LOG.debug('initialize_connection: volume name: %(vol)s, ' + 'host: %(host)s, initiator: %(wwn)s' + % {'vol': volume['name'], + 'host': connector['host'], + 'wwn': connector['wwpns']}) + lun_id = self.sshclient.check_volume_exist_on_array(volume) + if not lun_id: + msg = _("Volume %s not exists on the array.") % volume['id'] + raise exception.VolumeBackendAPIException(data=msg) + + self.sshclient.update_login_info() + # First, add a host if it is not added before. 
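+ # The FC flow mirrors the iSCSI one above: create or find the host,
+ # attach the initiators (WWPNs instead of IQNs), then map the LUN.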
+ host_id = self.sshclient.add_host(connector['host'], connector['ip']) + # Then, add free FC ports to the host. + wwns = connector['wwpns'] + if not self.zm: + self.zm = fczm_utils.create_zone_manager() + if self.zm: + # Use FC switch + zone_helper = fc_zone_helper.FCZoneHelper(self.zm, self.sshclient) + port_list = self.sshclient.get_all_fc_ports_from_array() + (tgt_port_wwns, + init_targ_map) = zone_helper.build_ini_tgt_map(wwns, host_id, + port_list, True) + + else: + free_wwns = self.sshclient.get_connected_free_wwns() + for wwn in free_wwns: + if wwn in wwns: + self.sshclient.add_fc_port_to_host(host_id, wwn) + fc_port_details = self.sshclient.get_host_port_details(host_id) + tgt_port_wwns = self._get_tgt_fc_port_wwns(fc_port_details) + + LOG.debug('initialize_connection: Target FC ports WWNS: %s' + % tgt_port_wwns) + + try: + hostlun_id = self.sshclient.map_volume(host_id, lun_id) + except Exception: + with excutils.save_and_reraise_exception(): + # Remove the FC port from the host if the map failed. + self._remove_fc_ports(host_id, wwns) + + properties = {} + properties['target_discovered'] = False + properties['target_wwn'] = tgt_port_wwns + properties['target_lun'] = int(hostlun_id) + properties['volume_id'] = volume['id'] + + return {'driver_volume_type': 'fibre_channel', + 'data': properties} + + def _get_tgt_fc_port_wwns(self, port_details): + wwns = [] + for port in port_details: + wwns.append(port['TargetWWN']) + return wwns + + @utils.synchronized('huawei_t_mount', external=False) + def terminate_connection(self, volume, connector, **kwargs): + """Terminate the map.""" + wwns = connector['wwpns'] + host_name = connector['host'] + LOG.debug('terminate_connection: volume: %(vol)s, host: %(host)s, ' + 'connector: %(wwpns)s' + % {'vol': volume['name'], + 'host': host_name, + 'wwpns': wwns}) + + lun_id = self.sshclient.check_volume_exist_on_array(volume) + if not lun_id: + LOG.warning("Volume %s not exists on the array.", + volume['id']) + + self.sshclient.update_login_info() + host_id = self.sshclient.get_host_id(host_name) + self.sshclient.remove_map(lun_id, host_id) + + # Remove all FC ports and delete the host if no volume mapping to it. 
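+ # Teardown happens in reverse order of initialize_connection: the
+ # map was removed above; ports, zone and host are removed below only
+ # when the host has no remaining mappings.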
+ if host_id and not self.sshclient.get_host_map_info(host_id): + self._delete_zone_and_remove_initiators(wwns, host_id) + + info = {'driver_volume_type': 'fibre_channel', + 'data': {'wwns': wwns}} + LOG.info('terminate_connection, return data is: %s.', info) + return info + + def _delete_zone_and_remove_initiators(self, wwns, host_id): + if host_id is None: + return + + self._remove_fc_ports(host_id, wwns) + + if not self.zm: + self.zm = fczm_utils.create_zone_manager() + if self.zm: + # Use FC switch, need to delete zone + # Use FC switch + zone_helper = fc_zone_helper.FCZoneHelper(self.zm, self.sshclient) + port_list = self.sshclient.get_all_fc_ports_from_array() + (tgt_port_wwns, + init_targ_map) = zone_helper.build_ini_tgt_map(wwns, host_id, + port_list, True) + self.zm.delete_connection(init_targ_map) + + def _remove_fc_ports(self, hostid, wwns): + """Remove FC ports and delete host.""" + port_num = 0 + port_info = self.sshclient.get_host_port_info(hostid) + if port_info: + port_num = len(port_info) + for port in port_info: + if port[2] in wwns: + self.sshclient.delete_hostport(port[0]) + port_num -= 1 + else: + LOG.warning('_remove_fc_ports: FC port was not found ' + 'on host %(hostid)s.', {'hostid': hostid}) + + if port_num == 0: + self.sshclient.delete_host(hostid) + + def get_volume_stats(self, refresh=False): + """Get volume stats.""" + self._stats = self.sshclient.get_volume_stats(refresh) + self._stats['storage_protocol'] = 'FC' + self._stats['driver_version'] = self.VERSION + backend_name = self.configuration.safe_get('volume_backend_name') + self._stats['volume_backend_name'] = (backend_name or + self.__class__.__name__) + return self._stats diff --git a/PowerVC/huawei_utils.py b/PowerVC/huawei_utils.py new file mode 100644 index 0000000..d9109c4 --- /dev/null +++ b/PowerVC/huawei_utils.py @@ -0,0 +1,305 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
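+"""Utility helpers shared by the Huawei PowerVC drivers: name encoding,
+volume/snapshot metadata parsing and generic wait loops."""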
+
+import hashlib
+import json
+import six
+import time
+
+from oslo_log import log as logging
+from oslo_service import loopingcall
+from oslo_utils import units
+
+from cinder import exception
+from cinder.i18n import _
+from cinder import objects
+from cinder.volume.drivers.huawei import constants
+
+LOG = logging.getLogger(__name__)
+
+
+def encode_name(id):
+ encoded_name = hashlib.md5(id.encode('utf-8')).hexdigest()
+ prefix = id.split('-')[0] + '-'
+ postfix = encoded_name[:constants.MAX_NAME_LENGTH - len(prefix)]
+ return prefix + postfix
+
+
+def old_encode_name(id):
+ pre_name = id.split("-")[0]
+ vol_encoded = six.text_type(hash(id))
+ if vol_encoded.startswith('-'):
+ newuuid = pre_name + vol_encoded
+ else:
+ newuuid = pre_name + '-' + vol_encoded
+ return newuuid
+
+
+def encode_host_name(name):
+ if len(name) > constants.MAX_NAME_LENGTH:
+ return name[:constants.MAX_NAME_LENGTH]
+ return name
+
+
+def old_encode_host_name(name):
+ if name and len(name) > constants.MAX_NAME_LENGTH:
+ name = six.text_type(hash(name))
+ return name
+
+
+def wait_for_condition(func, interval, timeout):
+ start_time = time.time()
+
+ def _inner():
+ try:
+ res = func()
+ except Exception as ex:
+ raise exception.VolumeBackendAPIException(data=ex)
+
+ if res:
+ raise loopingcall.LoopingCallDone()
+
+ if int(time.time()) - start_time > timeout:
+ msg = (_('wait_for_condition: %s timed out.')
+ % func.__name__)
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ timer = loopingcall.FixedIntervalLoopingCall(_inner)
+ timer.start(interval=interval).wait()
+
+
+def get_volume_size(volume):
+ """Calculate the volume size.
+
+ We should divide the given volume size by 512, because the 18000
+ system calculates volume size in sectors of 512 bytes.
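+ For example, a 1 GiB volume maps to 1073741824 / 512 = 2097152
+ sectors.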
+ """ + volume_size = units.Gi // 512 # 1G + if int(volume.size) != 0: + volume_size = int(volume.size) * units.Gi // 512 + + return volume_size + + +def get_volume_metadata(volume): + if isinstance(volume, objects.Volume): + return volume.metadata + if volume.get('volume_metadata'): + return {item['key']: item['value'] for item in + volume['volume_metadata']} + return {} + + +def get_admin_metadata(volume): + admin_metadata = {} + if 'admin_metadata' in volume: + admin_metadata = volume.admin_metadata + elif 'volume_admin_metadata' in volume: + metadata = volume.get('volume_admin_metadata', []) + admin_metadata = {item['key']: item['value'] for item in metadata} + + LOG.debug("Volume ID: %(id)s, admin_metadata: %(admin_metadata)s.", + {"id": volume.id, "admin_metadata": admin_metadata}) + return admin_metadata + + +def get_snapshot_metadata_value(snapshot): + if type(snapshot) is objects.Snapshot: + return snapshot.metadata + + if 'snapshot_metadata' in snapshot: + metadata = snapshot.snapshot_metadata + return {item['key']: item['value'] for item in metadata} + + return {} + + +def convert_connector_wwns(wwns): + if wwns: + return [wwn.lower() for wwn in wwns] + + +def to_string(**kwargs): + return json.dumps(kwargs) if kwargs else '' + + +def get_hypermetro_id(volume): + return get_lun_metadata(volume).get('hypermetro_id') + + +def get_lun_metadata(volume): + if not volume.provider_location: + return {} + + try: + info = json.loads(volume.provider_location) + except Exception as err: + LOG.warning("get_lun_metadata get provider_location error, params: " + "%(loc)s, reason: %(err)s", + {"loc": volume.provider_location, "err": err}) + return {} + + if isinstance(info, dict): + if "huawei" in volume.provider_location: + return info + else: + return {} + + # To keep compatible with old driver version + admin_metadata = get_admin_metadata(volume) + metadata = get_volume_metadata(volume) + return {'huawei_lun_id': six.text_type(info), + 'huawei_lun_wwn': admin_metadata.get('huawei_lun_wwn'), + 'huawei_sn': metadata.get('huawei_sn'), + 'hypermetro': True if metadata.get('hypermetro_id') else False + } + + +def get_snapshot_metadata(snapshot): + if not snapshot.provider_location: + return {} + + info = json.loads(snapshot.provider_location) + if isinstance(info, dict): + return info + + # To keep compatible with old driver version + metadata = get_snapshot_metadata_value(snapshot) + return {'huawei_snapshot_id': six.text_type(info), + 'huawei_snapshot_wwn': metadata.get('huawei_snapshot_wwn'), + } + + +def get_volume_lun_id(client, volume): + metadata = get_lun_metadata(volume) + + # First try the new encoded way. + volume_name = encode_name(volume.id) + lun_id = client.get_lun_id_by_name(volume_name) + + # If new encoded way not found, try the old encoded way. 
+ if not lun_id:
+ volume_name = old_encode_name(volume.id)
+ lun_id = client.get_lun_id_by_name(volume_name)
+
+ if not lun_id:
+ lun_id = metadata.get('huawei_lun_id')
+
+ return lun_id, metadata.get('huawei_lun_wwn')
+
+
+def get_lun_name(client, volume, raise_when_empty=False):
+ lun_info = get_lun_info(client, volume, raise_when_empty)
+ return lun_info.get("NAME")
+
+
+def get_lun_info(client, volume, raise_when_empty=False):
+ lun_info = _get_lun_info(client, volume)
+
+ if not lun_info and raise_when_empty:
+ msg = (_('Failed to get info of LUN %s.') % volume.id)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ return lun_info
+
+
+def _get_lun_info(client, volume):
+ # Get the LUN by the encoded volume.id first.
+ volume_name = encode_name(volume.id)
+ lun_info = client.get_lun_info_by_name(volume_name)
+ if lun_info:
+ return lun_info
+
+ metadata = get_lun_metadata(volume)
+ if not metadata:
+ return {}
+
+ lun_id = metadata.get('huawei_lun_id')
+ lun_wwn = metadata.get('huawei_lun_wwn')
+ try:
+ lun_info = client.get_lun_info(lun_id)
+ if lun_info and lun_wwn == lun_info['WWN']:
+ return lun_info
+ except exception.VolumeBackendAPIException as e:
+ LOG.warning("Ignore exception when getting LUN info, detail: %s",
+ e.msg)
+
+ return {}
+
+
+def get_snapshot_id(client, snapshot):
+ metadata = get_snapshot_metadata(snapshot)
+ snapshot_id = metadata.get('huawei_snapshot_id')
+
+ # First try the new encoded way.
+ if not snapshot_id:
+ name = encode_name(snapshot.id)
+ snapshot_id = client.get_snapshot_id_by_name(name)
+
+ # If new encoded way not found, try the old encoded way.
+ if not snapshot_id:
+ name = old_encode_name(snapshot.id)
+ snapshot_id = client.get_snapshot_id_by_name(name)
+
+ return snapshot_id, metadata.get('huawei_snapshot_wwn')
+
+
+def get_host_id(client, host_name):
+ encoded_name = encode_host_name(host_name)
+ host_id = client.get_host_id_by_name(encoded_name)
+ if encoded_name == host_name:
+ return host_id
+
+ if not host_id:
+ encoded_name = old_encode_host_name(host_name)
+ host_id = client.get_host_id_by_name(encoded_name)
+
+ return host_id
+
+
+def check_feature_available(feature_status, features):
+ for f in features:
+ if feature_status.get(f) in constants.AVAILABLE_FEATURE_STATUS:
+ return True
+
+ return False
+
+
+def get_exist_host(client, wwns):
+ exist_host_ids = set()
+ exist_host_names = set()
+ for wwn in wwns:
+ ini = client.get_fc_initiator(wwn)
+ if ini and ini['ISFREE'] == 'false':
+ exist_host_ids.add(ini['PARENTID'])
+ exist_host_names.add(ini['PARENTNAME'])
+
+ if len(exist_host_ids) > 1:
+ msg = _('More than one host is associated with initiators %s.'
+ ) % wwns
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ if not exist_host_ids:
+ return None, None
+
+ return exist_host_ids.pop(), exist_host_names.pop()
+
+
+def is_support_clone_pair(client):
+ array_info = client.get_array_info()
+ version_info = array_info['PRODUCTVERSION']
+ return version_info >= constants.SUPPORT_CLONE_PAIR_VERSION
diff --git a/PowerVC/hypermetro.py b/PowerVC/hypermetro.py
new file mode 100644
index 0000000..3ee7b10
--- /dev/null
+++ b/PowerVC/hypermetro.py
@@ -0,0 +1,346 @@
+# Copyright (c) 2016 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging +from oslo_utils import excutils + +from cinder import exception +from cinder.i18n import _ +from cinder import utils +from cinder.volume.drivers.huawei import constants +from cinder.volume.drivers.huawei import huawei_utils + +LOG = logging.getLogger(__name__) + + +class HuaweiHyperMetro(object): + + def __init__(self, client, rmt_client, configuration): + self.client = client + self.rmt_client = rmt_client + self.configuration = configuration + + def create_hypermetro(self, local_lun_id, lun_params, is_sync=False): + """Create hypermetro.""" + + try: + # Check remote metro domain is valid. + domain_id = self._valid_rmt_metro_domain() + + # Get the remote pool info. + config_pool = self.rmt_client.storage_pools[0] + remote_pool = self.rmt_client.get_all_pools() + pool = self.rmt_client.get_pool_info(config_pool, remote_pool) + if not pool: + err_msg = _("Remote pool cannot be found.") + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + # Create remote lun. + lun_params['PARENTID'] = pool['ID'] + remotelun_info = self.rmt_client.create_lun(lun_params) + remote_lun_id = remotelun_info['ID'] + + # Get hypermetro domain. + try: + self._wait_volume_ready(local_lun_id, True) + self._wait_volume_ready(remote_lun_id, False) + hypermetro = self._create_hypermetro_pair(domain_id, + local_lun_id, + remote_lun_id, + is_sync) + if is_sync: + try: + self._sync_metro(hypermetro['ID']) + except Exception as err: + with excutils.save_and_reraise_exception(): + LOG.error('Start synchronization failed. ' + 'Error: %s.', err) + self.check_metro_need_to_stop(hypermetro['ID']) + self.client.delete_hypermetro(hypermetro['ID']) + + LOG.info("Hypermetro id: %(metro_id)s. " + "Remote lun id: %(remote_lun_id)s.", + {'metro_id': hypermetro['ID'], + 'remote_lun_id': remote_lun_id}) + + return hypermetro['ID'] + except exception.VolumeBackendAPIException as err: + self.rmt_client.delete_lun(remote_lun_id) + msg = _('Create hypermetro error. %s.') % err + raise exception.VolumeBackendAPIException(data=msg) + except exception.VolumeBackendAPIException: + raise + + def delete_hypermetro(self, volume): + """Delete hypermetro.""" + lun_name = huawei_utils.get_lun_name(self.client, volume) + if not lun_name: + return + + hypermetro = self.client.get_hypermetro_by_lun_name(lun_name) + if not hypermetro: + return + + metro_id = hypermetro['ID'] + remote_lun_id = hypermetro['REMOTEOBJID'] + + # Delete hypermetro and remote lun. 
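+ # The pair is stopped (split) first if it is still running or
+ # syncing, then the pair and its remote LUN are deleted.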
+ self.check_metro_need_to_stop(metro_id, hypermetro) + self.client.delete_hypermetro(metro_id) + self.rmt_client.delete_lun(remote_lun_id) + + @utils.synchronized('huawei_create_hypermetro_pair', external=True) + def _create_hypermetro_pair(self, domain_id, lun_id, remote_lun_id, + is_sync=False): + """Create a HyperMetroPair.""" + hcp_param = {"DOMAINID": domain_id, + "HCRESOURCETYPE": '1', + "ISFIRSTSYNC": False, + "LOCALOBJID": lun_id, + "RECONVERYPOLICY": '1', + "REMOTEOBJID": remote_lun_id, + "SPEED": '4'} + if is_sync: + hcp_param.update({"ISFIRSTSYNC": True}) + + return self.client.create_hypermetro(hcp_param) + + def _wait_volume_ready(self, lun_id, local=True): + wait_interval = self.configuration.lun_ready_wait_interval + client = self.client if local else self.rmt_client + + def _volume_ready(): + result = client.get_lun_info(lun_id) + if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH + and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): + return True + return False + + huawei_utils.wait_for_condition(_volume_ready, + wait_interval, + wait_interval * 10) + + def create_consistencygroup(self, group): + LOG.info("Create Consistency Group: %(group)s.", + {'group': group['id']}) + group_name = huawei_utils.encode_name(group['id']) + domain_id = self._valid_rmt_metro_domain() + self.client.create_metrogroup(group_name, group['id'], domain_id) + + def delete_consistencygroup(self, context, group, volumes): + LOG.info("Delete Consistency Group: %(group)s.", + {'group': group['id']}) + metrogroup_id = self.check_consistencygroup_need_to_stop(group) + if not metrogroup_id: + return + + # Remove pair from metrogroup. + for volume in volumes: + metadata = huawei_utils.get_lun_metadata(volume) + if not metadata.get('hypermetro'): + continue + + lun_name = huawei_utils.get_lun_name(self.client, volume, True) + hypermetro = self.client.get_hypermetro_by_lun_name(lun_name) + if not hypermetro: + continue + + metro_id = hypermetro['ID'] + if self._check_metro_in_cg(metro_id, metrogroup_id): + self.client.remove_metro_from_metrogroup(metrogroup_id, + metro_id) + + # Delete metrogroup. + self.client.delete_metrogroup(metrogroup_id) + + def _ensure_hypermetro_added_to_cg(self, metro_id, metrogroup_id): + def _check_added(): + return self._check_metro_in_cg(metro_id, metrogroup_id) + + huawei_utils.wait_for_condition(_check_added, + constants.DEFAULT_WAIT_INTERVAL, + constants.DEFAULT_WAIT_INTERVAL * 10) + + def _ensure_hypermetro_removed_from_cg(self, metro_id, metrogroup_id): + def _check_removed(): + return not self._check_metro_in_cg(metro_id, metrogroup_id) + + huawei_utils.wait_for_condition(_check_removed, + constants.DEFAULT_WAIT_INTERVAL, + constants.DEFAULT_WAIT_INTERVAL * 10) + + def update_consistencygroup(self, context, group, + add_volumes, remove_volumes): + LOG.info("Update Consistency Group: %(group)s. 
" + "This adds or removes volumes from a CG.", + {'group': group['id']}) + + metrogroup_id = self.check_consistencygroup_need_to_stop(group) + if not metrogroup_id: + msg = _("The CG does not exist on array.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # Deal with add volumes to CG + for volume in add_volumes: + metadata = huawei_utils.get_lun_metadata(volume) + if not metadata.get('hypermetro'): + err_msg = _("Volume %s is not in hypermetro pair.") % volume.id + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + lun_name = huawei_utils.get_lun_name(self.client, volume, True) + hypermetro = self.client.get_hypermetro_by_lun_name(lun_name) + if not hypermetro: + err_msg = _("Volume %s is not in hypermetro pair.") % volume.id + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + metro_id = hypermetro['ID'] + if not self._check_metro_in_cg(metro_id, metrogroup_id): + self.check_metro_need_to_stop(metro_id) + self.client.add_metro_to_metrogroup(metrogroup_id, + metro_id) + self._ensure_hypermetro_added_to_cg( + metro_id, metrogroup_id) + + # Deal with remove volumes from CG + for volume in remove_volumes: + metadata = huawei_utils.get_lun_metadata(volume) + if not metadata.get('hypermetro'): + continue + + lun_name = huawei_utils.get_lun_name(self.client, volume, True) + hypermetro = self.client.get_hypermetro_by_lun_name(lun_name) + if not hypermetro: + continue + + metro_id = hypermetro['ID'] + if self._check_metro_in_cg(metro_id, metrogroup_id): + self.check_metro_need_to_stop(metro_id) + self.client.remove_metro_from_metrogroup(metrogroup_id, + metro_id) + self._ensure_hypermetro_removed_from_cg( + metro_id, metrogroup_id) + self.client.sync_hypermetro(metro_id) + + new_group_info = self.client.get_metrogroup_by_id(metrogroup_id) + is_empty = new_group_info["ISEMPTY"] + if is_empty == 'false': + self.client.sync_metrogroup(metrogroup_id) + + def add_hypermetro_to_consistencygroup(self, group, metro_id): + metrogroup_id = self.check_consistencygroup_need_to_stop(group) + if metrogroup_id: + self.check_metro_need_to_stop(metro_id) + self.client.add_metro_to_metrogroup(metrogroup_id, metro_id) + self._ensure_hypermetro_added_to_cg(metro_id, metrogroup_id) + try: + self.client.sync_metrogroup(metrogroup_id) + except exception.VolumeBackendAPIException: + # Ignore this sync error. 
+ LOG.warning('Resync metro group %(group)s failed '
+ 'after adding new metro %(metro)s.',
+ {'group': metrogroup_id,
+ 'metro': metro_id})
+
+ def check_metro_need_to_stop(self, metro_id, metro_info=None):
+ if not metro_info:
+ metro_info = self.client.get_hypermetro_by_id(metro_id)
+
+ if metro_info:
+ metro_health_status = metro_info['HEALTHSTATUS']
+ metro_running_status = metro_info['RUNNINGSTATUS']
+
+ if (metro_health_status == constants.HEALTH_NORMAL and
+ (metro_running_status == constants.RUNNING_NORMAL or
+ metro_running_status == constants.RUNNING_SYNC)):
+ self.client.stop_hypermetro(metro_id)
+
+ def _get_metro_group_id(self, id):
+ group_name = huawei_utils.encode_name(id)
+ metrogroup_id = self.client.get_metrogroup_by_name(group_name)
+
+ if not metrogroup_id:
+ group_name = huawei_utils.old_encode_name(id)
+ metrogroup_id = self.client.get_metrogroup_by_name(group_name)
+
+ return metrogroup_id
+
+ def check_consistencygroup_need_to_stop(self, group):
+ metrogroup_id = self._get_metro_group_id(group['id'])
+ if metrogroup_id:
+ self.stop_consistencygroup(metrogroup_id)
+
+ return metrogroup_id
+
+ def stop_consistencygroup(self, metrogroup_id):
+ metrogroup_info = self.client.get_metrogroup_by_id(metrogroup_id)
+ health_status = metrogroup_info['HEALTHSTATUS']
+ running_status = metrogroup_info['RUNNINGSTATUS']
+
+ if (health_status == constants.HEALTH_NORMAL
+ and (running_status == constants.RUNNING_NORMAL
+ or running_status == constants.RUNNING_SYNC)):
+ self.client.stop_metrogroup(metrogroup_id)
+
+ def _check_metro_in_cg(self, metro_id, cg_id):
+ metro_info = self.client.get_hypermetro_by_id(metro_id)
+ return (metro_info and metro_info['ISINCG'] == 'true' and
+ metro_info['CGID'] == cg_id)
+
+ def _valid_rmt_metro_domain(self):
+ domain_name = self.rmt_client.metro_domain
+ if not domain_name:
+ err_msg = _("Hypermetro domain is not configured.")
+ LOG.error(err_msg)
+ raise exception.VolumeBackendAPIException(data=err_msg)
+ domain_id = self.rmt_client.get_hyper_domain_id(domain_name)
+ if not domain_id:
+ err_msg = _("Hypermetro domain cannot be found.")
+ LOG.error(err_msg)
+ raise exception.VolumeBackendAPIException(data=err_msg)
+
+ return domain_id
+
+ def _is_sync_complete(self, metro_id):
+ metro_info = self.client.get_hypermetro_by_id(metro_id)
+ if (metro_info.get("HEALTHSTATUS") not in (constants.HEALTH_NORMAL, )
+ or metro_info.get('RUNNINGSTATUS') not in
+ constants.METRO_SYNC_NORMAL):
+ msg = _("HyperMetro pair %(id)s is not in an available status, "
+ "RunningStatus is: %(run)s, HealthStatus is: %(health)s"
+ ) % {"id": metro_id,
+ "run": metro_info.get('RUNNINGSTATUS'),
+ "health": metro_info.get("HEALTHSTATUS")}
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ if metro_info.get('RUNNINGSTATUS') == constants.RUNNING_NORMAL:
+ return True
+ return False
+
+ def _sync_metro(self, metro_id):
+ def _is_sync_complete():
+ return self._is_sync_complete(metro_id)
+
+ try:
+ self.client.sync_hypermetro(metro_id)
+ huawei_utils.wait_for_condition(
+ _is_sync_complete, constants.HYPERMETRO_WAIT_INTERVAL,
+ constants.DEFAULT_WAIT_TIMEOUT)
+ except Exception as err:
+ raise exception.VolumeBackendAPIException(data=err)
diff --git a/PowerVC/replication.py b/PowerVC/replication.py
new file mode 100644
index 0000000..401918e
--- /dev/null
+++ b/PowerVC/replication.py
@@ -0,0 +1,1022 @@
+# Copyright (c) 2016 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import json + +from oslo_log import log as logging +from oslo_utils import excutils + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.huawei import constants +from cinder.volume.drivers.huawei import huawei_utils + +LOG = logging.getLogger(__name__) + + +class ReplicaCG(object): + def __init__(self, local_client, rmt_client, conf): + self.loc_client = local_client + self.rmt_client = rmt_client + self.conf = conf + self.op = PairOp(self.loc_client) + self.local_cgop = CGOp(self.loc_client) + self.rmt_cgop = CGOp(self.rmt_client) + self.driver = ReplicaCommonDriver(self.conf, self.op) + + def create(self, group, replica_model): + group_id = group.get('id') + LOG.info("Create Consistency Group: %(group)s.", + {'group': group_id}) + group_name = huawei_utils.encode_name(group_id) + self.local_cgop.create(group_name, group_id, replica_model) + + def delete(self, group, volumes): + group_id = group.get('id') + LOG.info("Delete Consistency Group: %(group)s.", + {'group': group_id}) + group_info = self._get_group_info_by_name(group_id) + replicg_id = group_info.get('ID', None) + + if replicg_id: + if group_info.get('ISPRIMARY') == 'false': + msg = _("The CG is not primary, can't delete cg.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + self.split_replicg(group_info) + for volume in volumes: + replica_data = get_replication_driver_data(volume) + pair_id = replica_data.get('pair_id') + if pair_id and self.op.check_pair_exist(pair_id): + pair_info = self.op.get_replica_info(pair_id) + if pair_info.get('CGID') == replicg_id: + self.local_cgop.remove_pair_from_cg(replicg_id, + pair_id) + else: + LOG.warning(("The replication pair %(pair)s " + "is not in the consisgroup " + "%(group)s.") + % {'pair': pair_id, + 'group': replicg_id}) + else: + LOG.warning("Replication pair doesn't exist on array.") + self.local_cgop.delete(replicg_id) + + def update(self, group, add_volumes, remove_volumes, replica_model): + group_id = group.get('id') + LOG.info("Update Consistency Group: %(group)s.", + {'group': group_id}) + group_info = self._get_group_info_by_name(group_id) + replicg_id = group_info.get('ID', None) + + if replicg_id: + if group_info.get('ISPRIMARY') == 'false': + msg = _("The CG is not primary, can't operate cg.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.split_replicg(group_info) + self._deal_add_volumes(replicg_id, add_volumes) + self._deal_remove_volumes(replicg_id, remove_volumes, + replica_model) + + self.local_cgop.sync_replicg(replicg_id) + else: + msg = _("The CG does not exist on array.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def add_replica_to_group(self, group_id, replica): + group_info = self._get_group_info_by_name(group_id) + if not group_info: + msg = _("The CG %s does not exist on array.") % group_id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if 
group_info.get('ISPRIMARY') == 'false': + msg = _("The CG is not primary, can't operate cg.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + replica_data = json.loads(replica) + pair_id = replica_data['pair_id'] + replicg_id = group_info['ID'] + + self.split_replicg(group_info) + self.driver.split(pair_id) + self.local_cgop.add_pair_to_cg(replicg_id, pair_id) + self.local_cgop.sync_replicg(replicg_id) + + def failover(self, replicg_id): + """Failover replicationcg. + + Purpose: + 1. Split replicationcg. + 2. Set secondary access read & write. + """ + info = self.rmt_cgop.get_replicg_info(replicg_id) + if info.get('ISPRIMARY') == 'true': + msg = _('We should not do switch over on primary array.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + sync_status_set = (constants.REPLICG_STATUS_SYNCING,) + running_status = info.get('RUNNINGSTATUS') + if running_status in sync_status_set: + self.wait_replicg_ready(replicg_id) + + self.split_replicg(info) + self.rmt_cgop.set_cg_second_access(replicg_id, + constants.REPLICA_SECOND_RW) + + def failback(self, replicg_id): + """Failover volumes back to primary backend. + + The main steps: + 1. Switch the role of replicationcg . + 2. Copy the second LUN data back to primary LUN. + 3. Split replicationcg . + 4. Switch the role of replicationcg . + 5. Enable replicationcg. + """ + self.enable(replicg_id, self.local_cgop) + self.failover(replicg_id) + self.enable(replicg_id, self.rmt_cgop) + + def enable(self, replicg_id, client): + info = client.get_replicg_info(replicg_id) + running_status = info.get('RUNNINGSTATUS') + if running_status != constants.REPLICG_STATUS_SPLITED: + client.split_replicg(replicg_id) + self.wait_split_ready(replicg_id) + + if info.get('ISPRIMARY') == 'false': + client.switch_replicg(replicg_id) + + client.set_cg_second_access(replicg_id, constants.REPLICA_SECOND_RO) + client.sync_replicg(replicg_id) + self.wait_replicg_ready(replicg_id) + + def _deal_add_volumes(self, replicg_id, add_volumes): + for volume in add_volumes: + replica_data = get_replication_driver_data(volume) + pair_id = replica_data.get('pair_id') + if pair_id and self.op.check_pair_exist(pair_id): + pair_info = self.op.get_replica_info(pair_id) + if pair_info.get('ISPRIMARY') == 'true': + self.driver.split(pair_id) + self.local_cgop.add_pair_to_cg(replicg_id, pair_id) + else: + msg = _("The replication pair is not primary.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + err_msg = _("Replication pair doesn't exist on array.") + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def _deal_remove_volumes(self, replicg_id, remove_volumes, replica_model): + for volume in remove_volumes: + replica_data = get_replication_driver_data(volume) + pair_id = replica_data.get('pair_id') + if pair_id and self.op.check_pair_exist(pair_id): + pair_info = self.op.get_replica_info(pair_id) + if pair_info.get('CGID') == replicg_id: + self.local_cgop.remove_pair_from_cg(replicg_id, pair_id) + wait_complete = False + if replica_model == constants.REPLICA_SYNC_MODEL: + wait_complete = True + self.driver.sync(pair_id, wait_complete) + else: + LOG.warning(("The replication pair %(pair)s is not " + "in the consisgroup %(group)s.") + % {'pair': pair_id, 'group': replicg_id}) + else: + LOG.warning("Replication pair doesn't exist on array.") + + def _get_group_info_by_name(self, group_id): + group_name = huawei_utils.encode_name(group_id) + group_info = 
self.local_cgop.get_replicg_by_name(group_name) + if not group_info: + group_name = huawei_utils.old_encode_name(group_id) + group_info = self.local_cgop.get_replicg_by_name(group_name) + return group_info + + def split_replicg(self, group_info): + if group_info.get('ISEMPTY') == 'true': + return + + running_status = group_info.get('RUNNINGSTATUS') + if running_status == constants.REPLICG_STATUS_INVALID: + err_msg = _("Replicg is invalid.") + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + elif running_status in (constants.REPLICG_STATUS_INTERRUPTED, + constants.REPLICG_STATUS_SPLITED): + return + + replicg_id = group_info.get('ID') + self.rmt_cgop.split_replicg(replicg_id) + self.wait_split_ready(replicg_id) + + def wait_split_ready(self, replicg_id): + def _check_state(): + info = self.rmt_cgop.get_replicg_info(replicg_id) + if info.get('RUNNINGSTATUS') in ( + constants.REPLICG_STATUS_SPLITED, + constants.REPLICG_STATUS_INTERRUPTED): + return True + return False + + interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL + timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT + huawei_utils.wait_for_condition(_check_state, interval, timeout) + + def wait_replicg_ready(self, replicg_id, interval=None, timeout=None): + LOG.info('Wait synchronize complete.') + running_status_normal = (constants.REPLICG_STATUS_NORMAL,) + running_status_sync = (constants.REPLICG_STATUS_SYNCING,) + + def _replicg_ready(): + info = self.rmt_cgop.get_replicg_info(replicg_id) + if (info.get('RUNNINGSTATUS') in running_status_normal and + info.get('HEALTHSTATUS') == + constants.REPLICG_HEALTH_NORMAL): + return True + + if info.get('RUNNINGSTATUS') not in running_status_sync: + msg = (_('Wait synchronize failed. Running status: %s.') % + info.get('RUNNINGSTATUS')) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return False + + if not interval: + interval = constants.DEFAULT_WAIT_INTERVAL + if not timeout: + timeout = constants.DEFAULT_WAIT_TIMEOUT + + huawei_utils.wait_for_condition(_replicg_ready, + interval, + timeout) + + +class AbsReplicaOp(object): + def __init__(self, client): + self.client = client + + def create(self, **kwargs): + pass + + def delete(self, replica_id): + pass + + def protect_second(self, replica_id): + pass + + def unprotect_second(self, replica_id): + pass + + def sync(self, replica_id): + pass + + def split(self, replica_id): + pass + + def switch(self, replica_id): + pass + + def is_primary(self, replica_info): + flag = replica_info.get('ISPRIMARY') + if flag and flag.lower() == 'true': + return True + return False + + def get_replica_info(self, replica_id): + return {} + + def _is_status(self, status_key, status, replica_info): + if type(status) in (list, tuple): + return replica_info.get(status_key, '') in status + if type(status) is str: + return replica_info.get(status_key, '') == status + + return False + + def is_running_status(self, status, replica_info): + return self._is_status(constants.REPLICA_RUNNING_STATUS_KEY, + status, replica_info) + + def is_health_status(self, status, replica_info): + return self._is_status(constants.REPLICA_HEALTH_STATUS_KEY, + status, replica_info) + + +class PairOp(AbsReplicaOp): + def create(self, local_lun_id, rmt_lun_id, rmt_dev_id, + rmt_dev_name, replica_model, + speed=constants.REPLICA_SPEED, + period=constants.REPLICA_PERIOD, + **kwargs): + super(PairOp, self).create(**kwargs) + + params = { + "LOCALRESID": local_lun_id, + "LOCALRESTYPE": '11', + "REMOTEDEVICEID": rmt_dev_id, + 
"REMOTEDEVICENAME": rmt_dev_name, + "REMOTERESID": rmt_lun_id, + "REPLICATIONMODEL": replica_model, + # recovery policy. 1: auto, 2: manual + "RECOVERYPOLICY": '1', + "SPEED": speed, + } + + if replica_model == constants.REPLICA_ASYNC_MODEL: + # Synchronize type values: + # 1, manual + # 2, timed wait when synchronization begins + # 3, timed wait when synchronization ends + params['SYNCHRONIZETYPE'] = '2' + params['TIMINGVAL'] = period + + try: + pair_info = self.client.create_pair(params) + except Exception as err: + msg = _('Create replication pair failed. Error: %s.') % err + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return pair_info + + def split(self, pair_id): + self.client.split_pair(pair_id) + + def delete(self, pair_id, force=False): + self.client.delete_pair(pair_id, force) + + def protect_second(self, pair_id): + self.client.set_pair_second_access(pair_id, + constants.REPLICA_SECOND_RO) + + def unprotect_second(self, pair_id): + self.client.set_pair_second_access(pair_id, + constants.REPLICA_SECOND_RW) + + def sync(self, pair_id): + self.client.sync_pair(pair_id) + + def switch(self, pair_id): + self.client.switch_pair(pair_id) + + def get_replica_info(self, pair_id): + return self.client.get_pair_by_id(pair_id) + + def check_pair_exist(self, pair_id): + return self.client.check_pair_exist(pair_id) + + +class CGOp(AbsReplicaOp): + def create(self, group_name, group_id, replica_model): + data = {'NAME': group_name, + 'DESCRIPTION': group_id, + 'RECOVERYPOLICY': '1', + 'REPLICATIONMODEL': replica_model, + 'SPEED': constants.REPLICA_SPEED} + + if replica_model == constants.REPLICA_ASYNC_MODEL: + # Synchronize type values: + # 1, manual + # 2, timed wait when synchronization begins + # 3, timed wait when synchronization ends + data['SYNCHRONIZETYPE'] = '2' + data['TIMINGVAL'] = constants.REPLICG_PERIOD + + self.client.create_replicg(data) + + def delete(self, replicg_id): + self.client.delete_replicg(replicg_id) + + def remove_pair_from_cg(self, replicg_id, pair_id): + self.client.remove_replipair_from_replicg(replicg_id, + [pair_id]) + + def add_pair_to_cg(self, replicg_id, pair_id): + self.client.add_replipair_to_replicg(replicg_id, + [pair_id]) + + def sync_replicg(self, replicg_id): + self.client.sync_replicg(replicg_id) + + def get_replicg_info(self, replicg_id): + info = self.client.get_replicg_info(replicg_id) + return info + + def split_replicg(self, replicg_id): + self.client.split_replicg(replicg_id) + + def get_replicg_by_name(self, group_name): + info = self.client.get_replicg_by_name(group_name) + return info + + def set_cg_second_access(self, replicg_id, access): + self.client.set_cg_second_access(replicg_id, access) + + def switch_replicg(self, replicg_id): + self.client.switch_replicg(replicg_id) + + +class ReplicaCommonDriver(object): + def __init__(self, conf, replica_op): + self.conf = conf + self.op = replica_op + + def protect_second(self, replica_id): + info = self.op.get_replica_info(replica_id) + if info.get('SECRESACCESS') == constants.REPLICA_SECOND_RO: + return + + self.op.protect_second(replica_id) + self.wait_second_access(replica_id, constants.REPLICA_SECOND_RO) + + def unprotect_second(self, replica_id): + info = self.op.get_replica_info(replica_id) + if info.get('SECRESACCESS') == constants.REPLICA_SECOND_RW: + return + + self.op.unprotect_second(replica_id) + self.wait_second_access(replica_id, constants.REPLICA_SECOND_RW) + + def sync(self, replica_id, wait_complete=False): + self.protect_second(replica_id) + + 
expect_status = (constants.REPLICA_RUNNING_STATUS_NORMAL,
+                         constants.REPLICA_RUNNING_STATUS_SYNC,
+                         constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC)
+        info = self.op.get_replica_info(replica_id)
+
+        # If a sync-model pair is already normal or synchronizing,
+        # there is no need to start another synchronization.
+        if (info.get('REPLICATIONMODEL') == constants.REPLICA_SYNC_MODEL
+                and self.op.is_running_status(expect_status, info)):
+            return
+
+        self.op.sync(replica_id)
+        self.wait_expect_state(replica_id, expect_status)
+
+        if wait_complete:
+            self.wait_replica_ready(replica_id)
+
+    def split(self, replica_id):
+        running_status = (constants.REPLICA_RUNNING_STATUS_SPLIT,
+                          constants.REPLICA_RUNNING_STATUS_INVALID,
+                          constants.REPLICA_RUNNING_STATUS_ERRUPTED)
+        info = self.op.get_replica_info(replica_id)
+        if self.op.is_running_status(running_status, info):
+            return
+
+        try:
+            self.op.split(replica_id)
+        except Exception as err:
+            LOG.warning('Split replication exception: %s.', err)
+
+        try:
+            self.wait_expect_state(replica_id, running_status)
+        except Exception:
+            msg = _('Split replication failed.')
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def enable(self, replica_id, wait_sync_complete=False):
+        info = self.op.get_replica_info(replica_id)
+        if not self.op.is_primary(info):
+            self.switch(replica_id)
+        self.sync(replica_id)
+        return None
+
+    def switch(self, replica_id):
+        self.split(replica_id)
+        self.unprotect_second(replica_id)
+        self.op.switch(replica_id)
+
+        # Wait to become primary.
+        def _wait_switch_to_primary():
+            info = self.op.get_replica_info(replica_id)
+            if self.op.is_primary(info):
+                return True
+            return False
+
+        interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL
+        timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT
+        huawei_utils.wait_for_condition(_wait_switch_to_primary,
+                                        interval,
+                                        timeout)
+
+    def failover(self, replica_id):
+        """Failover replication.
+
+        Purpose:
+        1. Split replication.
+        2. Set secondary access read & write.
+        """
+        info = self.op.get_replica_info(replica_id)
+        if self.op.is_primary(info):
+            msg = _('Failover should not be performed on the '
+                    'primary array.')
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        sync_status_set = (constants.REPLICA_RUNNING_STATUS_SYNC,
+                           constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC)
+        if self.op.is_running_status(sync_status_set, info):
+            self.wait_replica_ready(replica_id)
+
+        self.split(replica_id)
+        self.op.unprotect_second(replica_id)
+
+    def wait_replica_ready(self, replica_id, interval=None, timeout=None):
+        LOG.debug('Wait synchronize complete.')
+        running_status_normal = (constants.REPLICA_RUNNING_STATUS_NORMAL,
+                                 constants.REPLICA_RUNNING_STATUS_SYNCED)
+        running_status_sync = (constants.REPLICA_RUNNING_STATUS_SYNC,
+                               constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC)
+        health_status_normal = constants.REPLICA_HEALTH_STATUS_NORMAL
+
+        def _replica_ready():
+            info = self.op.get_replica_info(replica_id)
+            if (self.op.is_running_status(running_status_normal, info)
+                    and self.op.is_health_status(health_status_normal,
+                                                 info)):
+                return True
+
+            if not self.op.is_running_status(running_status_sync, info):
+                msg = (_('Wait synchronize failed. Running status: %s.') %
+                       info.get(constants.REPLICA_RUNNING_STATUS_KEY))
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            return False
+
+        if not interval:
+            interval = constants.DEFAULT_WAIT_INTERVAL
+        if not timeout:
+            timeout = constants.DEFAULT_WAIT_TIMEOUT
+
+        huawei_utils.wait_for_condition(_replica_ready,
+                                        interval,
+                                        timeout)
+
+    def wait_second_access(self, replica_id, access_level):
+        def _check_access():
+            info = self.op.get_replica_info(replica_id)
+            if info.get('SECRESACCESS') == access_level:
+                return True
+            return False
+
+        interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL
+        timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT
+        huawei_utils.wait_for_condition(_check_access,
+                                        interval,
+                                        timeout)
+
+    def wait_expect_state(self, replica_id,
+                          running_status, health_status=None,
+                          interval=None, timeout=None):
+        def _check_state():
+            info = self.op.get_replica_info(replica_id)
+            if self.op.is_running_status(running_status, info):
+                if (not health_status
+                        or self.op.is_health_status(health_status, info)):
+                    return True
+            return False
+
+        if not interval:
+            interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL
+        if not timeout:
+            timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT
+
+        huawei_utils.wait_for_condition(_check_state, interval, timeout)
+
+
+def get_replication_driver_data(volume):
+    if volume.replication_driver_data:
+        return json.loads(volume.replication_driver_data)
+
+    return {}
+
+
+def to_string(dict_data):
+    if dict_data:
+        return json.dumps(dict_data)
+    return ''
+
+
+class ReplicaPairManager(object):
+    def __init__(self, local_client, rmt_client, conf):
+        self.local_client = local_client
+        self.rmt_client = rmt_client
+        self.conf = conf
+
+        # Currently only one remote pool is supported.
+        self.rmt_pool = self.rmt_client.storage_pools[0]
+
+        self.local_op = PairOp(self.local_client)
+        self.local_driver = ReplicaCommonDriver(self.conf, self.local_op)
+        self.rmt_op = PairOp(self.rmt_client)
+        self.rmt_driver = ReplicaCommonDriver(self.conf, self.rmt_op)
+
+    def try_get_remote_wwn(self):
+        try:
+            info = self.rmt_client.get_array_info()
+            return info.get('wwn')
+        except Exception as err:
+            LOG.warning('Get remote array wwn failed. Error: %s.', err)
+            return None
+
+    def get_remote_device_by_wwn(self, wwn):
+        devices = []
+        try:
+            devices = self.local_client.get_remote_devices()
+        except Exception as err:
+            LOG.warning('Get remote devices failed. Error: %s.', err)
+
+        for device in devices:
+            if device.get('WWN') == wwn:
+                return device
+
+        return {}
+
+    def check_remote_available(self):
+        # Fetch the device WWN on every check, so that a replaced
+        # remote array is still detected correctly.
+        wwn = self.try_get_remote_wwn()
+        if not wwn:
+            return False
+
+        device = self.get_remote_device_by_wwn(wwn)
+        # Check that the remote device is available to use:
+        # if the array type is replication, 'ARRAYTYPE' == '1';
+        # if the health status is normal, 'HEALTHSTATUS' == '1'.
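        # Editor's note: an illustrative (not real) record returned by
        # get_remote_devices() that would pass all three checks below:
        #     {'ID': '0', 'NAME': 'rmt_array', 'WWN': '2100xxxxxxxxxxxx',
        #      'ARRAYTYPE': '1', 'HEALTHSTATUS': '1',
        #      'RUNNINGSTATUS': constants.STATUS_RUNNING}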
+ if (device and device.get('ARRAYTYPE') == '1' + and device.get('HEALTHSTATUS') == '1' + and device.get('RUNNINGSTATUS') == constants.STATUS_RUNNING): + return True + + return False + + def update_replica_capability(self, stats): + is_rmt_dev_available = self.check_remote_available() + if not is_rmt_dev_available: + LOG.warning('Remote device is unavailable.') + return stats + + for pool in stats['pools']: + pool['replication_enabled'] = True + pool['replication_type'] = ['sync', 'async'] + + return stats + + def get_rmt_dev_info(self): + wwn = self.try_get_remote_wwn() + if not wwn: + return None, None, None + + device = self.get_remote_device_by_wwn(wwn) + if not device: + return None, None, None + + return device.get('ID'), device.get('NAME'), device.get('SN') + + def build_rmt_lun_params(self, local_lun_info): + params = { + 'NAME': local_lun_info['NAME'], + 'PARENTID': self.rmt_client.get_pool_id(self.rmt_pool), + 'DESCRIPTION': local_lun_info['DESCRIPTION'], + 'ALLOCTYPE': local_lun_info['ALLOCTYPE'], + 'CAPACITY': local_lun_info['CAPACITY'], + 'WRITEPOLICY': local_lun_info['WRITEPOLICY'], + } + + for k in ('DATATRANSFERPOLICY', 'PREFETCHPOLICY', 'PREFETCHVALUE', + 'READCACHEPOLICY', 'WRITECACHEPOLICY'): + if k in local_lun_info: + params[k] = local_lun_info[k] + + LOG.debug('Remote lun params: %s.', params) + return params + + def wait_volume_online(self, client, lun_info, + interval=None, timeout=None): + online_status = constants.STATUS_VOLUME_READY + if lun_info.get('RUNNINGSTATUS') == online_status: + return + + lun_id = lun_info['ID'] + + def _wait_online(): + info = client.get_lun_info(lun_id) + return info.get('RUNNINGSTATUS') == online_status + + if not interval: + interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL + if not timeout: + timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT + + huawei_utils.wait_for_condition(_wait_online, + interval, + timeout) + + def create_rmt_lun(self, local_lun_info): + # Create on rmt array. If failed, raise exception. + lun_params = self.build_rmt_lun_params(local_lun_info) + lun_info = self.rmt_client.create_lun(lun_params) + try: + self.wait_volume_online(self.rmt_client, lun_info) + except exception.VolumeBackendAPIException: + with excutils.save_and_reraise_exception(): + self.rmt_client.delete_lun(lun_info['ID']) + + return lun_info + + def create_replica(self, local_lun_info, replica_model): + """Create remote LUN and replication pair. + + Purpose: + 1. create remote lun + 2. create replication pair + 3. enable replication pair + """ + LOG.debug(('Create replication, local lun info: %(info)s, ' + 'replication model: %(model)s.'), + {'info': local_lun_info, 'model': replica_model}) + + local_lun_id = local_lun_info['ID'] + self.wait_volume_online(self.local_client, local_lun_info) + + # step1, create remote lun + rmt_lun_info = self.create_rmt_lun(local_lun_info) + rmt_lun_id = rmt_lun_info['ID'] + + # step2, get remote device info + rmt_dev_id, rmt_dev_name, rmt_dev_sn = self.get_rmt_dev_info() + if not rmt_lun_id or not rmt_dev_name: + self._delete_rmt_lun(rmt_lun_id) + msg = _('Get remote device info failed.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # step3, create replication pair + try: + pair_info = self.local_op.create(local_lun_id, + rmt_lun_id, rmt_dev_id, + rmt_dev_name, replica_model) + pair_id = pair_info['ID'] + except Exception as err: + with excutils.save_and_reraise_exception(): + LOG.error('Create pair failed. 
Error: %s.', err) + self._delete_rmt_lun(rmt_lun_id) + + # step4, start sync manually. If replication type is sync, + # then wait for sync complete. + wait_complete = (replica_model == constants.REPLICA_SYNC_MODEL) + try: + self.local_driver.sync(pair_id, wait_complete) + except Exception as err: + with excutils.save_and_reraise_exception(): + LOG.error('Start synchronization failed. Error: %s.', err) + self._delete_pair(pair_id) + self._delete_rmt_lun(rmt_lun_id) + + model_update = {} + driver_data = {'pair_id': pair_id, + 'huawei_sn': rmt_dev_sn, + 'rmt_lun_id': rmt_lun_id, + 'rmt_lun_wwn': rmt_lun_info['WWN']} + model_update['replication_driver_data'] = to_string(driver_data) + model_update['replication_status'] = 'available' + LOG.debug('Create replication, return info: %s.', model_update) + return model_update + + def _delete_pair(self, pair_id): + if (not pair_id + or not self.local_client.check_pair_exist(pair_id)): + return + + self.local_driver.split(pair_id) + self.local_op.delete(pair_id) + + def _delete_rmt_lun(self, lun_id): + if lun_id and self.rmt_client.check_lun_exist(lun_id): + self.rmt_client.delete_lun(lun_id) + + def delete_replica(self, volume, replication_driver_data=None): + """Delete replication pair and remote lun. + + Purpose: + 1. delete replication pair + 2. delete remote_lun + """ + LOG.debug('Delete replication, volume: %s.', volume.id) + if replication_driver_data: + info = json.loads(replication_driver_data) + else: + info = get_replication_driver_data(volume) + + pair_id = info.get('pair_id') + if pair_id: + self._delete_pair(pair_id) + + # Delete remote_lun + rmt_lun_id = info.get('rmt_lun_id') + if rmt_lun_id: + self._delete_rmt_lun(rmt_lun_id) + + def _pre_fail_check(self, vol, running_status_set, + data_status_set=None): + # check the replica_pair status + vol_name = huawei_utils.get_lun_name(self.local_client, vol, True) + vol_id = self.local_client.get_lun_id_by_name(vol_name) + pair_info = self.local_client.get_pair_info_by_lun_id(vol_id) + if pair_info: + running_status = self.local_op.is_running_status( + running_status_set, pair_info) + data_status = self.local_op.is_data_status( + data_status_set, pair_info) if data_status_set else True + + if not (running_status and data_status): + msg = _('Replication pair %(id)s is not at the status ' + 'failover/failback available, RUNNINGSTATUS: ' + '%(run)s, SECRESDATASTATUS: %(sec)s.' + ) % {'id': pair_info['ID'], + 'run': pair_info['RUNNINGSTATUS'], + 'sec': pair_info['SECRESDATASTATUS']} + LOG.error(msg) + raise exception.InvalidReplicationTarget(reason=msg) + + def failback(self, volumes): + """Failover volumes back to primary backend. + + The main steps: + 1. Switch the role of replication pairs. + 2. Copy the second LUN data back to primary LUN. + 3. Split replication pairs. + 4. Switch the role of replication pairs. + 5. Enable replications. 
+ """ + running_status_set = ( + constants.REPLICA_RUNNING_STATUS_NORMAL, + constants.REPLICA_RUNNING_STATUS_SPLIT, + constants.REPLICA_RUNNING_STATUS_ERRUPTED) + + volumes_update = [] + cgid_list = set() + replicacg = ReplicaCG(self.local_client, self.rmt_client, self.conf) + for v in volumes: + drv_data = get_replication_driver_data(v) + pair_id = drv_data.get('pair_id') + if not pair_id: + self._pre_fail_check(v, running_status_set) + + for v in volumes: + v_update = {} + v_update['volume_id'] = v.id + drv_data = get_replication_driver_data(v) + pair_id = drv_data.get('pair_id') + if not pair_id: + LOG.warning("No pair id in volume %s.", v.id) + v_update['updates'] = {'replication_status': 'error'} + volumes_update.append(v_update) + continue + + rmt_lun_id = drv_data.get('rmt_lun_id') + if not rmt_lun_id: + LOG.warning("No remote lun id in volume %s.", v.id) + v_update['updates'] = {'replication_status': 'error'} + volumes_update.append(v_update) + continue + + replica_info = self.local_op.get_replica_info(pair_id) + consisgroup_id = replica_info.get('CGID') + if consisgroup_id: + if consisgroup_id not in cgid_list: + replicacg.failback(consisgroup_id) + cgid_list.add(consisgroup_id) + else: + # Switch replication pair role, and start synchronize. + self.local_driver.enable(pair_id) + + # Wait for synchronize complete. + self.local_driver.wait_replica_ready(pair_id) + + # Split replication pair again + self.rmt_driver.failover(pair_id) + + # Switch replication pair role, and start synchronize. + self.rmt_driver.enable(pair_id) + + local_metadata = huawei_utils.get_lun_metadata(v) + new_drv_data = to_string( + {'pair_id': pair_id, + 'huawei_sn': local_metadata.get('huawei_sn'), + 'rmt_lun_id': local_metadata.get('huawei_lun_id'), + 'rmt_lun_wwn': local_metadata.get('huawei_lun_wwn')}) + location = huawei_utils.to_string( + huawei_lun_id=rmt_lun_id, huawei_sn=drv_data.get('huawei_sn'), + huawei_lun_wwn=drv_data.get('rmt_lun_wwn')) + + v_update['updates'] = {'provider_location': location, + 'replication_status': 'available', + 'replication_driver_data': new_drv_data} + volumes_update.append(v_update) + + return volumes_update + + def failover(self, volumes): + """Failover volumes back to secondary array. + + Split the replication pairs and make the secondary LUNs R&W. 
+ """ + running_status_set = ( + constants.REPLICA_RUNNING_STATUS_NORMAL, + constants.REPLICA_RUNNING_STATUS_SPLIT, + constants.REPLICA_RUNNING_STATUS_ERRUPTED) + data_status_set = ( + constants.REPLICA_DATA_STATUS_SYNCED, + constants.REPLICA_DATA_STATUS_COMPLETE) + + volumes_update = [] + cgid_list = set() + replicacg = ReplicaCG(self.local_client, self.rmt_client, self.conf) + for v in volumes: + drv_data = get_replication_driver_data(v) + pair_id = drv_data.get('pair_id') + if not pair_id: + self._pre_fail_check(v, running_status_set, data_status_set) + + for v in volumes: + v_update = {} + v_update['volume_id'] = v.id + drv_data = get_replication_driver_data(v) + pair_id = drv_data.get('pair_id') + if not pair_id: + LOG.warning("No pair id in volume %s.", v.id) + v_update['updates'] = {'replication_status': 'error'} + volumes_update.append(v_update) + continue + + rmt_lun_id = drv_data.get('rmt_lun_id') + if not rmt_lun_id: + LOG.warning("No remote lun id in volume %s.", v.id) + v_update['updates'] = {'replication_status': 'error'} + volumes_update.append(v_update) + continue + + replica_info = self.rmt_op.get_replica_info(pair_id) + consisgroup_id = replica_info.get('CGID') + if consisgroup_id: + if consisgroup_id not in cgid_list: + replicacg.failover(consisgroup_id) + cgid_list.add(consisgroup_id) + else: + self.rmt_driver.failover(pair_id) + + local_metadata = huawei_utils.get_lun_metadata(v) + new_drv_data = to_string( + {'pair_id': pair_id, + 'huawei_sn': local_metadata.get('huawei_sn'), + 'rmt_lun_id': local_metadata.get('huawei_lun_id'), + 'rmt_lun_wwn': local_metadata.get('huawei_lun_wwn')}) + location = huawei_utils.to_string( + huawei_lun_id=rmt_lun_id, huawei_sn=drv_data.get('huawei_sn'), + huawei_lun_wwn=drv_data.get('rmt_lun_wwn')) + + v_update['updates'] = {'provider_location': location, + 'replication_status': 'failed-over', + 'replication_driver_data': new_drv_data} + volumes_update.append(v_update) + + return volumes_update + + def split_replica(self, pair_id): + self.local_driver.split(pair_id) + + +def get_replication_opts(opts): + if opts.get('replication_type') == 'sync': + opts['replication_type'] = constants.REPLICA_SYNC_MODEL + else: + opts['replication_type'] = constants.REPLICA_ASYNC_MODEL + + return opts diff --git a/PowerVC/rest_client.py b/PowerVC/rest_client.py new file mode 100644 index 0000000..3742ebb --- /dev/null +++ b/PowerVC/rest_client.py @@ -0,0 +1,2763 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import netaddr +import requests +import six +import threading +import time + +from oslo_concurrency import lockutils +from oslo_log import log as logging +from oslo_utils import excutils +from requests.adapters import HTTPAdapter + +from cinder import exception +from cinder.i18n import _ +from cinder import utils +from cinder.volume.drivers.huawei import constants +from cinder.volume.drivers.huawei import huawei_utils + +LOG = logging.getLogger(__name__) + + +class HostNameIgnoringAdapter(HTTPAdapter): + def cert_verify(self, conn, url, verify, cert): + conn.assert_hostname = False + return super(HostNameIgnoringAdapter, self).cert_verify( + conn, url, verify, cert) + + +class RestClient(object): + """Common class for Huawei OceanStor storage system.""" + + def __init__(self, configuration, san_address, san_user, san_password, + **kwargs): + self.configuration = configuration + self.san_address = san_address + self.san_user = san_user + self.san_password = san_password + self.vstore_name = kwargs.get('vstore_name', None) + self.storage_pools = kwargs.get('storage_pools', + self.configuration.storage_pools) + self.iscsi_info = kwargs.get('iscsi_info', + self.configuration.iscsi_info) + self.fc_info = kwargs.get('fc_info', self.configuration.fc_info) + self.iscsi_default_target_ip = kwargs.get( + 'iscsi_default_target_ip', + self.configuration.iscsi_default_target_ip) + self.metro_domain = kwargs.get('metro_domain', None) + self.semaphore = threading.Semaphore(20) + self.call_lock = lockutils.ReaderWriterLock() + self.session = None + self.url = None + self.ssl_cert_verify = self.configuration.ssl_cert_verify + self.ssl_cert_path = self.configuration.ssl_cert_path + + if not self.ssl_cert_verify and hasattr(requests, 'packages'): + LOG.warning("Suppressing requests library SSL Warnings") + requests.packages.urllib3.disable_warnings( + requests.packages.urllib3.exceptions.InsecureRequestWarning) + requests.packages.urllib3.disable_warnings( + requests.packages.urllib3.exceptions.InsecurePlatformWarning) + + def init_http_head(self): + self.url = None + self.session = requests.Session() + self.session.headers.update({ + "Connection": "keep-alive", + "Content-Type": "application/json"}) + + self.session.verify = self.ssl_cert_path if self.ssl_cert_verify else False + + def do_call(self, url=None, data=None, method=None, + calltimeout=constants.SOCKET_TIMEOUT, filter_flag=False): + """Send requests to Huawei storage server. + + Send HTTPS call, get response in JSON. + Convert response into Python Object and return it. + """ + if self.url: + url = self.url + url + + kwargs = {'timeout': calltimeout} + if data: + kwargs['data'] = json.dumps(data) + + if method in (None, 'POST'): + func = self.session.post + elif method in ('PUT',): + func = self.session.put + elif method in ('GET',): + func = self.session.get + elif method in ('DELETE',): + func = self.session.delete + else: + msg = _("Request method %s is invalid.") % method + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.semaphore.acquire() + + try: + res = func(url, **kwargs) + except Exception as exc: + if "BadStatusLine" in six.text_type(exc): + return {"error": { + "code": constants.ERROR_BAD_STATUS_LINE, + "description": "BadStatusLine."}} + LOG.exception('Bad response from server: %(url)s.' 
+                          ' Error: %(err)s',
+                          {'url': url, 'err': six.text_type(exc)})
+            return {"error": {"code": constants.ERROR_CONNECT_TO_SERVER,
+                              "description": "Connect to server error."}}
+        finally:
+            self.semaphore.release()
+
+        try:
+            res.raise_for_status()
+        except requests.HTTPError as exc:
+            return {"error": {"code": exc.response.status_code,
+                              "description": six.text_type(exc)}}
+
+        res_json = res.json()
+        if not filter_flag:
+            LOG.info('\nRequest URL: %(url)s\n'
+                     'Call Method: %(method)s\n'
+                     'Request Data: %(data)s\n'
+                     'Response Data: %(res)s',
+                     {'url': url,
+                      'method': method,
+                      'data': data,
+                      'res': res_json})
+
+        return res_json
+
+    def login(self):
+        """Login Huawei storage array."""
+        device_id = None
+        for item_url in self.san_address:
+            url = item_url + "xx/sessions"
+            data = {"username": self.san_user,
+                    "password": self.san_password,
+                    "scope": "0"}
+            if self.vstore_name:
+                data['vstorename'] = self.vstore_name
+            self.init_http_head()
+            self.session.mount(item_url.lower(), HostNameIgnoringAdapter())
+            result = self.do_call(url, data,
+                                  calltimeout=constants.LOGIN_SOCKET_TIMEOUT,
+                                  filter_flag=True)
+
+            if (result['error']['code'] != 0) or ("data" not in result):
+                LOG.error("Login error. URL: %(url)s\n"
+                          "Reason: %(reason)s.",
+                          {"url": item_url, "reason": result})
+                continue
+
+            LOG.info('Login success: %(url)s', {'url': item_url})
+            device_id = result['data']['deviceid']
+            self.device_id = device_id
+            self.url = item_url + device_id
+            self.session.headers['iBaseToken'] = result['data']['iBaseToken']
+            if (result['data']['accountstate']
+                    in constants.PWD_EXPIRED_OR_INITIAL):
+                self.logout()
+                msg = _("The password has expired or is initial, "
+                        "please change the password.")
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+            break
+
+        if device_id is None:
+            msg = _("Failed to login with all rest URLs.")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        return device_id
+
+    def try_login(self):
+        try:
+            self.login()
+        except Exception as err:
+            LOG.warning('Login failed. Error: %s.', err)
+
+    def relogin(self, old_token):
+        """Relogin to the Huawei storage array.
+
+        Called when a batch operation fails.
+        """
+        old_url = self.url
+        if self.url is None:
+            try:
+                self.login()
+            except Exception as err:
+                LOG.error("Relogin failed. Error: %s.", err)
+                return False
+            LOG.info('Relogin:\n'
+                     'Replace URL:\n'
+                     'Old URL: %(old_url)s\n'
+                     'New URL: %(new_url)s\n',
+                     {'old_url': old_url,
+                      'new_url': self.url})
+        elif old_token == self.session.headers['iBaseToken']:
+            try:
+                self.logout()
+            except Exception as err:
+                LOG.warning('Logout failed. Error: %s.', err)
+
+            try:
+                self.login()
+            except Exception as err:
+                LOG.error("Relogin failed. Error: %s.", err)
+                return False
+            LOG.info('First logout then login:\n'
+                     'Replace URL:\n'
+                     'Old URL: %(old_url)s\n'
+                     'New URL: %(new_url)s\n',
+                     {'old_url': old_url,
+                      'new_url': self.url})
+        else:
+            LOG.info('Relogin has already been done by another thread.')
+        return True
+
+    def call(self, url, data=None, method=None, filter_flag=False):
+        """Send requests to server.
+
+        If the call fails, relogin and retry.
+        """
+        with self.call_lock.read_lock():
+            if self.url:
+                old_token = self.session.headers.get('iBaseToken')
+                result = self.do_call(url, data, method,
+                                      filter_flag=filter_flag)
+            else:
+                old_token = None
+                result = {"error": {
+                    "code": constants.ERROR_UNAUTHORIZED_TO_SERVER,
+                    "description": "unauthorized."}}
+
+        error_code = result['error']['code']
+        if (error_code == constants.ERROR_CONNECT_TO_SERVER
+                or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER
+                or error_code == constants.ERROR_BAD_STATUS_LINE):
+            LOG.error("Can't access the current URL, relogging in.")
+            with self.call_lock.write_lock():
+                relogin_result = self.relogin(old_token)
+            if relogin_result:
+                with self.call_lock.read_lock():
+                    result = self.do_call(url, data, method,
+                                          filter_flag=filter_flag)
+                if result['error']['code'] in constants.RELOGIN_ERROR_PASS:
+                    LOG.warning('This operation may have succeeded the '
+                                'first time.')
+                    result['error']['code'] = 0
+                elif result['error']['code'] == 0:
+                    LOG.info('Succeeded on the retry.')
+                else:
+                    LOG.info('Retry failed. Reason: %s', result)
+            else:
+                LOG.error('Relogin failed, the request will not be resent.')
+        return result
+
+    def logout(self):
+        """Logout the session."""
+        url = "/sessions"
+        if self.url:
+            result = self.do_call(url, None, "DELETE")
+            self._assert_rest_result(result, _('Logout session error.'))
+
+    def _assert_rest_result(self, result, err_str):
+        if result['error']['code'] != 0:
+            msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str,
+                                                     'res': result})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def _assert_data_in_result(self, result, msg):
+        if 'data' not in result:
+            err_msg = _('%s "data" is not in result.') % msg
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def create_lun(self, lun_params):
+        # Set the mirror switch always on.
+        lun_params['MIRRORPOLICY'] = '1'
+        url = "/lun"
+        result = self.call(url, lun_params)
+        if result['error']['code'] == constants.ERROR_VOLUME_ALREADY_EXIST:
+            lun_id = self.get_lun_id_by_name(lun_params['NAME'])
+            if lun_id:
+                return self.get_lun_info(lun_id)
+
+        msg = _('Create lun error.')
+        self._assert_rest_result(result, msg)
+        self._assert_data_in_result(result, msg)
+
+        return result['data']
+
+    def check_lun_exist(self, lun_id, lun_wwn=None):
+        url = "/lun/" + lun_id
+        result = self.call(url, None, "GET")
+        error_code = result['error']['code']
+        if error_code != 0:
+            if error_code == constants.ERROR_LUN_NOT_EXIST:
+                LOG.warning("Can't find LUN %s on the array.", lun_id)
+                return False
+            else:
+                msg = _("Check LUN exist error.")
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+        if lun_wwn and result['data']['WWN'] != lun_wwn:
+            LOG.debug("LUN ID %(id)s with WWN %(wwn)s does not exist on "
+                      "the array.", {"id": lun_id, "wwn": lun_wwn})
+            return False
+
+        return True
+
+    def delete_lun(self, lun_id):
+        url = "/lun/" + lun_id
+        data = {"TYPE": "11",
+                "ID": lun_id}
+        result = self.call(url, data, "DELETE")
+        self._assert_rest_result(result, _('Delete lun error.'))
+
+    def get_all_pools(self):
+        url = "/storagepool"
+        result = self.call(url, None, "GET", filter_flag=True)
+        msg = _('Query resource pool error.')
+        self._assert_rest_result(result, msg)
+        self._assert_data_in_result(result, msg)
+        return result['data']
+
+    def get_inband_pools(self):
+        pools = self.get_all_pools()
+
+        def is_inband_pool(pool):
+            return pool["NAME"] in self.storage_pools
+        return filter(is_inband_pool, pools)
+
+    def get_pool_info(self,
pool_name=None, pools=None): + info = {} + if not pool_name: + return info + + for pool in pools: + if pool_name.strip() != pool.get('NAME'): + continue + + if pool.get('USAGETYPE') == constants.FILE_SYSTEM_POOL_TYPE: + break + + info['ID'] = pool.get('ID') + info['CAPACITY'] = pool.get('DATASPACE', pool['USERFREECAPACITY']) + info['TOTALCAPACITY'] = pool.get('USERTOTALCAPACITY') + info['TIER0CAPACITY'] = pool.get('TIER0CAPACITY') + info['TIER1CAPACITY'] = pool.get('TIER1CAPACITY') + info['TIER2CAPACITY'] = pool.get('TIER2CAPACITY') + + return info + + def get_pool_id(self, pool_name): + pools = self.get_all_pools() + pool_info = self.get_pool_info(pool_name, pools) + if not pool_info: + # The following code is to keep compatibility with old version of + # Huawei driver. + for pool_name in self.storage_pools: + pool_info = self.get_pool_info(pool_name, pools) + if pool_info: + break + + if not pool_info: + msg = _('Can not get pool info. pool: %s') % pool_name + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return pool_info['ID'] + + def _get_id_from_result(self, result, name, key): + if 'data' in result: + for item in result['data']: + if name == item.get(key): + return item['ID'] + + def get_lun_id_by_name(self, name): + if not name: + return + + url = "/lun?filter=NAME::%s" % name + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get lun id by name error.')) + + return self._get_id_from_result(result, name, 'NAME') + + def get_lun_info_by_name(self, name): + if not name: + return + + url = "/lun?filter=NAME::%s&range=[0-100]" % name + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get lun info by name error.')) + if result.get('data'): + return result['data'][0] + + def activate_snapshot(self, snapshot_id): + url = "/snapshot/activate" + data = ({"SNAPSHOTLIST": snapshot_id} + if type(snapshot_id) in (list, tuple) + else {"SNAPSHOTLIST": [snapshot_id]}) + result = self.call(url, data) + self._assert_rest_result(result, _('Activate snapshot error.')) + + def create_snapshot(self, lun_id, snapshot_name, snapshot_description): + url = "/snapshot" + data = {"TYPE": "27", + "NAME": snapshot_name, + "PARENTTYPE": "11", + "DESCRIPTION": snapshot_description, + "PARENTID": lun_id} + result = self.call(url, data) + + msg = _('Create snapshot error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data'] + + def get_lun_id(self, volume, volume_name): + metadata = huawei_utils.get_lun_metadata(volume) + lun_id = (metadata.get('huawei_lun_id') or + self.get_lun_id_by_name(volume_name)) + + if not lun_id: + msg = (_("Can't find lun info on the array. 
" + "volume: %(id)s, lun name: %(name)s.") % + {'id': volume.id, 'name': volume_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return lun_id + + def check_snapshot_exist(self, snapshot_id, snapshot_wwn=None): + url = "/snapshot/%s" % snapshot_id + result = self.call(url, None, "GET") + error_code = result['error']['code'] + if error_code != 0: + if error_code == constants.ERROR_SNAPSHOT_NOT_EXIST: + return False + else: + msg = (_("Check snapshot exist error.")) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if snapshot_wwn: + if snapshot_wwn != result['data']['WWN']: + return False + + return True + + def stop_snapshot(self, snapshot_id): + url = "/snapshot/stop" + stopdata = {"ID": snapshot_id} + result = self.call(url, stopdata, "PUT") + self._assert_rest_result(result, _('Stop snapshot error.')) + + def delete_snapshot(self, snapshotid): + url = "/snapshot/%s" % snapshotid + data = {"TYPE": "27", "ID": snapshotid} + result = self.call(url, data, "DELETE") + self._assert_rest_result(result, _('Delete snapshot error.')) + + def get_snapshot_id_by_name(self, name): + if not name: + return + + url = "/snapshot?filter=NAME::%s" % name + description = 'The snapshot license file is unavailable.' + result = self.call(url, None, "GET") + if 'error' in result: + if description == result['error']['description']: + return + self._assert_rest_result(result, _('Get snapshot id error.')) + + return self._get_id_from_result(result, name, 'NAME') + + def create_luncopy(self, srclunid, tgtlunid, copyspeed): + data = {"NAME": 'LUNCopy_%s_%s' % (srclunid, tgtlunid), + "COPYSPEED": copyspeed, + "SOURCELUN": ("INVALID;%s;INVALID;INVALID;INVALID" + % srclunid), + "TARGETLUN": ("INVALID;%s;INVALID;INVALID;INVALID" + % tgtlunid), + } + result = self.call("/luncopy", data) + + msg = _('Create luncopy error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data']['ID'] + + def add_host_to_hostgroup(self, host_id): + """Associate host to hostgroup. + + If hostgroup doesn't exist, create one. 
+ """ + hostgroup_name = constants.HOSTGROUP_PREFIX + host_id + hostgroup_id = self.create_hostgroup_with_check(hostgroup_name) + is_associated = self._is_host_associate_to_hostgroup(hostgroup_id, + host_id) + if not is_associated: + self._associate_host_to_hostgroup(hostgroup_id, host_id) + + return hostgroup_id + + def get_tgt_port_group(self, tgt_port_group): + """Find target portgroup id by target port group name.""" + url = "/portgroup?filter=NAME::%s" % tgt_port_group + result = self.call(url, None, "GET") + + msg = _('Find portgroup error.') + self._assert_rest_result(result, msg) + if 'data' in result and result['data']: + return result['data'][0]['ID'] + + def _associate_portgroup_to_view(self, view_id, portgroup_id): + url = "/MAPPINGVIEW/CREATE_ASSOCIATE" + data = {"ASSOCIATEOBJTYPE": "257", + "ASSOCIATEOBJID": portgroup_id, + "TYPE": "245", + "ID": view_id} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Associate portgroup to mapping ' + 'view error.')) + + def _portgroup_associated(self, view_id, portgroup_id): + url = ("/mappingview/associate?TYPE=245&" + "ASSOCIATEOBJTYPE=257&ASSOCIATEOBJID=%s" % portgroup_id) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Check portgroup associate error.')) + + if self._get_id_from_result(result, view_id, 'ID'): + return True + return False + + def do_mapping(self, lun_id, hostgroup_id, host_id, portgroup_id=None, + lun_type=constants.LUN_TYPE, hypermetro_lun=False): + """Add hostgroup and lungroup to mapping view.""" + lungroup_name = constants.LUNGROUP_PREFIX + host_id + mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id + lungroup_id = self._find_lungroup(lungroup_name) + view_id = self.find_mapping_view(mapping_view_name) + map_info = {} + + LOG.info( + 'do_mapping, lun_group: %(lun_group)s, ' + 'view_id: %(view_id)s, lun_id: %(lun_id)s.', + {'lun_group': lungroup_id, + 'view_id': view_id, + 'lun_id': lun_id}) + + try: + # Create lungroup and add LUN into to lungroup. + if lungroup_id is None: + lungroup_id = self._create_lungroup(lungroup_name) + is_associated = self._is_lun_associated_to_lungroup(lungroup_id, + lun_id, + lun_type) + if not is_associated: + self.associate_lun_to_lungroup(lungroup_id, lun_id, lun_type) + + if view_id is None: + view_id = self._add_mapping_view(mapping_view_name) + self._associate_hostgroup_to_view(view_id, hostgroup_id) + self._associate_lungroup_to_view(view_id, lungroup_id) + if portgroup_id: + self._associate_portgroup_to_view(view_id, portgroup_id) + + else: + if not self.hostgroup_associated(view_id, hostgroup_id): + self._associate_hostgroup_to_view(view_id, hostgroup_id) + if not self.lungroup_associated(view_id, lungroup_id): + self._associate_lungroup_to_view(view_id, lungroup_id) + if portgroup_id: + if not self._portgroup_associated(view_id, + portgroup_id): + self._associate_portgroup_to_view(view_id, + portgroup_id) + + if hypermetro_lun: + aval_luns = self.find_view_by_id(view_id) + map_info["lun_id"] = lun_id + map_info["view_id"] = view_id + map_info["aval_luns"] = aval_luns + + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error( + 'Error occurred when adding hostgroup and lungroup to ' + 'view. 
Remove lun from lungroup now.') + self.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) + + return map_info + + def check_iscsi_initiators_exist_in_host(self, host_id): + url = "/iscsi_initiator?range=[0-1023]&PARENTID=%s" % host_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, 'Get host initiators info failed.') + if "data" in result: + return True + + return False + + def ensure_initiator_added(self, initiator_name, host_id): + added = self._initiator_is_added_to_array(initiator_name) + if not added: + self._add_initiator_to_array(initiator_name) + if not self.is_initiator_associated_to_host(initiator_name, host_id): + self._associate_initiator_to_host(initiator_name, host_id) + + alua_info = self._find_alua_info(self.iscsi_info, initiator_name) + LOG.info('Use ALUA %s when adding initiator to host.', alua_info) + self._use_iscsi_alua(initiator_name, alua_info) + + def find_hostgroup(self, groupname): + """Get the given hostgroup id.""" + url = "/hostgroup?range=[0-8191]" + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get hostgroup information error.')) + + return self._get_id_from_result(result, groupname, 'NAME') + + def _find_lungroup(self, lungroup_name): + """Get the given hostgroup id.""" + url = "/lungroup?range=[0-8191]" + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get lungroup information error.')) + + return self._get_id_from_result(result, lungroup_name, 'NAME') + + def create_hostgroup_with_check(self, hostgroup_name): + """Check if host exists on the array, or create it.""" + hostgroup_id = self.find_hostgroup(hostgroup_name) + if hostgroup_id: + LOG.info( + 'create_hostgroup_with_check. ' + 'hostgroup name: %(name)s, ' + 'hostgroup id: %(id)s', + {'name': hostgroup_name, + 'id': hostgroup_id}) + return hostgroup_id + + try: + hostgroup_id = self._create_hostgroup(hostgroup_name) + except Exception: + LOG.info( + 'Failed to create hostgroup: %(name)s. ' + 'Please check if it exists on the array.', + {'name': hostgroup_name}) + hostgroup_id = self.find_hostgroup(hostgroup_name) + if hostgroup_id is None: + err_msg = (_( + 'Failed to create hostgroup: %(name)s. ' + 'Check if it exists on the array.') + % {'name': hostgroup_name}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + LOG.info( + 'create_hostgroup_with_check. ' + 'Create hostgroup success. 
' + 'hostgroup name: %(name)s, ' + 'hostgroup id: %(id)s', + {'name': hostgroup_name, + 'id': hostgroup_id}) + return hostgroup_id + + def _create_hostgroup(self, hostgroup_name): + url = "/hostgroup" + data = {"TYPE": "14", "NAME": hostgroup_name} + result = self.call(url, data) + + msg = _('Create hostgroup error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data']['ID'] + + def _create_lungroup(self, lungroup_name): + url = "/lungroup" + data = {"DESCRIPTION": lungroup_name, + "APPTYPE": '0', + "GROUPTYPE": '0', + "NAME": lungroup_name} + result = self.call(url, data) + + msg = _('Create lungroup error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data']['ID'] + + def delete_lungroup(self, lungroup_id): + url = "/LUNGroup/" + lungroup_id + result = self.call(url, None, "DELETE") + self._assert_rest_result(result, _('Delete lungroup error.')) + + def lungroup_associated(self, view_id, lungroup_id): + url = ("/mappingview/associate?TYPE=245&" + "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s" % lungroup_id) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Check lungroup associate error.')) + + if self._get_id_from_result(result, view_id, 'ID'): + return True + return False + + def hostgroup_associated(self, view_id, hostgroup_id): + url = ("/mappingview/associate?TYPE=245&" + "ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=%s" % hostgroup_id) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Check hostgroup associate error.')) + + if self._get_id_from_result(result, view_id, 'ID'): + return True + return False + + def get_host_lun_id(self, host_id, lun_id, lun_type=constants.LUN_TYPE): + cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' + url = ("/%s/associate?TYPE=%s&ASSOCIATEOBJTYPE=21" + "&ASSOCIATEOBJID=%s" % (cmd_type, lun_type, host_id)) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Find host lun id error.')) + + host_lun_id = 1 + if 'data' in result: + for item in result['data']: + if lun_id == item['ID']: + associate_data = item['ASSOCIATEMETADATA'] + try: + hostassoinfo = json.loads(associate_data) + host_lun_id = hostassoinfo['HostLUNID'] + break + except Exception as err: + LOG.error("JSON transfer data error. %s.", err) + raise + return host_lun_id + + def get_host_id_by_name(self, host_name): + """Get the given host ID.""" + url = "/host?filter=NAME::%s" % host_name + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Find host in hostgroup error.')) + + if 'data' in result and result['data']: + return result['data'][0]['ID'] + + def add_host_with_check(self, host_name): + host_id = huawei_utils.get_host_id(self, host_name) + if host_id: + LOG.info( + 'add_host_with_check. ' + 'host name: %(name)s, ' + 'host id: %(id)s', + {'name': host_name, + 'id': host_id}) + return host_id + + encoded_name = huawei_utils.encode_host_name(host_name) + + try: + host_id = self._add_host(encoded_name, host_name) + except Exception: + LOG.info( + 'Failed to create host: %(name)s. ' + 'Check if it exists on the array.', + {'name': encoded_name}) + host_id = self.get_host_id_by_name(encoded_name) + if not host_id: + msg = _('Failed to create host: %(name)s. ' + 'Please check if it exists on the array.' + ) % {'name': encoded_name} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.info( + 'add_host_with_check. ' + 'create host success. 
' + 'host name: %(name)s, ' + 'host id: %(id)s', + {'name': encoded_name, + 'id': host_id}) + return host_id + + def _add_host(self, hostname, host_name_before_hash): + """Add a new host.""" + url = "/host" + data = {"TYPE": "21", + "NAME": hostname, + "OPERATIONSYSTEM": "4", + "DESCRIPTION": host_name_before_hash} + result = self.call(url, data) + self._assert_rest_result(result, _('Add new host error.')) + + if 'data' in result: + return result['data']['ID'] + + def _is_host_associate_to_hostgroup(self, hostgroup_id, host_id): + """Check whether the host is associated to the hostgroup.""" + url = ("/host/associate?TYPE=21&" + "ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=%s" % hostgroup_id) + + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Check hostgroup associate error.')) + + if self._get_id_from_result(result, host_id, 'ID'): + return True + + return False + + def _is_lun_associated_to_lungroup(self, lungroup_id, lun_id, + lun_type=constants.LUN_TYPE): + """Check whether the lun is associated to the lungroup.""" + cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' + url = ("/%s/associate?TYPE=%s&" + "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s" + % (cmd_type, lun_type, lungroup_id)) + + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Check lungroup associate error.')) + + if self._get_id_from_result(result, lun_id, 'ID'): + return True + + return False + + def _associate_host_to_hostgroup(self, hostgroup_id, host_id): + url = "/hostgroup/associate" + data = {"TYPE": "14", + "ID": hostgroup_id, + "ASSOCIATEOBJTYPE": "21", + "ASSOCIATEOBJID": host_id} + + result = self.call(url, data) + self._assert_rest_result(result, _('Associate host to hostgroup ' + 'error.')) + + def associate_lun_to_lungroup(self, lungroup_id, lun_id, + lun_type=constants.LUN_TYPE): + """Associate lun to lungroup.""" + url = "/lungroup/associate" + data = {"ID": lungroup_id, + "ASSOCIATEOBJTYPE": lun_type, + "ASSOCIATEOBJID": lun_id} + result = self.call(url, data) + self._assert_rest_result(result, _('Associate lun to lungroup error.')) + + def remove_lun_from_lungroup(self, lungroup_id, lun_id, + lun_type=constants.LUN_TYPE): + """Remove lun from lungroup.""" + url = ("/lungroup/associate?ID=%s&ASSOCIATEOBJTYPE=%s" + "&ASSOCIATEOBJID=%s" % (lungroup_id, lun_type, lun_id)) + + result = self.call(url, None, 'DELETE') + self._assert_rest_result( + result, _('Delete associated lun from lungroup error.')) + + def _initiator_is_added_to_array(self, ininame): + """Check whether the initiator is already added on the array.""" + url = "/iscsi_initiator?range=[0-1023]" + result = self.call(url, None, "GET") + self._assert_rest_result(result, + _('Check initiator added to array error.')) + + if self._get_id_from_result(result, ininame, 'ID'): + return True + return False + + def is_initiator_associated_to_host(self, ininame, host_id): + """Check whether the initiator is associated to the host.""" + url = "/iscsi_initiator?range=[0-1023]" + result = self.call(url, None, "GET") + self._assert_rest_result( + result, _('Check initiator associated to host error.')) + + for item in result.get('data'): + if item['ID'] == ininame: + if item['ISFREE'] == "true": + return False + if item['PARENTID'] == host_id: + return True + else: + msg = (_("Initiator %(ini)s has been added to host " + "%(host)s.") % {"ini": ininame, + "host": item['PARENTNAME']}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return True + + def is_initiator_used_chap(self, 
ininame): + """Check whether the initiator is associated to the host.""" + url = "/iscsi_initiator?range=[0-256]" + result = self.call(url, None, "GET") + self._assert_rest_result(result, + 'Check initiator associated to host error.') + + if "data" in result: + for item in result['data']: + if item['ID'] == ininame and item['USECHAP'] == "true": + return True + return False + + def _add_initiator_to_array(self, initiator_name): + """Add a new initiator to storage device.""" + url = "/iscsi_initiator" + data = {"TYPE": "222", + "ID": initiator_name, + "USECHAP": "false"} + result = self.call(url, data, "POST") + self._assert_rest_result(result, + _('Add initiator to array error.')) + + def _add_initiator_to_host(self, initiator_name, host_id): + url = "/iscsi_initiator/" + initiator_name + data = {"TYPE": "222", + "ID": initiator_name, + "USECHAP": "false", + "PARENTTYPE": "21", + "PARENTID": host_id} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, + _('Associate initiator to host error.')) + + def _associate_initiator_to_host(self, + initiator_name, + host_id): + """Associate initiator with the host.""" + chapinfo = self.find_chap_info(self.iscsi_info, initiator_name) + if chapinfo: + LOG.info('Use CHAP when adding initiator to host.') + self._use_chap(chapinfo, initiator_name, host_id) + else: + self._add_initiator_to_host(initiator_name, host_id) + + def find_chap_info(self, iscsi_info, initiator_name): + """Find CHAP info from xml.""" + chapinfo = None + for ini in iscsi_info: + if ini['Name'] == initiator_name: + if 'CHAPinfo' in ini: + chapinfo = ini['CHAPinfo'] + break + + return chapinfo + + def _find_alua_info(self, config, initiator_name): + """Find ALUA info from xml.""" + alua_info = {'ALUA': '0'} + for ini in config: + if ini.get('Name') == initiator_name: + if 'ALUA' in ini: + alua_info['ALUA'] = ini['ALUA'] + + if alua_info['ALUA'] not in ('0', '1'): + msg = _('Invalid ALUA value. ALUA value must be 1 or 0.') + LOG.error(msg) + raise exception.InvalidInput(msg) + + if alua_info['ALUA'] == '1': + for k in ('FAILOVERMODE', 'SPECIALMODETYPE', 'PATHTYPE'): + if k in ini: + alua_info[k] = ini[k] + + break + + return alua_info + + def _use_chap(self, chapinfo, initiator_name, host_id): + """Use CHAP when adding initiator to host.""" + (chap_username, chap_password) = chapinfo.split(";") + + url = "/iscsi_initiator/" + initiator_name + data = {"TYPE": "222", + "USECHAP": "true", + "CHAPNAME": chap_username, + "CHAPPASSWORD": chap_password, + "ID": initiator_name, + "PARENTTYPE": "21", + "PARENTID": host_id} + result = self.call(url, data, "PUT", filter_flag=True) + msg = _('Use CHAP to associate initiator to host error. 
' + 'Please check the CHAP username and password.') + self._assert_rest_result(result, msg) + + def _use_iscsi_alua(self, initiator_name, alua_info): + """Use ALUA when adding initiator to host.""" + url = "/iscsi_initiator" + data = {"ID": initiator_name, + 'MULTIPATHTYPE': alua_info.pop('ALUA')} + data.update(alua_info) + + result = self.call(url, data, "PUT") + self._assert_rest_result( + result, _('Use ALUA to associate initiator to host error.')) + + def remove_chap(self, initiator_name): + """Remove CHAP when terminate connection.""" + url = "/iscsi_initiator" + data = {"USECHAP": "false", + "MULTIPATHTYPE": "0", + "ID": initiator_name} + result = self.call(url, data, "PUT") + + self._assert_rest_result(result, _('Remove CHAP error.')) + + def find_mapping_view(self, name): + """Find mapping view.""" + if not name: + return None + url = "/mappingview?filter=NAME::%s" % name + result = self.call(url, None, "GET") + + msg = _('Find mapping view error.') + self._assert_rest_result(result, msg) + + if 'data' in result and result['data']: + return result['data'][0]['ID'] + + def _add_mapping_view(self, name): + if not name: + return None + url = "/mappingview" + data = {"NAME": name, "TYPE": "245"} + result = self.call(url, data) + self._assert_rest_result(result, _('Add mapping view error.')) + + return result['data']['ID'] + + def _associate_hostgroup_to_view(self, view_id, hostgroup_id): + url = "/MAPPINGVIEW/CREATE_ASSOCIATE" + data = {"ASSOCIATEOBJTYPE": "14", + "ASSOCIATEOBJID": hostgroup_id, + "TYPE": "245", + "ID": view_id} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Associate host to mapping view ' + 'error.')) + + def _associate_lungroup_to_view(self, view_id, lungroup_id): + url = "/MAPPINGVIEW/CREATE_ASSOCIATE" + data = {"ASSOCIATEOBJTYPE": "256", + "ASSOCIATEOBJID": lungroup_id, + "TYPE": "245", + "ID": view_id} + + result = self.call(url, data, "PUT") + self._assert_rest_result( + result, _('Associate lungroup to mapping view error.')) + + def delete_lungroup_mapping_view(self, view_id, lungroup_id): + """Remove lungroup associate from the mapping view.""" + url = "/mappingview/REMOVE_ASSOCIATE" + data = {"ASSOCIATEOBJTYPE": "256", + "ASSOCIATEOBJID": lungroup_id, + "TYPE": "245", + "ID": view_id} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Delete lungroup from mapping view ' + 'error.')) + + def delete_hostgoup_mapping_view(self, view_id, hostgroup_id): + """Remove hostgroup associate from the mapping view.""" + url = "/mappingview/REMOVE_ASSOCIATE" + data = {"ASSOCIATEOBJTYPE": "14", + "ASSOCIATEOBJID": hostgroup_id, + "TYPE": "245", + "ID": view_id} + + result = self.call(url, data, "PUT") + self._assert_rest_result( + result, _('Delete hostgroup from mapping view error.')) + + def delete_portgroup_mapping_view(self, view_id, portgroup_id): + """Remove portgroup associate from the mapping view.""" + url = "/mappingview/REMOVE_ASSOCIATE" + data = {"ASSOCIATEOBJTYPE": "257", + "ASSOCIATEOBJID": portgroup_id, + "TYPE": "245", + "ID": view_id} + + result = self.call(url, data, "PUT") + self._assert_rest_result( + result, _('Delete portgroup from mapping view error.')) + + def delete_mapping_view(self, view_id): + """Remove mapping view from the storage.""" + url = "/mappingview/" + view_id + result = self.call(url, None, "DELETE") + self._assert_rest_result(result, _('Delete mapping view error.')) + + def get_obj_count_from_lungroup(self, lungroup_id): + """Get all objects count associated to the lungroup.""" + 
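        # Editor's note: the numeric TYPE codes used by the mapping-related
        # REST calls in this file are: 245 = mapping view, 14 = host group,
        # 256 = LUN group, 257 = port group, 21 = host, 11 = LUN,
        # 27 = snapshot. For example, associating a LUN group with a view
        # is a PUT to /MAPPINGVIEW/CREATE_ASSOCIATE with a body of
        #     {"ID": view_id, "TYPE": "245",
        #      "ASSOCIATEOBJTYPE": "256", "ASSOCIATEOBJID": lungroup_id}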
lun_count = self._get_obj_count_from_lungroup_by_type( + lungroup_id, constants.LUN_TYPE) + snapshot_count = self._get_obj_count_from_lungroup_by_type( + lungroup_id, constants.SNAPSHOT_TYPE) + return int(lun_count) + int(snapshot_count) + + def _get_obj_count_from_lungroup_by_type(self, lungroup_id, + lun_type=constants.LUN_TYPE): + cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' + lunnum = 0 + if not lungroup_id: + return lunnum + + url = ("/%s/count?TYPE=%s&ASSOCIATEOBJTYPE=256&" + "ASSOCIATEOBJID=%s" % (cmd_type, lun_type, lungroup_id)) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Find obj number error.')) + if 'data' in result: + lunnum = int(result['data']['COUNT']) + return lunnum + + def is_portgroup_associated_to_view(self, view_id, portgroup_id): + """Check whether the port group is associated to the mapping view.""" + url = ("/portgroup/associate?ASSOCIATEOBJTYPE=245&" + "ASSOCIATEOBJID=%s&range=[0-8191]" % view_id) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Find portgroup from mapping view ' + 'error.')) + + if self._get_id_from_result(result, portgroup_id, 'ID'): + return True + return False + + def find_lungroup_from_map(self, view_id): + """Get lungroup from the given map""" + url = ("/mappingview/associate/lungroup?TYPE=256&" + "ASSOCIATEOBJTYPE=245&ASSOCIATEOBJID=%s" % view_id) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Find lun group from mapping view ' + 'error.')) + lungroup_id = None + if 'data' in result: + # One map can have only one lungroup. + for item in result['data']: + lungroup_id = item['ID'] + + return lungroup_id + + def start_luncopy(self, luncopy_id): + """Start a LUNcopy.""" + url = "/LUNCOPY/start" + data = {"TYPE": "219", "ID": luncopy_id} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Start LUNcopy error.')) + + def _get_capacity(self, pool_name, result): + """Get free capacity and total capacity of the pool.""" + pool_info = self.get_pool_info(pool_name, result) + pool_capacity = {'total_capacity': 0.0, + 'free_capacity': 0.0} + + if pool_info: + total = float(pool_info['TOTALCAPACITY']) / constants.CAPACITY_UNIT + free = float(pool_info['CAPACITY']) / constants.CAPACITY_UNIT + pool_capacity['total_capacity'] = total + pool_capacity['free_capacity'] = free + + return pool_capacity + + def _get_disk_type(self, pool_name, result): + """Get disk type of the pool.""" + pool_info = self.get_pool_info(pool_name, result) + if not pool_info: + return None + + pool_disk = [] + for i, x in enumerate(['ssd', 'sas', 'nl_sas']): + if (pool_info['TIER%dCAPACITY' % i] and + pool_info['TIER%dCAPACITY' % i] != '0'): + pool_disk.append(x) + + if len(pool_disk) > 1: + pool_disk = ['mix'] + + return pool_disk[0] if pool_disk else None + + def _get_smarttier(self, disk_type): + return disk_type is not None and disk_type == 'mix' + + def get_luncopy_info(self, luncopy_id): + """Get LUNcopy information.""" + url = "/LUNCOPY/%s" % luncopy_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get LUNcopy information error.')) + + luncopyinfo = {} + if 'data' in result: + luncopyinfo['name'] = result['data']['NAME'] + luncopyinfo['id'] = result['data']['ID'] + luncopyinfo['state'] = result['data']['HEALTHSTATUS'] + luncopyinfo['status'] = result['data']['RUNNINGSTATUS'] + return luncopyinfo + + def delete_luncopy(self, luncopy_id): + """Delete a LUNcopy.""" + url = "/LUNCOPY/%s" % luncopy_id + result = 
self.call(url, None, "DELETE") + self._assert_rest_result(result, _('Delete LUNcopy error.')) + + def get_init_targ_map(self, wwns, is_get_manage_tgt=False): + init_targ_map = {} + tgt_port_wwns = [] + for wwn in wwns: + tgtwwpns = self.get_fc_target_wwpns(wwn) + if not tgtwwpns: + continue + + init_targ_map[wwn] = tgtwwpns + for tgtwwpn in tgtwwpns: + if tgtwwpn not in tgt_port_wwns: + tgt_port_wwns.append(tgtwwpn) + + if not tgt_port_wwns and not is_get_manage_tgt: + err_msg = (_('Get FC target wwpns error, tgt_port_wwns: ' + '%(tgt_port_wwns)s, init_targ_map: %(init_targ_map)s') + % {'tgt_port_wwns': tgt_port_wwns, + 'init_targ_map': init_targ_map}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + return (tgt_port_wwns, init_targ_map) + + def get_online_free_wwns(self): + """Get online free WWNs. + + If no new ports connected, return an empty list. + """ + url = "/fc_initiator?ISFREE=true&range=[0-65535]" + result = self.call(url, None, "GET") + + msg = _('Get connected free FC wwn error.') + self._assert_rest_result(result, msg) + + wwns = [] + if 'data' in result: + for item in result['data']: + if item['RUNNINGSTATUS'] == constants.FC_INIT_ONLINE: + wwns.append(item['ID']) + + return wwns + + def _use_fc_alua(self, wwn, alua_info): + url = "/fc_initiator/" + wwn + data = {"ID": wwn, + "MULTIPATHTYPE": alua_info.pop('ALUA')} + data.update(alua_info) + + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Set ALUA for fc initiator error.')) + + def add_fc_port_to_host(self, host_id, wwn): + """Add a FC port to the host.""" + url = "/fc_initiator/" + wwn + data = {"ID": wwn, + "PARENTTYPE": 21, + "PARENTID": host_id} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Add FC port to host error.')) + + def get_fc_target_wwpns(self, wwn): + url = ("/host_link?INITIATOR_TYPE=223&INITIATOR_PORT_WWN=" + wwn) + result = self.call(url, None, "GET") + + msg = _('Get FC target wwpn error.') + self._assert_rest_result(result, msg) + + fc_wwpns = [] + if "data" in result: + for item in result['data']: + if wwn == item['INITIATOR_PORT_WWN']: + fc_wwpns.append(item['TARGET_PORT_WWN']) + + return fc_wwpns + + def update_volume_stats(self): + data = {} + data['pools'] = [] + result = self.get_all_pools() + for pool_name in self.storage_pools: + capacity = self._get_capacity(pool_name, result) + disk_type = self._get_disk_type(pool_name, result) + tier_support = self._get_smarttier(disk_type) + pool = {} + pool.update(dict( + location_info=self.device_id, + pool_name=pool_name, + total_capacity_gb=capacity['total_capacity'], + free_capacity_gb=capacity['free_capacity'], + reserved_percentage=self.configuration.safe_get( + 'reserved_percentage'), + max_over_subscription_ratio=self.configuration.safe_get( + 'max_over_subscription_ratio'), + smarttier=tier_support + )) + if disk_type: + pool['disk_type'] = disk_type + + data['pools'].append(pool) + return data + + def check_storage_pools(self): + result = self.get_all_pools() + s_pools = [] + for pool in result: + if 'USAGETYPE' in pool: + if pool['USAGETYPE'] in (constants.BLOCK_STORAGE_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE): + s_pools.append(pool['NAME']) + else: + s_pools.append(pool['NAME']) + for pool_name in self.storage_pools: + if pool_name not in s_pools: + err_msg = (_('Block storage pool %s does not exist on ' + 'the array.') % pool_name) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def _update_qos_policy_lunlist(self, 
lun_list, policy_id): + url = "/ioclass/" + policy_id + data = {"TYPE": "230", + "ID": policy_id, + "LUNLIST": lun_list} + + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Update QoS policy error.')) + + def _get_tgt_ip_from_portgroup(self, portgroup_id): + target_ips = [] + url = ("/eth_port/associate?TYPE=213&ASSOCIATEOBJTYPE=257" + "&ASSOCIATEOBJID=%s" % portgroup_id) + result = self.call(url, None, "GET") + + msg = _('Get target IP error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + if 'data' in result: + for item in result['data']: + if ((item['IPV4ADDR'] or item['IPV6ADDR']) + and item['HEALTHSTATUS'] == constants.STATUS_HEALTH + and item['RUNNINGSTATUS'] == constants.STATUS_RUNNING): + if item['IPV4ADDR']: + target_ips.append(item['IPV4ADDR']) + if item['IPV6ADDR']: + target_ips.append(item['IPV6ADDR']) + LOG.info('_get_tgt_ip_from_portgroup: Get ip: %s.', target_ips) + + return target_ips + + def get_iscsi_params(self, connector): + """Get target iSCSI params, including iqn, IP.""" + initiator = connector['initiator'] + multipath = connector.get('multipath', False) + target_ips = [] + target_iqns = [] + temp_tgt_ips = [] + portgroup = None + portgroup_id = None + + for ini in self.iscsi_info: + if ini['Name'] == initiator: + portgroup = ini.get('TargetPortGroup') + + if portgroup: + portgroup_id = self.get_tgt_port_group(portgroup) + tgt_ips = self._get_tgt_ip_from_portgroup(portgroup_id) + temp_tgt_ips = self.convert_ip_to_normalized_format(tgt_ips) + port_ips = self._get_tgt_port_ip_from_rest() + valid_tgt_ips = self.convert_ip_to_normalized_format(port_ips) + + for ip in temp_tgt_ips: + if ip in valid_tgt_ips: + target_ips.append(ip) + + if not target_ips and multipath: + msg = (_( + 'get_iscsi_params: No valid port in portgroup. ' + 'portgroup_id: %(id)s, please check it on storage.') + % {'id': portgroup_id}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not target_ips: + target_ips = self._get_target_ip(initiator) + + # Deal with the remote tgt ip. + if 'remote_target_ip' in connector: + target_ips.append(connector['remote_target_ip']) + LOG.info('Get the default ip: %s.', target_ips) + target_ips = self.convert_ip_to_normalized_format(target_ips) + + for ip in target_ips: + target_iqn = self._get_tgt_iqn_from_rest(ip) + if target_iqn: + target_iqns.append(target_iqn) + + if not target_iqns: + err_msg = (_( + 'Get iSCSI target iqn error, please check the target IP ' + 'configured on array.')) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + target_ips = [] + for iqn in target_iqns: + ip = iqn.split(':', 5)[5] + if netaddr.IPAddress(ip).version == 6: + ip = '[' + ip + ']' + target_ips.append(ip) + + return (target_iqns, target_ips, portgroup_id) + + def convert_ip_to_normalized_format(self, target_ips): + format_ips = [] + for ip in target_ips: + format_ip = netaddr.IPAddress(ip) + if format_ip.version == 6: + ip = str(format_ip.format(dialect=netaddr.ipv6_compact)) + format_ips.append(ip) + return format_ips + + def _get_target_ip(self, initiator): + target_ips = [] + for ini in self.iscsi_info: + if ini['Name'] == initiator: + if ini.get('TargetIP'): + target_ips.append(ini.get('TargetIP')) + + # If not specify target IP for some initiators, use default IP. 
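For reference, the portal address that get_iscsi_params ultimately hands back is derived by slicing the target ID string returned by the array: keep the IQN half, take its sixth colon-separated field as the IP, and bracket IPv6. A minimal standalone sketch of that parsing convention, using a hypothetical target ID (the real format comes from the array's REST API):

```python
import netaddr

def portal_ip_from_target_id(target_id):
    # Keep the IQN part; the array appends ",t,0x01"-style suffixes.
    iqn_info = target_id.split(',', 1)[0]
    # The portal IP is the sixth colon-separated field of the IQN string.
    ip = iqn_info.split(':', 5)[5]
    ip_obj = netaddr.IPAddress(ip)
    if ip_obj.version == 6:
        # Compact and bracket IPv6 so it can be embedded in a portal address.
        ip = '[%s]' % ip_obj.format(dialect=netaddr.ipv6_compact)
    return ip

# Hypothetical target ID shaped like the REST API values:
print(portal_ip_from_target_id(
    'iqn.2006-08.com.huawei:oceanstor:2100f00fee3f5b34::20001:'
    '192.168.100.9,t,0x01'))  # -> 192.168.100.9
```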
+ if not target_ips: + default_target_ips = self.iscsi_default_target_ip + if default_target_ips: + target_ips.append(default_target_ips[0]) + + else: + msg = (_( + 'get_iscsi_params: Failed to get target IP ' + 'for initiator %(ini)s, please check config file.') + % {'ini': initiator}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return target_ips + + def _get_tgt_port_ip_from_rest(self): + url = "/iscsi_tgt_port" + result = self.call(url, None, "GET") + info_list = [] + target_ips = [] + if result['error']['code'] != 0: + LOG.warning("Can't find target port info from rest.") + return target_ips + + elif not result['data']: + msg = (_( + "Can't find valid IP from rest, please check it on storage.")) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if 'data' in result: + for item in result['data']: + info_list.append(item['ID']) + + if not info_list: + LOG.warning("Can't find target port info from rest.") + return target_ips + + for info in info_list: + iqn_info = info.split(',', 1)[0] + target_ip = iqn_info.split(':', 5)[5] + target_ips.append(target_ip) + return target_ips + + def _get_tgt_iqn_from_rest(self, target_ip): + url = "/iscsi_tgt_port" + result = self.call(url, None, "GET") + + target_iqn = None + if result['error']['code'] != 0: + LOG.warning("Can't find target iqn from rest.") + return target_iqn + + if 'data' in result: + for item in result['data']: + iqn_info = item['ID'].split(',', 1)[0] + ip = iqn_info.split(':', 5)[5] + format_ip = netaddr.IPAddress(ip) + if format_ip.version == 6: + ip = str(format_ip.format(dialect=netaddr.ipv6_compact)) + if target_ip == ip: + target_iqn = item['ID'] + break + + if not target_iqn: + LOG.warning("Can't find target iqn from rest.") + return target_iqn + + split_list = target_iqn.split(",") + target_iqn_before = split_list[0] + + split_list_new = target_iqn_before.split("+") + target_iqn = split_list_new[1] + + return target_iqn + + def create_qos_policy(self, qos, lun_id): + # Get local time. + localtime = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) + # Package QoS name. + qos_name = constants.QOS_NAME_PREFIX + lun_id + '_' + localtime + + data = {"TYPE": "230", + "NAME": qos_name, + "LUNLIST": ["%s" % lun_id], + "CLASSTYPE": "1", + "SCHEDULEPOLICY": "2", + "SCHEDULESTARTTIME": "1410969600", + "STARTTIME": "08:00", + "DURATION": "86400", + "CYCLESET": "[1,2,3,4,5,6,0]", + } + data.update(qos) + url = "/ioclass/" + + result = self.call(url, data) + self._assert_rest_result(result, _('Create QoS policy error.')) + + return result['data']['ID'] + + def delete_qos_policy(self, qos_id): + """Delete a QoS policy.""" + url = "/ioclass/" + qos_id + data = {"TYPE": "230", "ID": qos_id} + + result = self.call(url, data, 'DELETE') + self._assert_rest_result(result, _('Delete QoS policy error.')) + + def activate_deactivate_qos(self, qos_id, enablestatus): + """Activate or deactivate QoS. 
+ + enablestatus: true (activate) + enbalestatus: false (deactivate) + """ + url = "/ioclass/active/" + qos_id + data = {"TYPE": 230, + "ID": qos_id, + "ENABLESTATUS": enablestatus} + result = self.call(url, data, "PUT") + self._assert_rest_result( + result, _('Activate or deactivate QoS error.')) + + def get_qos_info(self, qos_id): + """Get QoS information.""" + url = "/ioclass/" + qos_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get QoS information error.')) + + return result['data'] + + def get_lun_list_in_qos(self, qos_id, qos_info): + """Get the lun list in QoS.""" + lun_list = [] + lun_string = qos_info['LUNLIST'][1:-1] + + for lun in lun_string.split(","): + str = lun[1:-1] + lun_list.append(str) + + return lun_list + + def remove_lun_from_qos(self, lun_id, lun_list, qos_id): + """Remove lun from QoS.""" + lun_list = [i for i in lun_list if i != lun_id] + url = "/ioclass/" + qos_id + data = {"LUNLIST": lun_list, + "TYPE": 230, + "ID": qos_id} + result = self.call(url, data, "PUT") + + msg = _('Remove lun from QoS error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + def change_lun_priority(self, lun_id): + """Change lun priority to high.""" + url = "/lun/" + lun_id + data = {"TYPE": "11", + "ID": lun_id, + "IOPRIORITY": "3"} + + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Change lun priority error.')) + + def change_lun_smarttier(self, lunid, smarttier_policy): + """Change lun smarttier policy.""" + url = "/lun/" + lunid + data = {"TYPE": "11", + "ID": lunid, + "DATATRANSFERPOLICY": smarttier_policy} + + result = self.call(url, data, "PUT") + self._assert_rest_result( + result, _('Change lun smarttier policy error.')) + + def get_qosid_by_lunid(self, lun_id): + """Get QoS id by lun id.""" + url = "/lun/" + lun_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get QoS id by lun id error.')) + + return result['data']['IOCLASSID'] + + def get_lungroupids_by_lunid(self, lun_id, lun_type=constants.LUN_TYPE): + """Get lungroup ids by lun id.""" + url = ("/lungroup/associate?TYPE=256" + "&ASSOCIATEOBJTYPE=%s&ASSOCIATEOBJID=%s" % (lun_type, lun_id)) + + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get lungroup id by lun id error.')) + + lungroup_ids = [] + if 'data' in result: + for item in result['data']: + lungroup_ids.append(item['ID']) + + return lungroup_ids + + def get_lun_info(self, lun_id, lun_type=constants.LUN_TYPE): + cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' + url = ("/%s/%s" % (cmd_type, lun_id)) + result = self.call(url, None, "GET") + + msg = _('Get volume error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data'] + + def get_all_luns(self, pool_id): + count = self.get_lun_count(pool_id) + for i in range(count // constants.MAX_QUERY_LUN_COUNT + 1): + begin = i * constants.MAX_QUERY_LUN_COUNT + query_range = (begin, begin + constants.MAX_QUERY_LUN_COUNT) + result = self.get_luns_page(query_range, pool_id) + luns = result.get('data', []) + for lun in luns: + yield lun + + def get_luns_page(self, query_range, pool_id): + url = "/lun?filter=PARENTID::%s&range=[%d-%d]" % ( + pool_id, query_range[0], query_range[1]) + result = self.call(url, None, "GET", filter_flag=True) + self._assert_rest_result(result, _('Get volume by name error.')) + return result + + def get_lun_count(self, pool_id): + url = "/lun/count?filter=PARENTID::%s" % 
pool_id + result = self.call(url, None, "GET", filter_flag=True) + msg = _('Get volume error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + return int(result['data']['COUNT']) + + def get_snapshot_info(self, snapshot_id): + url = "/snapshot/" + snapshot_id + result = self.call(url, None, "GET") + + msg = _('Get snapshot error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data'] + + def extend_lun(self, lun_id, new_volume_size): + url = "/lun/expand" + data = {"TYPE": 11, "ID": lun_id, + "CAPACITY": new_volume_size} + result = self.call(url, data, 'PUT') + + msg = _('Extend volume error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data'] + + def create_lun_migration(self, src_id, dst_id, speed=2): + url = "/LUN_MIGRATION" + data = {"TYPE": '253', + "PARENTID": src_id, + "TARGETLUNID": dst_id, + "SPEED": speed, + "WORKMODE": 0} + + result = self.call(url, data, "POST") + msg = _('Create lun migration error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + def get_lun_migration_task(self): + url = '/LUN_MIGRATION?range=[0-256]' + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get lun migration task error.')) + return result + + def delete_lun_migration(self, src_id, dst_id): + url = '/LUN_MIGRATION/' + src_id + result = self.call(url, None, "DELETE") + msg = _('Delete lun migration error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + def get_partition_id_by_name(self, name): + url = "/cachepartition" + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get partition by name error.')) + + return self._get_id_from_result(result, name, 'NAME') + + def get_partition_info_by_id(self, partition_id): + + url = '/cachepartition/' + partition_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, + _('Get partition by partition id error.')) + + return result['data'] + + def add_lun_to_partition(self, lun_id, partition_id): + url = "/lun/associate/cachepartition" + data = {"ID": partition_id, + "ASSOCIATEOBJTYPE": 11, + "ASSOCIATEOBJID": lun_id} + result = self.call(url, data, "POST") + self._assert_rest_result(result, _('Add lun to partition error.')) + + def remove_lun_from_partition(self, lun_id, partition_id): + url = ('/lun/associate/cachepartition?ID=' + partition_id + + '&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=' + lun_id) + + result = self.call(url, None, "DELETE") + self._assert_rest_result(result, _('Remove lun from partition error.')) + + def get_cache_id_by_name(self, name): + url = "/SMARTCACHEPARTITION" + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get cache by name error.')) + + return self._get_id_from_result(result, name, 'NAME') + + def get_cache_info_by_id(self, cacheid): + url = "/SMARTCACHEPARTITION/" + cacheid + data = {"TYPE": "273", + "ID": cacheid} + + result = self.call(url, data, "GET") + self._assert_rest_result( + result, _('Get smartcache by cache id error.')) + + return result['data'] + + def remove_lun_from_cache(self, lun_id, cache_id): + url = "/SMARTCACHEPARTITION/REMOVE_ASSOCIATE" + data = {"ID": cache_id, + "ASSOCIATEOBJTYPE": 11, + "ASSOCIATEOBJID": lun_id, + "TYPE": 273} + + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Remove lun from cache error.')) + + def get_qos(self): + url = "/ioclass" + 
result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get QoS information error.')) + return result + + def add_lun_to_qos(self, qos_id, lun_id, lun_list): + """Add lun to QoS.""" + url = "/ioclass/" + qos_id + new_lun_list = [] + lun_list_string = lun_list[1:-1] + for lun_string in lun_list_string.split(","): + tmp_lun_id = lun_string[1:-1] + if '' != tmp_lun_id and tmp_lun_id != lun_id: + new_lun_list.append(tmp_lun_id) + + new_lun_list.append(lun_id) + + data = {"LUNLIST": new_lun_list, + "TYPE": 230, + "ID": qos_id} + result = self.call(url, data, "PUT") + msg = _('Associate lun to QoS error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + def add_lun_to_cache(self, lun_id, cache_id): + url = "/SMARTCACHEPARTITION/CREATE_ASSOCIATE" + data = {"ID": cache_id, + "ASSOCIATEOBJTYPE": 11, + "ASSOCIATEOBJID": lun_id, + "TYPE": 273} + result = self.call(url, data, "PUT") + + self._assert_rest_result(result, _('Add lun to cache error.')) + + def get_array_info(self): + url = "/system/" + result = self.call(url, None, "GET", filter_flag=True) + self._assert_rest_result(result, _('Get array info error.')) + return result.get('data', None) + + def remove_host(self, host_id): + url = "/host/%s" % host_id + result = self.call(url, None, "DELETE") + self._assert_rest_result(result, _('Remove host from array error.')) + + def delete_hostgroup(self, hostgroup_id): + url = "/hostgroup/%s" % hostgroup_id + result = self.call(url, None, "DELETE") + self._assert_rest_result(result, _('Delete hostgroup error.')) + + def remove_host_from_hostgroup(self, hostgroup_id, host_id): + url_subfix001 = "/host/associate?TYPE=14&ID=%s" % hostgroup_id + url_subfix002 = "&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%s" % host_id + url = url_subfix001 + url_subfix002 + result = self.call(url, None, "DELETE") + self._assert_rest_result(result, + _('Remove host from hostgroup error.')) + + def remove_iscsi_from_host(self, initiator): + url = "/iscsi_initiator/remove_iscsi_from_host" + data = {"TYPE": '222', + "ID": initiator} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Remove iscsi from host error.')) + + def get_host_online_fc_initiators(self, host_id): + url = "/fc_initiator?PARENTTYPE=21&PARENTID=%s" % host_id + result = self.call(url, None, "GET") + + initiators = [] + if 'data' in result: + for item in result['data']: + if (('PARENTID' in item) and (item['PARENTID'] == host_id) + and (item['RUNNINGSTATUS'] == constants.FC_INIT_ONLINE)): + initiators.append(item['ID']) + + return initiators + + def get_host_fc_initiators(self, host_id): + url = "/fc_initiator?PARENTTYPE=21&PARENTID=%s" % host_id + result = self.call(url, None, "GET") + + initiators = [] + if 'data' in result: + for item in result['data']: + if (('PARENTID' in item) and (item['PARENTID'] == host_id)): + initiators.append(item['ID']) + + return initiators + + def get_host_iscsi_initiators(self, host_id): + url = "/iscsi_initiator?PARENTTYPE=21&PARENTID=%s" % host_id + result = self.call(url, None, "GET") + + initiators = [] + if 'data' in result: + for item in result['data']: + if (('PARENTID' in item) and (item['PARENTID'] == host_id)): + initiators.append(item['ID']) + + return initiators + + def update_lun(self, lun_id, data): + url = "/lun/" + lun_id + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Update lun properties error.')) + + def rename_lun(self, lun_id, new_name, description=None): + url = "/lun/" + lun_id + data = {"NAME": 
new_name} + if description: + data.update({"DESCRIPTION": description}) + result = self.call(url, data, "PUT") + msg = _('Rename lun on array error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + def rename_snapshot(self, snapshot_id, new_name, description=None): + url = "/snapshot/" + snapshot_id + data = {"NAME": new_name} + if description: + data.update({"DESCRIPTION": description}) + result = self.call(url, data, "PUT") + msg = _('Rename snapshot on array error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + def is_fc_initiator_associated_to_host(self, ininame): + """Check whether the initiator is associated to the host.""" + url = '/fc_initiator?range=[0-65535]' + result = self.call(url, None, "GET") + self._assert_rest_result(result, + 'Check initiator associated to host error.') + + if "data" in result: + for item in result['data']: + if item['ID'] == ininame and item['ISFREE'] != "true": + return True + return False + + def remove_fc_from_host(self, initiator): + url = '/fc_initiator/remove_fc_from_host' + data = {"TYPE": '223', + "ID": initiator} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Remove fc from host error.')) + + def check_fc_initiators_exist_in_host(self, host_id): + url = "/fc_initiator?range=[0-65535]&PARENTID=%s" % host_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get host initiators info failed.')) + if 'data' in result: + return True + + return False + + def _fc_initiator_is_added_to_array(self, ininame): + """Check whether the fc initiator is already added on the array.""" + url = "/fc_initiator/" + ininame + result = self.call(url, None, "GET") + error_code = result['error']['code'] + if error_code != 0: + if error_code == constants.FC_INITIATOR_NOT_EXIST: + return False + msg = (_('Get fc initiator %(initiator)s on array error. ' + 'result: %(res)s.') % {'initiator': ininame, + 'res': result}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return True + + def _add_fc_initiator_to_array(self, ininame): + """Add a fc initiator to storage device.""" + url = '/fc_initiator/' + data = {"TYPE": '223', + "ID": ininame} + result = self.call(url, data) + self._assert_rest_result(result, _('Add fc initiator to array error.')) + + def ensure_fc_initiator_added(self, initiator_name, host_id): + added = self._fc_initiator_is_added_to_array(initiator_name) + if not added: + self._add_fc_initiator_to_array(initiator_name) + # Just add, no need to check whether have been added. 
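The surrounding ensure_fc_initiator_added follows a common "ensure" pattern: probe for the object, create it only if missing, then apply the host association unconditionally because a repeated association leaves the array in the same state. A self-contained sketch of that flow against a stand-in client (FakeClient and its methods are illustrative, not driver API):

```python
class FakeClient:
    def __init__(self):
        self.array_initiators = set()
        self.host_ports = {}

    def initiator_exists(self, wwn):
        return wwn in self.array_initiators

    def add_initiator(self, wwn):
        self.array_initiators.add(wwn)

    def add_port_to_host(self, host_id, wwn):
        # Safe to repeat: associating twice leaves the same state.
        self.host_ports.setdefault(host_id, set()).add(wwn)

def ensure_initiator_added(client, wwn, host_id):
    if not client.initiator_exists(wwn):
        client.add_initiator(wwn)
    # Just add; no need to check whether it has already been added.
    client.add_port_to_host(host_id, wwn)

c = FakeClient()
for _ in range(2):  # Calling twice demonstrates the idempotency.
    ensure_initiator_added(c, '2100001b329b7199', 'host-1')
assert c.host_ports == {'host-1': {'2100001b329b7199'}}
```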
+ self.add_fc_port_to_host(host_id, initiator_name) + + alua_info = self._find_alua_info(self.fc_info, initiator_name) + LOG.info('Use ALUA %s when adding initiator to host.', alua_info) + self._use_fc_alua(initiator_name, alua_info) + + def get_fc_ports(self): + url = '/fc_port' + result = self.call(url, None, "GET") + msg = _('Get FC ports from array error.') + self._assert_rest_result(result, msg) + + return result.get('data', []) + + def get_fc_initiator_count(self): + url = '/fc_initiator/count' + result = self.call(url, None, "GET") + + msg = _('Get fc initiator count error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return int(result['data']['COUNT']) + + def get_fc_initiator_on_array(self): + count = self.get_fc_initiator_count() + if count <= 0: + return [] + + fc_initiators = [] + for i in range((count - 1) // constants.MAX_QUERY_COUNT + 1): + url = '/fc_initiator?range=[%d-%d]' % ( + i * constants.MAX_QUERY_COUNT, + (i + 1) * constants.MAX_QUERY_COUNT) + result = self.call(url, None, "GET") + + msg = _('Get FC initiators from array error.') + self._assert_rest_result(result, msg) + + if 'data' in result: + for item in result['data']: + fc_initiators.append(item['ID']) + + return fc_initiators + + def get_hyper_domain_id(self, domain_name): + url = "/HyperMetroDomain?range=[0-32]" + result = self.call(url, None, "GET") + domain_id = None + if "data" in result: + for item in result['data']: + if domain_name == item['NAME']: + domain_id = item['ID'] + break + + msg = _('get_hyper_domain_id error.') + self._assert_rest_result(result, msg) + return domain_id + + def create_hypermetro(self, hcp_param): + url = "/HyperMetroPair" + result = self.call(url, hcp_param, "POST") + + msg = _('create_hypermetro_pair error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + return result['data'] + + @utils.synchronized('huawei_delete_hypermetro_pair', external=True) + def delete_hypermetro(self, metro_id): + url = "/HyperMetroPair/" + metro_id + result = self.call(url, None, "DELETE") + + msg = _('delete_hypermetro error.') + self._assert_rest_result(result, msg) + + def sync_hypermetro(self, metro_id): + url = "/HyperMetroPair/synchronize_hcpair" + + data = {"ID": metro_id, + "TYPE": "15361"} + result = self.call(url, data, "PUT") + + msg = _('sync_hypermetro error.') + self._assert_rest_result(result, msg) + + def stop_hypermetro(self, metro_id): + url = '/HyperMetroPair/disable_hcpair' + + data = {"ID": metro_id, + "TYPE": "15361"} + result = self.call(url, data, "PUT") + + msg = _('stop_hypermetro error.') + self._assert_rest_result(result, msg) + + def get_hypermetro_by_id(self, metro_id): + url = "/HyperMetroPair?filter=ID::%s" % metro_id + result = self.call(url, None, "GET") + msg = _('get_hypermetro_by_id error.') + self._assert_rest_result(result, msg) + if result.get('data'): + return result['data'][0] + + def get_hypermetro_by_lun_name(self, lun_name): + url = "/HyperMetroPair?filter=LOCALOBJNAME::%s" % lun_name + result = self.call(url, None, "GET") + msg = _('Get hypermetro by local lun name %s error.') % lun_name + self._assert_rest_result(result, msg) + if result.get('data'): + return result['data'][0] + + def change_hostlun_id(self, map_info, hostlun_id): + url = "/mappingview" + view_id = six.text_type(map_info['view_id']) + lun_id = six.text_type(map_info['lun_id']) + hostlun_id = six.text_type(hostlun_id) + data = {"TYPE": 245, + "ID": view_id, + "ASSOCIATEOBJTYPE": 11, + "ASSOCIATEOBJID": 
lun_id, + "ASSOCIATEMETADATA": [{"LUNID": lun_id, + "hostLUNId": hostlun_id}]} + + result = self.call(url, data, "PUT") + + msg = 'change hostlun id error.' + self._assert_rest_result(result, msg) + + def find_view_by_id(self, view_id): + url = "/MAPPINGVIEW/" + view_id + result = self.call(url, None, "GET") + + msg = _('Change hostlun id error.') + self._assert_rest_result(result, msg) + if 'data' in result: + return result["data"]["AVAILABLEHOSTLUNIDLIST"] + + def get_metrogroup_by_name(self, name): + url = "/HyperMetro_ConsistentGroup?type='15364'" + result = self.call(url, None, "GET") + + msg = _('Get hypermetro group by name error.') + self._assert_rest_result(result, msg) + return self._get_id_from_result(result, name, 'NAME') + + def get_metrogroup_by_id(self, id): + url = "/HyperMetro_ConsistentGroup/" + id + result = self.call(url, None, "GET") + + msg = _('Get hypermetro group by id error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + return result['data'] + + def create_metrogroup(self, name, description, domain_id): + url = "/HyperMetro_ConsistentGroup" + data = {"NAME": name, + "TYPE": "15364", + "DESCRIPTION": description, + "RECOVERYPOLICY": "1", + "SPEED": "2", + "PRIORITYSTATIONTYPE": "0", + "DOMAINID": domain_id} + result = self.call(url, data, "POST") + + msg = _('create hypermetro group error.') + self._assert_rest_result(result, msg) + if 'data' in result: + return result["data"]["ID"] + + def delete_metrogroup(self, metrogroup_id): + url = "/HyperMetro_ConsistentGroup/" + metrogroup_id + result = self.call(url, None, "DELETE") + + msg = _('Delete hypermetro group error.') + self._assert_rest_result(result, msg) + + def get_metrogroup(self, metrogroup_id): + url = "/HyperMetro_ConsistentGroup/" + metrogroup_id + result = self.call(url, None, "GET") + + msg = _('Get hypermetro group error.') + self._assert_rest_result(result, msg) + + def stop_metrogroup(self, metrogroup_id): + url = "/HyperMetro_ConsistentGroup/stop" + data = {"TYPE": "15364", + "ID": metrogroup_id + } + result = self.call(url, data, "PUT") + + msg = _('stop hypermetro group error.') + self._assert_rest_result(result, msg) + + def sync_metrogroup(self, metrogroup_id): + url = "/HyperMetro_ConsistentGroup/sync" + data = {"TYPE": "15364", + "ID": metrogroup_id + } + result = self.call(url, data, "PUT") + + msg = _('sync hypermetro group error.') + self._assert_rest_result(result, msg) + + def add_metro_to_metrogroup(self, metrogroup_id, metro_id): + url = "/hyperMetro/associate/pair" + data = {"TYPE": "15364", + "ID": metrogroup_id, + "ASSOCIATEOBJTYPE": "15361", + "ASSOCIATEOBJID": metro_id} + result = self.call(url, data, "POST") + + msg = _('Add hypermetro to metrogroup error.') + self._assert_rest_result(result, msg) + + def remove_metro_from_metrogroup(self, metrogroup_id, metro_id): + url = "/hyperMetro/associate/pair" + data = {"TYPE": "15364", + "ID": metrogroup_id, + "ASSOCIATEOBJTYPE": "15361", + "ASSOCIATEOBJID": metro_id} + result = self.call(url, data, "DELETE") + + msg = _('Delete hypermetro from metrogroup error.') + self._assert_rest_result(result, msg) + + def get_hypermetro_pairs(self): + url = "/HyperMetroPair?range=[0-4095]" + result = self.call(url, None, "GET") + msg = _('Get HyperMetroPair error.') + self._assert_rest_result(result, msg) + + return result.get('data', []) + + def get_split_mirrors(self): + url = "/splitmirror?range=[0-8191]" + result = self.call(url, None, "GET") + if result['error']['code'] == 
constants.NO_SPLITMIRROR_LICENSE: + msg = _('License is unavailable.') + raise exception.VolumeBackendAPIException(data=msg) + msg = _('Get SplitMirror error.') + self._assert_rest_result(result, msg) + + return result.get('data', []) + + def get_target_luns(self, id): + url = ("/SPLITMIRRORTARGETLUN/targetLUN?TYPE=228&PARENTID=%s&" + "PARENTTYPE=220") % id + result = self.call(url, None, "GET") + msg = _('Get target LUN of SplitMirror error.') + self._assert_rest_result(result, msg) + + target_luns = [] + for item in result.get('data', []): + target_luns.append(item.get('ID')) + return target_luns + + def get_migration_task(self): + url = "/LUN_MIGRATION?range=[0-256]" + result = self.call(url, None, "GET") + if result['error']['code'] == constants.NO_MIGRATION_LICENSE: + msg = _('License is unavailable.') + raise exception.VolumeBackendAPIException(data=msg) + msg = _('Get migration task error.') + self._assert_rest_result(result, msg) + + return result.get('data', []) + + def get_portgs_by_portid(self, port_id): + portgs = [] + if not port_id: + return portgs + url = ("/portgroup/associate/fc_port?TYPE=257&ASSOCIATEOBJTYPE=212&" + "ASSOCIATEOBJID=%s") % port_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get port groups by port error.')) + for item in result.get("data", []): + portgs.append(item["ID"]) + return portgs + + def get_views_by_portg(self, portg_id): + views = [] + if not portg_id: + return views + url = ("/mappingview/associate/portgroup?TYPE=245&ASSOCIATEOBJTYPE=" + "257&ASSOCIATEOBJID=%s") % portg_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get views by port group error.')) + for item in result.get("data", []): + views.append(item["ID"]) + return views + + def get_lungroup_by_view(self, view_id): + if not view_id: + return None + url = ("/lungroup/associate/mappingview?TYPE=256&ASSOCIATEOBJTYPE=" + "245&ASSOCIATEOBJID=%s") % view_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get LUN group by view error.')) + for item in result.get("data", []): + # In fact, there is just one lungroup in a view. 
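get_lungroup_by_view and its sibling association queries all reduce the REST payload to the first associated ID, relying on the array invariant that a mapping view holds at most one LUN group or port group. A minimal sketch of that reduction over a payload of the same shape:

```python
def first_associated_id(result):
    # result mirrors the REST payload shape: {'data': [{'ID': ...}, ...]}.
    for item in result.get('data', []):
        return item['ID']  # At most one association exists by invariant.
    return None

assert first_associated_id({'data': [{'ID': '11'}]}) == '11'
assert first_associated_id({'data': []}) is None
assert first_associated_id({}) is None
```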
+ return item["ID"] + + def get_portgroup_by_view(self, view_id): + if not view_id: + return None + url = ("/portgroup/associate?ASSOCIATEOBJTYPE=245&" + "ASSOCIATEOBJID=%s") % view_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get port group by view error.')) + if 'data' in result and result['data']: + return result['data'][0]['ID'] + + def get_fc_ports_by_portgroup(self, portg_id): + url = ("/fc_port/associate?ASSOCIATEOBJTYPE=257" + "&ASSOCIATEOBJID=%s") % portg_id + result = self.call(url, None, "GET") + self._assert_rest_result( + result, _('Get FC ports by port group error.')) + ports = {} + for item in result.get("data", []): + ports[item["WWN"]] = item["ID"] + return ports + + def create_portg(self, portg_name, description=""): + url = "/PortGroup" + data = {"DESCRIPTION": description, + "NAME": portg_name, + "TYPE": 257} + result = self.call(url, data, "POST") + self._assert_rest_result(result, _('Create port group error.')) + if "data" in result: + return result['data']['ID'] + + def add_port_to_portg(self, portg_id, port_id): + url = "/port/associate/portgroup" + data = {"ASSOCIATEOBJID": port_id, + "ASSOCIATEOBJTYPE": 212, + "ID": portg_id, + "TYPE": 257} + result = self.call(url, data, "POST") + self._assert_rest_result(result, _('Add port to port group error.')) + + def delete_portgroup(self, portg_id): + url = "/PortGroup/%s" % portg_id + result = self.call(url, None, "DELETE") + self._assert_rest_result(result, _('Delete port group error.')) + + def remove_port_from_portgroup(self, portg_id, port_id): + url = (("/port/associate/portgroup?ID=%(portg_id)s&TYPE=257&" + "ASSOCIATEOBJTYPE=212&ASSOCIATEOBJID=%(port_id)s") + % {"portg_id": portg_id, "port_id": port_id}) + result = self.call(url, None, "DELETE") + self._assert_rest_result(result, _('Remove port from port group' + ' error.')) + + def get_portg_info(self, portg_id): + url = "/portgroup/%s" % portg_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get port group error.')) + + return result.get("data", {}) + + def append_portg_desc(self, portg_id, description): + portg_info = self.get_portg_info(portg_id) + new_description = portg_info.get('DESCRIPTION') + ',' + description + url = "/portgroup/%s" % portg_id + data = {"DESCRIPTION": new_description, + "ID": portg_id, + "TYPE": 257} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('Append port group description ' + 'error.')) + + def update_obj_desc(self, lun_id, description, + lun_type=constants.LUN_TYPE): + cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' + url = ("/%s/%s" % (cmd_type, lun_id)) + data = {"DESCRIPTION": description, + "ID": lun_id, + "TYPE": lun_type} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, _('update object description ' + 'error.')) + + def get_ports_by_portg(self, portg_id): + wwns = [] + url = ("/fc_port/associate?TYPE=213&ASSOCIATEOBJTYPE=257" + "&ASSOCIATEOBJID=%s" % portg_id) + result = self.call(url, None, "GET") + + msg = _('Get ports by port group error.') + self._assert_rest_result(result, msg) + for item in result.get('data', []): + wwns.append(item['WWN']) + return wwns + + def get_remote_devices(self): + url = "/remote_device" + result = self.call(url, None, "GET", filter_flag=True) + self._assert_rest_result(result, _('Get remote devices error.')) + return result.get('data', []) + + def create_pair(self, pair_params): + url = "/REPLICATIONPAIR" + result = self.call(url, pair_params, "POST") + + msg 
= _('Create replication error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + return result['data'] + + def get_pair_by_id(self, pair_id): + url = "/REPLICATIONPAIR/" + pair_id + result = self.call(url, None, "GET") + + msg = _('Get pair failed.') + self._assert_rest_result(result, msg) + return result.get('data', {}) + + def switch_pair(self, pair_id): + url = '/REPLICATIONPAIR/switch' + data = {"ID": pair_id, + "TYPE": "263"} + result = self.call(url, data, "PUT") + + msg = _('Switch over pair error.') + self._assert_rest_result(result, msg) + + def split_pair(self, pair_id): + url = '/REPLICATIONPAIR/split' + data = {"ID": pair_id, + "TYPE": "263"} + result = self.call(url, data, "PUT") + + msg = _('Split pair error.') + self._assert_rest_result(result, msg) + + def delete_pair(self, pair_id, force=False): + url = "/REPLICATIONPAIR/" + pair_id + data = None + if force: + data = {"ISLOCALDELETE": force} + + result = self.call(url, data, "DELETE") + + msg = _('delete_replication error.') + self._assert_rest_result(result, msg) + + def sync_pair(self, pair_id): + url = "/REPLICATIONPAIR/sync" + data = {"ID": pair_id, + "TYPE": "263"} + result = self.call(url, data, "PUT") + + msg = _('Sync pair error.') + self._assert_rest_result(result, msg) + + def check_pair_exist(self, pair_id): + url = "/REPLICATIONPAIR/" + pair_id + result = self.call(url, None, "GET") + error_code = result['error']['code'] + if error_code != 0: + if error_code == constants.REPLICATIONPAIR_NOT_EXIST: + return False + msg = (_("Check replication pair exist error.\nresult: %(res)s.") + % {'res': result}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return True + + def set_pair_second_access(self, pair_id, access): + url = "/REPLICATIONPAIR/" + pair_id + data = {"ID": pair_id, + "SECRESACCESS": access} + result = self.call(url, data, "PUT") + + msg = _('Set pair secondary access error.') + self._assert_rest_result(result, msg) + + def get_pair_info_by_lun_id(self, lun_id): + if not lun_id: + return None + + url = ("/REPLICATIONPAIR/associate?ASSOCIATEOBJTYPE=11&" + "ASSOCIATEOBJID=%s" % lun_id) + result = self.call(url, None, "GET") + msg = _('Get replication pair info by lun id error.') + self._assert_rest_result(result, msg) + for info in result.get("data", []): + if info.get("LOCALRESID") == lun_id: + return info + LOG.warning("Can not get the replica pair with the lun id: %s", lun_id) + return None + + def is_host_associated_to_hostgroup(self, host_id): + url = "/host/" + host_id + result = self.call(url, None, "GET") + data = result.get('data') + if data is not None: + return data.get('ISADD2HOSTGROUP') == 'true' + return False + + def _get_object_count(self, obj_name): + url = "/" + obj_name + "/count" + result = self.call(url, None, "GET", filter_flag=True) + + if result['error']['code'] != 0: + msg = 'Get obj %s count error' % obj_name + raise exception.VolumeBackendAPIException(data=msg) + + if result.get("data"): + return result.get("data").get("COUNT") + + def create_replicg(self, replicg_param): + url = '/CONSISTENTGROUP' + result = self.call(url, replicg_param, "POST") + + msg = (_("Create replication group %(res)s error.") + % {'res': replicg_param['DESCRIPTION']}) + self._assert_rest_result(result, msg) + + def get_replicg_by_name(self, group_name): + url = "/CONSISTENTGROUP?filter=NAME::%s" % group_name + result = self.call(url, None, 'GET') + + msg = (_("Get replication consisgroup %(name)s failed.") + % {'name': group_name}) + 
        self._assert_rest_result(result, msg)
+        if 'data' in result:
+            return result['data'][0]
+        else:
+            return {}
+
+    def add_replipair_to_replicg(self, replicg_id, pair_ids):
+        url = '/ADD_MIRROR'
+        data = {'ID': replicg_id,
+                'RMLIST': pair_ids}
+        result = self.call(url, data, "PUT")
+        msg = (_("Add repli_pair %(pair)s to replicg %(group)s error.")
+               % {'pair': pair_ids, 'group': replicg_id})
+        self._assert_rest_result(result, msg)
+
+    def remove_replipair_from_replicg(self, replicg_id, pair_ids):
+        url = '/DEL_MIRROR'
+        data = {'ID': replicg_id,
+                'RMLIST': pair_ids}
+        result = self.call(url, data, "PUT")
+        msg = (_("Remove repli_pair %(pair)s from "
+                 "replicg %(group)s error.")
+               % {'pair': pair_ids, 'group': replicg_id})
+        self._assert_rest_result(result, msg)
+
+    def split_replicg(self, replicg_id):
+        url = '/SPLIT_CONSISTENCY_GROUP'
+        data = {'ID': replicg_id}
+        result = self.call(url, data, "PUT")
+        msg = (_("Split replicg %(group)s error.")
+               % {'group': replicg_id})
+        self._assert_rest_result(result, msg)
+
+    def delete_replicg(self, replicg_id):
+        url = '/CONSISTENTGROUP/%s?ISLOCALDELETE=0' % replicg_id
+        result = self.call(url, None, "DELETE")
+        msg = (_("Delete replicg %(group)s error.")
+               % {'group': replicg_id})
+        self._assert_rest_result(result, msg)
+
+    def sync_replicg(self, replicg_id):
+        url = '/SYNCHRONIZE_CONSISTENCY_GROUP'
+        data = {'ID': replicg_id}
+        result = self.call(url, data, "PUT")
+        if result['error']['code'] == constants.REPLICG_IS_EMPTY:
+            LOG.warning(("Replicg %(group)s does not have "
+                         "remote replications.")
+                        % {'group': replicg_id})
+            return
+        msg = (_("Sync replicg %(group)s error.")
+               % {'group': replicg_id})
+        self._assert_rest_result(result, msg)
+
+    def get_replicg_info(self, replicg_id):
+        url = '/CONSISTENTGROUP/%s' % replicg_id
+        result = self.call(url, None, 'GET')
+
+        msg = (_("Get replication consisgroup %(group)s failed.")
+               % {'group': replicg_id})
+        self._assert_rest_result(result, msg)
+        self._assert_data_in_result(result, msg)
+        return result['data']
+
+    def set_cg_second_access(self, replicg_id, access):
+        url = "/CONSISTENTGROUP/" + replicg_id
+        data = {"SECRESACCESS": access}
+        result = self.call(url, data, "PUT")
+
+        msg = (_("Set cg %(group)s secondary access "
+                 "to %(access)s failed.")
+               % {'group': replicg_id, 'access': access})
+        self._assert_rest_result(result, msg)
+
+    def switch_replicg(self, replicg_id):
+        url = '/SWITCH_GROUP_ROLE'
+        data = {'ID': replicg_id}
+        result = self.call(url, data, 'PUT')
+
+        msg = (_("Switch replication consisgroup "
+                 "%(group)s error.")
+               % {'group': replicg_id})
+        self._assert_rest_result(result, msg)
+
+    def get_controller_by_name(self, name):
+        controllers = self._get_all_controllers()
+        for controller in controllers:
+            if controller.get('LOCATION') == name:
+                return controller.get('ID')
+
+        return None
+
+    def _get_all_controllers(self):
+        url = "/controller"
+        result = self.call(url, None, "GET")
+        self._assert_rest_result(result, _('Get all controllers error.'))
+        return result.get('data', [])
+
+    def get_license_feature_status(self):
+        url = "/license/feature"
+        result = self.call(url, None, "GET", filter_flag=True)
+        if result['error']['code'] != 0:
+            msg = (_("Query license status of features failed.\n"
+                     "result: %(res)s.")
+                   % {'res': result})
+            LOG.error(msg)
+            return {}
+        dic_result = {}
+        for i in result.get('data', []):
+            for key, value in i.items():
+                dic_result[key] = value
+
+        return dic_result
+
+    def create_clone_lun(self, src_id, lun_name):
+        data = {
+            "CLONESOURCEID": src_id,
+
"ISCLONE": True, + "NAME": lun_name, + } + + result = self.call('/lun', data, "POST") + self._assert_rest_result(result, _('Create clone lun error.')) + return result['data'] + + def split_clone_lun(self, clone_id): + data = { + "ID": clone_id, + "SPLITACTION": 1, + "ISCLONE": True, + "SPLITSPEED": 4, + } + + result = self.call('/lunclone_split_switch', data, "PUT") + self._assert_rest_result(result, _('Split clone lun error.')) + + def get_fc_initiator(self, initiator): + url = '/fc_initiator?filter=ID::%s' % initiator + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get fc initiator error.')) + + if result.get('data'): + return result['data'][0] + + def get_host_by_id(self, host_id): + url = "/host/%s" % host_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Get host error.')) + + return result['data'] + + def get_fc_ports_count(self): + url = "/fc_port/count" + result = self.call(url, None, "GET") + + msg = _('Get fc port count error.') + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return int(result['data']['COUNT']) + + def get_all_fc_ports(self): + fc_ports = [] + fc_port_count = self.get_fc_ports_count() + if fc_port_count <= 0: + return fc_ports + + for i in range((fc_port_count - 1) // constants.MAX_QUERY_COUNT + 1): + url = '/fc_port?range=[%d-%d]' % ( + i * constants.MAX_QUERY_COUNT, + (i + 1) * constants.MAX_QUERY_COUNT) + result = self.call(url, None, "GET", filter_flag=True) + + msg = _('Get FC ports info from array error.') + self._assert_rest_result(result, msg) + + if "data" in result: + for item in result["data"]: + fc_ports.append(item) + return fc_ports + + def create_clone_pair(self, source_id, target_id, clone_speed): + url = "/clonepair/relation" + data = {"copyRate": clone_speed, + "sourceID": source_id, + "targetID": target_id, + "isNeedSynchronize": "0"} + result = self.call(url, data, "POST") + self._assert_rest_result(result, 'Create ClonePair error, source_id ' + 'is %s.' % source_id) + return result['data']['ID'] + + def sync_clone_pair(self, pair_id): + url = "/clonepair/synchronize" + data = {"ID": pair_id, "copyAction": 0} + result = self.call(url, data, "PUT") + self._assert_rest_result(result, 'Sync ClonePair error, pair is ' + '%s.' % pair_id) + + def get_clone_pair_info(self, pair_id): + url = "/clonepair/%s" % pair_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, 'Get ClonePair %s error.' % pair_id) + return result.get('data', {}) + + def delete_clone_pair(self, pair_id, delete_dst_lun=False): + data = {"ID": pair_id, + "isDeleteDstLun": delete_dst_lun} + url = "/clonepair/%s" % pair_id + result = self.call(url, data, "DELETE") + if result['error']['code'] == constants.CLONE_PAIR_NOT_EXIST: + LOG.warning('ClonePair %s to delete not exist.', pair_id) + return + self._assert_rest_result(result, 'Delete ClonePair %s error.' 
+ % pair_id) + + def get_fc_initiator_info(self, ininame): + """Check whether the fc initiator is already added on the array.""" + url = "/fc_initiator/" + ininame + result = self.call(url, None, "GET") + error_code = result['error']['code'] + if error_code != 0: + return None + return result.get("data") + + def get_host_lun_info(self, host_id): + url = ("/lun/associate?ASSOCIATEOBJTYPE=21" + "&ASSOCIATEOBJID=%s" % host_id) + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Find host lun info error.')) + return result.get("data") + + def get_lun_map_host_info(self, lun_id): + url = "/host/associate?ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=" + lun_id + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Find lun map host info error.')) + return result.get("data") + + def get_hypermetro_by_local_lun_id(self, lun_id): + url = "/HyperMetroPair?filter=LOCALOBJID::%s" % lun_id + result = self.call(url, None, "GET") + msg = _('Get hypermetro by local lun id %s error.') % lun_id + self._assert_rest_result(result, msg) + if result.get('data'): + return result['data'][0] + return {} + + def get_lun_info_by_wwn(self, wwn): + url = "/lun?filter=WWN::" + wwn + result = self.call(url, None, "GET") + self._assert_rest_result(result, _('Find lun info by wwn error.')) + if result.get('data'): + return result['data'][0] + return {} diff --git a/PowerVC/smartx.py b/PowerVC/smartx.py new file mode 100644 index 0000000..59b01e0 --- /dev/null +++ b/PowerVC/smartx.py @@ -0,0 +1,244 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging +from oslo_utils import excutils + +from cinder import context +from cinder import exception +from cinder.i18n import _ +from cinder import utils +from cinder.volume.drivers.huawei import constants +from cinder.volume import qos_specs + +LOG = logging.getLogger(__name__) + + +class SmartQos(object): + def __init__(self, client): + self.client = client + + @staticmethod + def get_qos_by_volume_type(volume_type): + # We prefer the qos_specs association + # and override any existing extra-specs settings + # if present. + if not volume_type: + return {} + + qos_specs_id = volume_type.get('qos_specs_id') + if not qos_specs_id: + return {} + + qos = {} + ctxt = context.get_admin_context() + consumer = qos_specs.get_qos_specs(ctxt, qos_specs_id)['consumer'] + if consumer == 'front-end': + return + + kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] + LOG.info('The QoS sepcs is: %s.', kvs) + + for k, v in kvs.items(): + if k not in constants.QOS_SPEC_KEYS: + msg = _('Invalid QoS %s specification.') % k + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if k != 'IOType' and int(v) <= 0: + msg = _('QoS config is wrong. 
%s must be greater than 0.') % k
+                LOG.error(msg)
+                raise exception.InvalidInput(reason=msg)
+
+            qos[k.upper()] = v
+
+        if 'IOTYPE' not in qos or qos['IOTYPE'] not in constants.QOS_IOTYPES:
+            msg = _('IOType value must be in %(valid)s.'
+                    ) % {'valid': constants.QOS_IOTYPES}
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
+
+        if len(qos) < 2:
+            msg = _('QoS policy must specify IOType and one of QoS specs.')
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
+
+        for upper_limit in constants.UPPER_LIMIT_KEYS:
+            for lower_limit in constants.LOWER_LIMIT_KEYS:
+                if upper_limit in qos and lower_limit in qos:
+                    msg = (_('QoS policy upper_limit and lower_limit '
+                             'conflict, QoS policy: %(qos_policy)s.')
+                           % {'qos_policy': qos})
+                    LOG.error(msg)
+                    raise exception.InvalidInput(reason=msg)
+
+        return qos
+
+    def _is_high_priority(self, qos):
+        """Check QoS priority."""
+        for key, value in qos.items():
+            if (key.find('MIN') == 0) or (key.find('LATENCY') == 0):
+                return True
+
+        return False
+
+    @utils.synchronized('huawei_qos', external=True)
+    def add(self, qos, lun_id):
+        policy_id = None
+        try:
+            # Check QoS priority.
+            if self._is_high_priority(qos):
+                self.client.change_lun_priority(lun_id)
+            # Create QoS policy and activate it.
+            policy_id = self.client.create_qos_policy(qos, lun_id)
+            self.client.activate_deactivate_qos(policy_id, True)
+        except exception.VolumeBackendAPIException:
+            with excutils.save_and_reraise_exception():
+                if policy_id is not None:
+                    self.client.delete_qos_policy(policy_id)
+
+    @utils.synchronized('huawei_qos', external=True)
+    def remove(self, qos_id, lun_id):
+        qos_info = self.client.get_qos_info(qos_id)
+        lun_list = self.client.get_lun_list_in_qos(qos_id, qos_info)
+        if len(lun_list) <= 1:
+            qos_status = qos_info['RUNNINGSTATUS']
+            # 2: Active status.
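The branch below implements SmartQos.remove's teardown rule: when the LUN being removed is the last member of the policy, the policy itself is deactivated (if still active) and deleted; otherwise the LUN is merely detached from the policy. A sketch of that decision, with status codes stubbed in as assumptions rather than taken from the driver's constants module:

```python
STATUS_QOS_ACTIVE = '2'        # assumption, per the "# 2: Active status." note
STATUS_QOS_INACTIVATED = '45'  # assumption standing in for the driver constant

def plan_qos_removal(lun_list, running_status):
    if len(lun_list) <= 1:
        steps = []
        if running_status != STATUS_QOS_INACTIVATED:
            steps.append('deactivate')
        steps.append('delete_policy')
        return steps
    return ['detach_lun']

assert plan_qos_removal(['lun-1'], STATUS_QOS_ACTIVE) == \
    ['deactivate', 'delete_policy']
assert plan_qos_removal(['lun-1', 'lun-2'], STATUS_QOS_ACTIVE) == \
    ['detach_lun']
```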
+ if qos_status != constants.STATUS_QOS_INACTIVATED: + self.client.activate_deactivate_qos(qos_id, False) + self.client.delete_qos_policy(qos_id) + else: + self.client.remove_lun_from_qos(lun_id, lun_list, qos_id) + + +class SmartPartition(object): + def __init__(self, client): + self.client = client + + def add(self, opts, lun_id): + if opts['smartpartition'] != 'true': + return + if not opts['partitionname']: + raise exception.InvalidInput( + reason=_('Partition name is None, please set ' + 'smartpartition:partitionname in key.')) + + partition_id = self.client.get_partition_id_by_name( + opts['partitionname']) + if not partition_id: + raise exception.InvalidInput( + reason=(_('Can not find partition id by name %(name)s.') + % {'name': opts['partitionname']})) + + self.client.add_lun_to_partition(lun_id, partition_id) + + +class SmartCache(object): + def __init__(self, client): + self.client = client + + def add(self, opts, lun_id): + if opts['smartcache'] != 'true': + return + if not opts['cachename']: + raise exception.InvalidInput( + reason=_('Cache name is None, please set ' + 'smartcache:cachename in key.')) + + cache_id = self.client.get_cache_id_by_name(opts['cachename']) + if not cache_id: + raise exception.InvalidInput( + reason=(_('Can not find cache id by cache name %(name)s.') + % {'name': opts['cachename']})) + + self.client.add_lun_to_cache(lun_id, cache_id) + + +class SmartX(object): + def __init__(self, client): + self.client = client + + def get_smartx_specs_opts(self, opts): + # Check that smarttier is 0/1/2/3 + opts = self.get_smarttier_opts(opts) + opts = self.get_smartthin_opts(opts) + opts = self.get_smartcache_opts(opts) + opts = self.get_smartpartition_opts(opts) + opts = self.get_controller_opts(opts) + return opts + + def get_smarttier_opts(self, opts): + if opts['smarttier'] == 'true': + if not opts['policy']: + opts['policy'] = '1' + elif opts['policy'] not in ['0', '1', '2', '3']: + raise exception.InvalidInput( + reason=(_('Illegal value specified for smarttier: ' + 'set to either 0, 1, 2, or 3.'))) + else: + opts['policy'] = '0' + + return opts + + def get_smartthin_opts(self, opts): + if opts['thin_provisioning_support'] == 'true': + if opts['thick_provisioning_support'] == 'true': + raise exception.InvalidInput( + reason=(_('Illegal value specified for thin: ' + 'Can not set thin and thick at the same time.'))) + else: + opts['LUNType'] = constants.THIN_LUNTYPE + if opts['thick_provisioning_support'] == 'true': + opts['LUNType'] = constants.THICK_LUNTYPE + + return opts + + def get_smartcache_opts(self, opts): + if opts['smartcache'] == 'true': + if not opts['cachename']: + raise exception.InvalidInput( + reason=_('Cache name is None, please set ' + 'smartcache:cachename in key.')) + else: + opts['cachename'] = None + + return opts + + def get_smartpartition_opts(self, opts): + if opts['smartpartition'] == 'true': + if not opts['partitionname']: + raise exception.InvalidInput( + reason=_('Partition name is None, please set ' + 'smartpartition:partitionname in key.')) + else: + opts['partitionname'] = None + + return opts + + def get_controller_opts(self, opts): + if opts['huawei_controller'] == 'true': + if not opts['controllername']: + raise exception.InvalidInput( + reason=_('Controller name is None, please set ' + 'controllername:controllername in key.')) + else: + controller_name = opts['controllername'] + controller_id = self.client.get_controller_by_name( + controller_name) + opts['controllerid'] = controller_id + else: + opts['controllerid'] = None 
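Taken together, the get_*_opts helpers above normalize string-valued volume-type extra specs into the keys later consumed when creating the LUN, and the trailing return hands back the enriched dict. A hedged sketch of two of those normalizations; the THIN_LUNTYPE value is an assumption, not the driver's constant:

```python
THIN_LUNTYPE = 1  # assumption mirroring constants.THIN_LUNTYPE

def normalize_opts(opts):
    out = dict(opts)
    # smarttier: default policy '1' when enabled, '0' otherwise.
    if out.get('smarttier') == 'true':
        out['policy'] = out.get('policy') or '1'
    else:
        out['policy'] = '0'
    # Thin provisioning maps onto the LUNType the array expects.
    if out.get('thin_provisioning_support') == 'true':
        out['LUNType'] = THIN_LUNTYPE
    return out

opts = normalize_opts({'smarttier': 'true',
                       'policy': None,
                       'thin_provisioning_support': 'true'})
assert opts['policy'] == '1' and opts['LUNType'] == THIN_LUNTYPE
```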
+ + return opts diff --git a/PowerVC/ssh_client.py b/PowerVC/ssh_client.py new file mode 100644 index 0000000..f89e315 --- /dev/null +++ b/PowerVC/ssh_client.py @@ -0,0 +1,2362 @@ +# Copyright (c) 2013 Huawei Technologies Co., Ltd. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Common classes for Huawei OceanStor T series storage arrays. + +The common classes provide the drivers command line operation using SSH. +""" + +import base64 +import re +import six +import socket +import threading +import time +from defusedxml import ElementTree as ET + +from oslo_log import log as logging +from oslo_utils import excutils + +from cinder import context +from cinder import exception +from cinder.i18n import _ +from cinder import ssh_utils +from cinder import utils +from cinder.volume.drivers.huawei import constants +from cinder.volume.drivers.huawei import huawei_utils +from cinder.volume import utils as volume_utils +from cinder.volume import volume_types + +DJ_crypt_available = True +try: + import kmc.kmc + K = kmc.kmc.API() + kmc_domain = kmc.kmc.KMC_DOMAIN.DEFAULT +except ImportError: + DJ_crypt_available = False + +LOG = logging.getLogger(__name__) + +HOST_GROUP_NAME = 'HostGroup_OpenStack' +HOST_NAME_PREFIX = 'Host_' +VOL_AND_SNAP_NAME_PREFIX = 'OpenStack_' +HOST_PORT_PREFIX = 'HostPort_' +HOST_LUN_ERR_MSG = 'host LUN is mapped or does not exist' +contrs = ['A', 'B'] + + +def ssh_read(user, channel, cmd, timeout): + """Get results of CLI commands.""" + result = '' + output = None + channel.settimeout(timeout) + while True: + try: + output = channel.recv(8192) + result = result + output + except socket.timeout as err: + msg = _('ssh_read: Read SSH timeout. %s') % err + LOG.error(msg) + raise err + else: + # CLI returns welcome information when first log in. So need to + # deal differently. + if not re.search('Welcome', result): + # Complete CLI response starts with CLI cmd and + # ends with "username:/>". + if result.startswith(cmd) and result.endswith(user + ':/>'): + break + # Some commands need to send 'y'. + elif re.search('(y/n)|y or n', result): + break + # Reach maximum limit of SSH connection. + elif re.search('No response message', result): + msg = _('No response message. Please check system status.') + LOG.error(msg) + raise exception.CinderException(msg) + elif re.search('relogin', result): + msg = _('The client is reject by the storate server ') + LOG.error(msg) + raise exception.CinderException(msg) + elif (re.search(user + ':/>' + cmd, result) and + result.endswith(user + ':/>')): + break + if not output: + LOG.error('Output is empty.') + break + + # Filter the last line: username:/> . + result = '\r\n'.join(result.split('\r\n')[:-1]) + # Filter welcome information. 
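ssh_read treats the trailing "user:/>" prompt as the end-of-response delimiter, then strips the prompt line and any login banner that precedes the first prompt echo, which is what the find() call below does. A standalone sketch of that framing, with an invented CLI transcript:

```python
def frame_cli_output(user, raw):
    prompt = user + ':/>'
    # Drop the final line holding the prompt itself.
    body = '\r\n'.join(raw.split('\r\n')[:-1])
    # Skip any welcome banner that precedes the first prompt echo.
    index = body.find(prompt)
    return body[index:] if index > -1 else body

raw = ('Welcome to the CLI\r\n'
       'admin:/>showlun\r\n'
       '  LUN Information\r\n'
       'admin:/>')
print(frame_cli_output('admin', raw))
# -> 'admin:/>showlun\r\n  LUN Information'
```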
+ index = result.find(user + ':/>') + + return (result[index:] if index > -1 else result) + + +class TseriesClient(object): + """Common class for Huawei T series storage arrays.""" + + def __init__(self, configuration=None): + self.configuration = configuration + self.xml_file_path = configuration.cinder_huawei_conf_file + self.login_info = {} + self.lun_distribution = [0, 0] + self.hostgroup_id = None + self.ssh_pool = None + self.lock_ip = threading.Lock() + self.luncopy_list = [] # To store LUNCopy name + + def do_setup(self, context): + """Check config file.""" + LOG.debug('do_setup') + + self._check_conf_file() + self.login_info = self._get_login_info() + exist_luns = self._get_all_luns_info() + self.lun_distribution = self._get_lun_distribution_info(exist_luns) + self.luncopy_list = self._get_all_luncopy_name() + self.hostgroup_id = self._get_hostgroup_id(HOST_GROUP_NAME) + + def check_storage_pools(self): + conf_pools = [] + root = self.parse_xml_file(self.xml_file_path) + pools_conf = root.findall('LUN/StoragePool') + for pool in pools_conf: + conf_pools.append(pool.attrib['Name'].strip()) + + thick_pools = self._get_dev_pool_info('Thick') + thick_infos = {} + for pool in thick_pools: + thick_infos[pool[5]] = pool[0] + + thin_pools = self._get_dev_pool_info('Thin') + thin_infos = {} + for pool in thin_pools: + thin_infos[pool[1]] = pool[0] + + for pool in conf_pools: + if pool not in thick_infos and pool not in thin_infos: + err_msg = (_('Storage pool %s does not exist on the array.') + % pool) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def parse_xml_file(self, xml_file_path): + """Get root of xml file.""" + try: + tree = ET.parse(xml_file_path) + root = tree.getroot() + return root + except IOError as err: + LOG.error('parse_xml_file: %s.', err) + raise + + def get_xml_item(self, xml_root, item): + """Get the given item details. + + :param xml_root: The root of xml tree + :param item: The tag need to get + :return: A dict contains all the config info of the given item. + """ + items_list = [] + items = xml_root.findall(item) + for item in items: + tmp_dict = {'text': None, 'attrib': {}} + if item.text: + tmp_dict['text'] = item.text.strip() + for key, val in item.attrib.items(): + if val: + item.attrib[key] = val.strip() + tmp_dict['attrib'] = item.attrib + items_list.append(tmp_dict) + return items_list + + def get_conf_host_os_type(self, host_ip): + """Get host OS type from xml config file. + + :param host_ip: The IP of Nova host + :param config: xml config file + :return: host OS type + """ + os_conf = {} + root = self.parse_xml_file(self.xml_file_path) + hosts_list = self.get_xml_item(root, 'Host') + for host in hosts_list: + os = host['attrib']['OSType'].strip() + ips = [ip.strip() for ip in host['attrib']['HostIP'].split(',')] + os_conf[os] = ips + host_os = None + for k, v in os_conf.items(): + if host_ip in v: + host_os = constants.OS_TYPE.get(k, None) + if not host_os: + host_os = constants.OS_TYPE['Linux'] # Default OS type. + + LOG.debug('_get_host_os_type: Host %(ip)s OS type is %(os)s.', + {'ip': host_ip, 'os': host_os}) + + return host_os + + def is_xml_item_exist(self, xml_root, item, attrib_key=None): + """Check if the given item exits in xml config file. 
+ + :param xml_root: The root of xml tree + :param item: The xml tag to check + :param attrib_key: The xml attrib to check + :return: True of False + """ + items_list = self.get_xml_item(xml_root, item) + if attrib_key: + for tmp_dict in items_list: + if tmp_dict['attrib'].get(attrib_key, None): + return True + else: + if items_list and items_list[0]['text']: + return True + return False + + def is_xml_item_valid(self, xml_root, item, valid_list, attrib_key=None): + """Check if the given item is valid in xml config file. + + :param xml_root: The root of xml tree + :param item: The xml tag to check + :param valid_list: The valid item value + :param attrib_key: The xml attrib to check + :return: True of False + """ + items_list = self.get_xml_item(xml_root, item) + if attrib_key: + for tmp_dict in items_list: + value = tmp_dict['attrib'].get(attrib_key, None) + if value not in valid_list: + return False + else: + value = items_list[0]['text'] + if value not in valid_list: + return False + + return True + + def _check_conf_file(self): + """Check config file, make sure essential items are set.""" + root = self.parse_xml_file(self.xml_file_path) + check_list = ['Storage/ControllerIP0', 'Storage/ControllerIP1', + 'Storage/UserName', 'Storage/UserPassword'] + for item in check_list: + if not self.is_xml_item_exist(root, item): + err_msg = (_('_check_conf_file: Config file invalid. ' + '%s must be set.') % item) + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + + # Make sure storage pool is set. + if not self.is_xml_item_exist(root, 'LUN/StoragePool', 'Name'): + err_msg = _('_check_conf_file: Config file invalid. ' + 'StoragePool must be set.') + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + + # If setting os type, make sure it valid. + if self.is_xml_item_exist(root, 'Host', 'OSType'): + os_list = constants.OS_TYPE.keys() + if not self.is_xml_item_valid(root, 'Host', os_list, 'OSType'): + err_msg = (_('_check_conf_file: Config file invalid. ' + 'Host OSType is invalid.\n' + 'The valid values are: %(os_list)s') + % {'os_list': os_list}) + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + + def _get_login_info(self): + """Get login IP, username and password from config file.""" + logininfo = {} + filename = self.configuration.cinder_huawei_conf_file + tree = ET.parse(filename) + root = tree.getroot() + logininfo['ControllerIP0'] = ( + root.findtext('Storage/ControllerIP0').strip()) + logininfo['ControllerIP1'] = ( + root.findtext('Storage/ControllerIP1').strip()) + + need_encode = False + for key in ['UserName', 'UserPassword']: + node = root.find('Storage/%s' % key) + node_text = node.text.strip() + # Prefix !$$$ means encoded already. + if node_text.find('!$$$') > -1: + logininfo_key = base64.b64decode(node_text[4:]) + if DJ_crypt_available and key == "UserPassword": + logininfo_key = K.decrypt(kmc_domain, logininfo_key) + logininfo[key] = logininfo_key + else: + if DJ_crypt_available and key == "UserPassword": + logininfo[key] = K.decrypt(kmc_domain, node_text) + else: + logininfo[key] = node_text + node.text = '!$$$' + base64.b64encode(node_text) + need_encode = True + if need_encode: + self._change_file_mode(filename) + try: + tree.write(filename, 'UTF-8') + except Exception as err: + LOG.info('_get_login_info: %s', err) + + return logininfo + + def _change_file_mode(self, filepath): + utils.execute('chmod', '600', filepath, run_as_root=True) + + def _get_lun_distribution_info(self, luns): + """Get LUN distribution information. 
+
+        Since each array has two controllers, we want all LUNs (Thick LUNs
+        only) to be distributed evenly between them. The driver uses the
+        LUN distribution info to determine on which controller to create
+        a new LUN.
+
+        """
+
+        ctr_info = [0, 0]
+        for lun in luns:
+            if (lun[6].startswith(VOL_AND_SNAP_NAME_PREFIX) and
+                    lun[8] == 'THICK'):
+                if lun[4] == 'A':
+                    ctr_info[0] += 1
+                else:
+                    ctr_info[1] += 1
+        return ctr_info
+
+    def check_for_setup_error(self):
+        pass
+
+    def _get_all_luncopy_name(self):
+        cli_cmd = 'showluncopy'
+        out = self._execute_cli(cli_cmd)
+        luncopy_ids = []
+        if re.search('LUN Copy Information', out):
+            for line in out.split('\r\n')[6:-2]:
+                tmp_line = line.split()
+                if tmp_line[0].startswith(VOL_AND_SNAP_NAME_PREFIX):
+                    luncopy_ids.append(tmp_line[0])
+        return luncopy_ids
+
+    def _get_extended_lun(self, luns):
+        extended_dict = {}
+        for lun in luns:
+            if lun[6].startswith('ext'):
+                vol_name = lun[6].split('_')[1]
+                add_ids = extended_dict.get(vol_name, [])
+                add_ids.append(lun[0])
+                extended_dict[vol_name] = add_ids
+        return extended_dict
+
+    def check_volume_exist_on_array(self, volume):
+        """Check whether the volume exists on the array.
+
+        If the volume exists on the array, return the LUN ID.
+        If it does not exist, return None.
+        """
+        lun_id = volume.get('provider_location')
+        if not lun_id:
+            LOG.warning("No LUN ID recorded for volume %s, finding it by "
+                        "name now.", volume['id'])
+            volume_name = self._name_translate(volume['name'])
+            lun_id = self._get_lun_id(volume_name)
+            if not lun_id:
+                # We don't raise an error here; let the caller decide
+                # whether to raise or not.
+                LOG.warning("Volume %s does not exist on the array.",
+                            volume['id'])
+                return None
+
+        metadata = huawei_utils.get_volume_metadata(volume)
+        lun_wwn = metadata.get('lun_wwn') if metadata else None
+        if not lun_wwn:
+            LOG.warning("No LUN WWN recorded for volume %s", volume['id'])
+
+        if not self.check_lun_exist(lun_id, lun_wwn):
+            return None
+        return lun_id
+
+    def _get_lun_wwn(self, lun_id):
+        cli_cmd = ('showlun -lun %s' % lun_id)
+        out = self._execute_cli(cli_cmd)
+        if re.search('LUN Information', out):
+            try:
+                line = out.split('\r\n')[6]
+                lun_wwn = line.split()[3]
+            except Exception:
+                err_msg = (_('CLI out is not normal. CLI out: %s') % out)
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+
+            LOG.debug('Got LUN WWN %(lun_wwn)s for LUN %(lun_id)s'
+                      % {'lun_wwn': lun_wwn,
+                         'lun_id': lun_id})
+        elif re.search('The object does not exist', out):
+            lun_wwn = None
+        else:
+            err_msg = (_("Failed to get LUN WWN. 
CLI out: %s") % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + return lun_wwn + + @utils.synchronized('huawei', external=False) + def create_volume(self, volume): + """Create a new volume.""" + volume_name = self._name_translate(volume['name']) + + LOG.debug('create_volume: volume name: %s' % volume_name) + + self.update_login_info() + if int(volume['size']) == 0: + volume_size = '100M' + else: + volume_size = '%sG' % volume['size'] + parameters = self._parse_volume_type(volume) + lun_id = self._create_volume(volume_name, volume_size, parameters) + count = 0 + max_wait_time = 300 + while self._is_lun_normal(lun_id) is False: + if count >= max_wait_time: + err_msg = (_('LUN %(lun_id)s is still not normal after' + ' %(max_wait_time)s seconds wait.') + % {'lun_id': lun_id, + 'max_wait_time': max_wait_time}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + else: + LOG.debug('LUN %s is not normal, sleep 1s to wait.', + lun_id) + time.sleep(1) + count = count + 1 + + try: + lun_wwn = self._get_lun_wwn(lun_id) + except Exception: + LOG.warning("Get LUN wwn error, setting it to 'None'.") + lun_wwn = 'None' + + model_update = {} + metadata = huawei_utils.get_volume_metadata(volume) + metadata.update({'lun_wwn': lun_wwn}) + model_update['metadata'] = metadata + model_update['provider_location'] = lun_id + + return model_update + + def _name_translate(self, name): + """Form new names for volume and snapshot. + + Form new names for volume and snapshot because of + 32-character limit on names. + """ + newname = VOL_AND_SNAP_NAME_PREFIX + six.text_type(hash(name)) + + LOG.debug('_name_translate: Name in cinder: %(old)s, new name in ' + 'storage system: %(new)s' % {'old': name, 'new': newname}) + + return newname + + def update_login_info(self): + """Update user name and password.""" + self.login_info = self._get_login_info() + + def _get_opts_from_specs(self, opts_capabilite, specs): + opts = {} + for key, value in specs.items(): + # Get the scope, if using scope format + scope = None + key_split = key.split(':') + if len(key_split) > 2 and key_split[0] != "capabilities": + continue + + if len(key_split) == 1: + key = key_split[0] + else: + scope = key_split[0] + key = key_split[1] + + if scope: + scope = scope.lower() + if key: + key = key.lower() + + # We generally do not look at capabilities in the driver, but + # replication is a special case where the user asks for + # a volume to be replicated, and we want both the scheduler and + # the driver to act on the value. 
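+            # For example, the extra spec
+            # 'capabilities:thin_provisioning_support' with the value
+            # '<is> True' is parsed into
+            # {'thin_provisioning_support': 'true'}.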
+            if ((not scope or scope == 'capabilities') and
+                    key in opts_capabilite):
+                words = value.split()
+
+                if (words and len(words) == 2 and words[0] == '<is>'):
+                    del words[0]
+                    value = words[0]
+                    opts[key] = value.lower()
+                else:
+                    LOG.error('Capabilities must be specified as '
+                              '\'<is> True\' or \'<is> False\'.')
+
+        return opts
+
+    def _set_volume_type_by_specs(self, specs, params):
+        """Support LUN type configuration in SmartX."""
+
+        thin_key = 'thin_provisioning_support'
+        thick_key = 'thick_provisioning_support'
+        opts_capabilite = {thin_key: False,
+                           thick_key: True}
+
+        opts = self._get_opts_from_specs(opts_capabilite, specs)
+        if (thin_key not in opts and thick_key not in opts):
+            return
+        if (thin_key in opts and thick_key in opts
+                and opts[thin_key] == 'true' and opts[thick_key] == 'true'):
+            raise exception.InvalidInput(
+                reason=_('Illegal value specified for thin: '
+                         'Cannot set thin and thick at the same time.'))
+        elif (thin_key in opts and opts[thin_key] == 'true'):
+            params['LUNType'] = 'Thin'
+        elif (thick_key in opts
+                and opts[thick_key] == 'true'):
+            params['LUNType'] = 'Thick'
+
+    def _parse_volume_type(self, volume):
+        """Parse the volume type from extra_specs by type id.
+
+        The keys in extra_specs must be consistent with the elements in the
+        config file. The keys may start with "drivers:" to distinguish them
+        from capabilities keys, if you like.
+
+        """
+
+        params = self._get_lun_params(volume)
+        typeid = volume['volume_type_id']
+        if typeid is not None:
+            ctxt = context.get_admin_context()
+            volume_type = volume_types.get_volume_type(ctxt, typeid)
+            specs = volume_type.get('extra_specs')
+            self._set_volume_type_by_specs(specs, params)
+            for key, value in specs.items():
+                key_split = key.split(':')
+                if len(key_split) > 1:
+                    if key_split[0] == 'drivers':
+                        key = key_split[1]
+                    else:
+                        continue
+                else:
+                    key = key_split[0]
+
+                if key in params.keys():
+                    params[key] = value.strip()
+                else:
+                    conf = self.configuration.cinder_huawei_conf_file
+                    LOG.warning('_parse_volume_type: Unacceptable '
+                                'parameter %(key)s. Please check this key'
+                                ' in extra_specs and make '
+                                'it consistent with the element in '
+                                'configuration file %(conf)s.',
+                                {'key': key,
+                                 'conf': conf})
+
+        return params
+
+    def _create_volume(self, name, size, params):
+        """Create a new volume with the given name and size."""
+        cli_cmd = ('createlun -n %(name)s -lunsize %(size)s '
+                   '-wrtype %(wrtype)s ' % {'name': name,
+                                            'size': size,
+                                            'wrtype': params['WriteType']})
+
+        # If write type is "write through", no need to set mirror switch.
+        if params['WriteType'] != '2':
+            cli_cmd = cli_cmd + '-mirrorsw 1 '
+
+        # The CLI commands differ between "Thin" and "Thick" LUNs.
+        luntype = params['LUNType']
+        ctr = None
+        if luntype == 'Thin':
+            cli_cmd = cli_cmd + ('-pool %(pool)s '
+                                 % {'pool': params['StoragePool']})
+        else:
+            # Make LUN distributed to A/B controllers evenly,
+            # just for Thick LUN.
+            ctr = self._calculate_lun_ctr()
+            cli_cmd = cli_cmd + ('-rg %(raidgroup)s -susize %(susize)s '
+                                 '-c %(ctr)s '
+                                 % {'raidgroup': params['StoragePool'],
+                                    'susize': params['StripUnitSize'],
+                                    'ctr': ctr})
+
+        prefetch_value_or_times = ''
+        pretype = '-pretype %s ' % params['PrefetchType']
+        # If constant prefetch, we should specify prefetch value.
+        if params['PrefetchType'] == '1':
+            prefetch_value_or_times = '-value %s' % params['PrefetchValue']
+        # If variable prefetch, we should specify prefetch multiple.
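+        # e.g. PrefetchType '2' with PrefetchTimes '8' appends '-times 8'
+        # to the createlun command.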
+ elif params['PrefetchType'] == '2': + prefetch_value_or_times = '-times %s' % params['PrefetchTimes'] + + cli_cmd = cli_cmd + pretype + prefetch_value_or_times + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_create_volume', + 'Failed to create volume %s' % name, + cli_cmd, out) + if ctr: + self._update_lun_distribution(ctr) + return self._get_lun_id(name) + + def _calculate_lun_ctr(self): + return ('a' if self.lun_distribution[0] <= self.lun_distribution[1] + else 'b') + + def _update_lun_distribution(self, ctr): + index = (0 if ctr == 'a' else 1) + self.lun_distribution[index] += 1 + + def _get_lun_params(self, volume): + params_conf = self._parse_conf_lun_params() + pool_name = volume_utils.extract_host(volume['host'], level='pool') + + thick_pools = self._get_dev_pool_info('Thick') + thick_infos = {} + for pool in thick_pools: + thick_infos[pool[5]] = pool[0] + + if pool_name in thick_infos: + params_conf['LUNType'] = 'Thick' + params_conf['StoragePool'] = thick_infos[pool_name] + return params_conf + + thin_pools = self._get_dev_pool_info('Thin') + thin_infos = {} + for pool in thin_pools: + thin_infos[pool[1]] = pool[0] + if pool_name in thin_infos: + params_conf['LUNType'] = 'Thin' + params_conf['StoragePool'] = thin_infos[pool_name] + return params_conf + + msg = _("Pool does not exist. Pool name: %s.") % pool_name + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _parse_conf_lun_params(self): + """Get parameters from config file for creating LUN.""" + # Default LUN parameters. + conf_params = {'LUNType': 'Thin', + 'StripUnitSize': '64', + 'WriteType': '1', + 'PrefetchType': '3', + 'PrefetchValue': '0', + 'PrefetchTimes': '0', + 'StoragePool': []} + + root = self.parse_xml_file(self.xml_file_path) + + stripunitsize = root.findtext('LUN/StripUnitSize') + if stripunitsize: + conf_params['StripUnitSize'] = stripunitsize.strip() + writetype = root.findtext('LUN/WriteType') + if writetype: + conf_params['WriteType'] = writetype.strip() + prefetch = root.find('LUN/Prefetch') + if prefetch is not None and prefetch.attrib['Type']: + conf_params['PrefetchType'] = prefetch.attrib['Type'].strip() + if conf_params['PrefetchType'] == '1': + conf_params['PrefetchValue'] = prefetch.attrib['Value'].strip() + elif conf_params['PrefetchType'] == '2': + conf_params['PrefetchTimes'] = prefetch.attrib['Value'].strip() + else: + LOG.debug('_parse_conf_lun_params: Use default prefetch type. ' + 'Prefetch type: Intelligent') + + pools_conf = root.findall('LUN/StoragePool') + for pool in pools_conf: + conf_params['StoragePool'].append(pool.attrib['Name'].strip()) + + return conf_params + + def create_channel(self, client, width, height): + """Invoke an interactive shell session on server.""" + channel = client.invoke_shell() + channel.resize_pty(width, height) + return channel + + @utils.synchronized('huawei-cli', external=False) + def _execute_cli(self, cmd): + """Build SSH connection and execute CLI commands. + + If the connection to first controller timeout, + try to connect to the other controller. 
+ + """ + + if (' -pwd ' not in cmd) and (' -opwd ' not in cmd): + LOG.debug('CLI command: %s' % cmd) + connect_times = 1 + ip0 = self.login_info['ControllerIP0'] + ip1 = self.login_info['ControllerIP1'] + user = self.login_info['UserName'] + pwd = self.login_info['UserPassword'] + if not self.ssh_pool: + self.ssh_pool = ssh_utils.SSHPool(ip0, 22, 30, user, pwd, + max_size=20) + ssh_client = None + while True: + try: + if connect_times == 2: + # Switch to the other controller. + with self.lock_ip: + if ssh_client: + if ssh_client.server_ip == self.ssh_pool.ip: + self.ssh_pool.ip = (ip1 + if self.ssh_pool.ip == ip0 + else ip0) + old_ip = ssh_client.server_ip + # Create a new client to replace the old one. + if getattr(ssh_client, 'chan', None): + ssh_client.chan.close() + ssh_client.close() + ssh_client = self.ssh_pool.create() + self._reset_transport_timeout(ssh_client, 0.1) + else: + self.ssh_pool.ip = ip1 + old_ip = ip0 + + LOG.info('_execute_cli: Can not connect to IP ' + '%(old)s, try to connect to the other ' + 'IP %(new)s.', + {'old': old_ip, 'new': self.ssh_pool.ip}) + + if not ssh_client: + # Get an SSH client from SSH pool. + ssh_client = self.ssh_pool.get() + self._reset_transport_timeout(ssh_client, 0.1) + # "server_ip" shows the IP of SSH server. + if not getattr(ssh_client, 'server_ip', None): + with self.lock_ip: + setattr(ssh_client, 'server_ip', self.ssh_pool.ip) + # An SSH client owns one "chan". + if not getattr(ssh_client, 'chan', None): + setattr(ssh_client, 'chan', + self.create_channel(ssh_client, 600, 800)) + + busyRetryTime = 5 + while True: + if 0 == ssh_client.chan.send(cmd + '\n'): + ssh_client.chan.close() + setattr(ssh_client, 'chan', + self.create_channel(ssh_client, 600, 800)) + ssh_client.chan.send(cmd + '\n') + out = ssh_read(user, ssh_client.chan, cmd, 200) + if out.find('(y/n)') > -1 or out.find('y or n') > -1: + cmd = 'y' + elif (out.find('The system is busy') > -1 + and busyRetryTime > 0): + busyRetryTime = busyRetryTime - 1 + LOG.info("System is busy, retry after sleep 10s.") + time.sleep(10) + elif re.search('Login failed.', out): + err_msg = (_('Login failed when running command:' + ' %(cmd)s, CLI out: %(out)s') + % {'cmd': cmd, + 'out': out}) + if (' -pwd ' in cmd) or (' -opwd ' in cmd): + err_msg = (_('Login failed when running command:' + 'CLI out: %(out)s') + % {'out': out}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + else: + # Put SSH client back into SSH pool. + self.ssh_pool.put(ssh_client) + return out + + except Exception as err: + if connect_times < 2: + connect_times += 1 + continue + else: + if self.ssh_pool and ssh_client: + self.ssh_pool.remove(ssh_client) + # Set ssh_pool as None when connect error,or the next + # command connect will also error. 
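+                    # Dropping the pool forces the next CLI call to build
+                    # a fresh SSH connection.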
+                    self.ssh_pool = None
+                    LOG.error('_execute_cli: %s', err)
+                    raise err
+
+    def _reset_transport_timeout(self, ssh, timeout):
+        transport = ssh.get_transport()
+        transport.sock.settimeout(timeout)
+
+    @utils.synchronized('huawei', external=False)
+    def delete_volume(self, volume):
+        lun_id = self.check_volume_exist_on_array(volume)
+        if not lun_id:
+            LOG.warning("Volume %s does not exist on the array.",
+                        volume['id'])
+            return
+
+        volume_name = self._name_translate(volume['name'])
+        LOG.debug('delete_volume: volume name: %s' % volume_name)
+        self.update_login_info()
+
+        map_info = self._get_host_map_info_by_lunid(lun_id)
+        if map_info and len(map_info) == 1:
+            self._delete_map(map_info[0][0])
+
+        added_vol_ids = self._get_extended_lun_member(lun_id)
+        if added_vol_ids:
+            self._del_lun_from_extended_lun(lun_id, added_vol_ids)
+        self._delete_volume(lun_id)
+
+    def check_lun_exist(self, lun_id, lun_wwn=None):
+        current_wwn = self._get_lun_wwn(lun_id)
+        if lun_wwn and lun_wwn != current_wwn:
+            return False
+
+        return bool(current_wwn)
+
+    def _get_extended_lun_member(self, lun_id):
+        cli_cmd = 'showextlunmember -ext %s' % lun_id
+        out = self._execute_cli(cli_cmd)
+
+        members = []
+        if re.search('Extending LUN Member Information', out):
+            try:
+                for line in out.split('\r\n')[6:-2]:
+                    tmp_line = line.split()
+                    if len(tmp_line) < 3:
+                        continue
+                    if tmp_line[2] != 'Master':
+                        members.append(tmp_line[0])
+            except Exception:
+                err_msg = (_('CLI out is not normal. CLI out: %s') % out)
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+
+        return members
+
+    def _del_lun_from_extended_lun(self, extended_id, added_ids):
+        cli_cmd = 'rmlunfromextlun -ext %s' % extended_id
+        out = self._execute_cli(cli_cmd)
+
+        self._assert_cli_operate_out('_del_lun_from_extended_lun',
+                                     ('Failed to remove LUN from extended '
+                                      'LUN: %s' % extended_id),
+                                     cli_cmd, out)
+        for added_id in added_ids:
+            cli_cmd = 'dellun -lun %s' % added_id
+            out = self._execute_cli(cli_cmd)
+
+            self._assert_cli_operate_out('_del_lun_from_extended_lun',
+                                         'Failed to delete LUN: %s'
+                                         % added_id,
+                                         cli_cmd, out)
+
+    def _delete_volume(self, volumeid):
+        """Run CLI command to delete volume."""
+        cli_cmd = 'dellun -force -lun %s' % volumeid
+        out = self._execute_cli(cli_cmd)
+
+        if re.search('The LUN does not exist', out):
+            LOG.warning("LUN %s does not exist on the array when "
+                        "deleting it.", volumeid)
+            return
+
+        self._assert_cli_operate_out('_delete_volume',
+                                     ('Failed to delete volume. volume id: %s'
+                                      % volumeid),
+                                     cli_cmd, out)
+
+    @utils.synchronized('huawei', external=False)
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create a volume from a snapshot.
+
+        We use LUNcopy to copy a new volume from snapshot.
+        The time needed increases as volume size does.
+
+        """
+
+        snapshot_name = self._name_translate(snapshot['name'])
+        volume_name = self._name_translate(volume['name'])
+
+        LOG.debug('create_volume_from_snapshot: snapshot '
+                  'name: %(snapshot)s, volume name: %(volume)s'
+                  % {'snapshot': snapshot_name,
+                     'volume': volume_name})
+
+        self.update_login_info()
+        snapshot_id = snapshot.get('provider_location', None)
+        if not snapshot_id:
+            snapshot_id = self._get_snapshot_id(snapshot_name)
+            if snapshot_id is None:
+                err_msg = (_('create_volume_from_snapshot: Snapshot %(name)s '
+                             'does not exist.')
+                           % {'name': snapshot_name})
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+
+        # Create a target LUN.
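+        # A requested size of 0 means "size the new volume to match the
+        # snapshot's source volume".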
+ if int(volume['size']) == 0: + volume_size = '%sG' % snapshot['volume_size'] + else: + volume_size = '%sG' % volume['size'] + parameters = self._parse_volume_type(volume) + tgt_vol_id = self._create_volume(volume_name, volume_size, parameters) + self._copy_volume(snapshot_id, tgt_vol_id) + + try: + lun_wwn = self._get_lun_wwn(tgt_vol_id) + except Exception: + LOG.warning("Get LUN wwn error, setting it to 'None'.") + lun_wwn = 'None' + + model_update = {} + metadata = huawei_utils.get_volume_metadata(volume) + metadata.update({'lun_wwn': lun_wwn}) + model_update['metadata'] = metadata + model_update['provider_location'] = tgt_vol_id + + return model_update + + def _copy_volume(self, src_vol_id, tgt_vol_id): + """Copy a volume or snapshot to target volume.""" + luncopy_name = VOL_AND_SNAP_NAME_PREFIX + src_vol_id + '_' + tgt_vol_id + self._create_luncopy(luncopy_name, src_vol_id, tgt_vol_id) + self.luncopy_list.append(luncopy_name) + luncopy_id = self._get_luncopy_info(luncopy_name)[1] + try: + self._start_luncopy(luncopy_id) + self._wait_for_luncopy(luncopy_name) + # Delete the target volume if LUNcopy failed. + except Exception: + with excutils.save_and_reraise_exception(): + # Need to remove the LUNcopy of the volume first. + self._delete_luncopy(luncopy_id) + self.luncopy_list.remove(luncopy_name) + self._delete_volume(tgt_vol_id) + # Need to delete LUNcopy finally. + self._delete_luncopy(luncopy_id) + self.luncopy_list.remove(luncopy_name) + + def _create_luncopy(self, luncopyname, srclunid, tgtlunid): + """Run CLI command to create LUNcopy.""" + cli_cmd = ('createluncopy -n %(name)s -l 4 -slun %(srclunid)s ' + '-tlun %(tgtlunid)s' % {'name': luncopyname, + 'srclunid': srclunid, + 'tgtlunid': tgtlunid}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_create_luncopy', + ('Failed to create LUNcopy %s' + % luncopyname), + cli_cmd, out) + + def _start_luncopy(self, luncopyid): + """Run CLI command to start LUNcopy.""" + cli_cmd = ('chgluncopystatus -luncopy %(luncopyid)s -start' + % {'luncopyid': luncopyid}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_start_luncopy', + 'Failed to start LUNcopy %s' % luncopyid, + cli_cmd, out) + + def _wait_for_luncopy(self, luncopyname): + """Wait for LUNcopy to complete.""" + while True: + luncopy_info = self._get_luncopy_info(luncopyname) + # If state is complete + if luncopy_info[3] == 'Complete': + break + # If status is not normal + elif luncopy_info[4] != 'Normal': + err_msg = (_('_wait_for_luncopy: LUNcopy %(luncopyname)s ' + 'status is %(status)s.') + % {'luncopyname': luncopyname, + 'status': luncopy_info[4]}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + time.sleep(10) + + def _get_luncopy_info(self, luncopyname): + """Return a LUNcopy information list.""" + cli_cmd = 'showluncopy' + out = self._execute_cli(cli_cmd) + + self._assert_cli_out(re.search('LUN Copy Information', out), + '_get_luncopy_info', + 'No LUNcopy information was found.', + cli_cmd, out) + + for line in out.split('\r\n')[6:-2]: + tmp_line = line.split() + if tmp_line[0] == luncopyname: + return tmp_line + return None + + def _delete_luncopy(self, luncopyid): + """Run CLI command to delete LUNcopy.""" + cli_cmd = 'delluncopy -luncopy %(id)s' % {'id': luncopyid} + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_delete_luncopy', + 'Failed to delete LUNcopy %s' % luncopyid, + cli_cmd, out) + + def create_cloned_volume(self, tgt_volume, src_volume): + src_vol_id = 
self.check_volume_exist_on_array(src_volume)
+        if not src_vol_id:
+            msg = (_("Volume %s does not exist on the array.")
+                   % src_volume['id'])
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        src_vol_name = self._name_translate(src_volume['name'])
+        tgt_vol_name = self._name_translate(tgt_volume['name'])
+
+        LOG.debug('create_cloned_volume: src volume: %(src)s, '
+                  'tgt volume: %(tgt)s' % {'src': src_vol_name,
+                                           'tgt': tgt_vol_name})
+
+        self.update_login_info()
+
+        # Create a target volume.
+        if int(tgt_volume['size']) == 0:
+            tgt_vol_size = '%sG' % src_volume['size']
+        else:
+            tgt_vol_size = '%sG' % tgt_volume['size']
+        params = self._parse_volume_type(tgt_volume)
+        tgt_vol_id = self._create_volume(tgt_vol_name, tgt_vol_size, params)
+        self._copy_volume(src_vol_id, tgt_vol_id)
+
+        try:
+            lun_wwn = self._get_lun_wwn(tgt_vol_id)
+        except Exception:
+            LOG.warning("Failed to get LUN WWN, setting it to 'None'.")
+            lun_wwn = 'None'
+
+        model_update = {}
+        metadata = huawei_utils.get_volume_metadata(tgt_volume)
+        metadata.update({'lun_wwn': lun_wwn})
+        model_update['metadata'] = metadata
+        model_update['provider_location'] = tgt_vol_id
+
+        return model_update
+
+    def _get_all_luns_info(self):
+        cli_cmd = 'showlun'
+        out = self._execute_cli(cli_cmd)
+        luns = []
+        if re.search('LUN Information', out):
+            for line in out.split('\r\n')[6:-2]:
+                new_line = line.replace('Not format', 'Notformat').split()
+                if len(new_line) < 7:
+                    err_msg = (_('CLI out is not normal. CLI out: %s') % out)
+                    LOG.error(err_msg)
+                    raise exception.VolumeBackendAPIException(data=err_msg)
+                luns.append(new_line)
+        return luns
+
+    def _get_lun_id(self, lun_name):
+        luns = self._get_all_luns_info()
+        if luns:
+            for lun in luns:
+                if lun[6] == lun_name:
+                    return lun[0]
+        return None
+
+    def _get_lun_status(self, lun_id):
+        status = None
+        cli_cmd = ('showlun -lun %s' % lun_id)
+        out = self._execute_cli(cli_cmd)
+        if re.search('LUN Information', out):
+            try:
+                line = out.split('\r\n')[7]
+                status = line.split()[2]
+            except Exception:
+                err_msg = (_('CLI out is not normal. CLI out: %s') % out)
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+        elif re.search('The object does not exist', out):
+            err_msg = _('LUN %s does not exist on array.') % lun_id
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+        else:
+            err_msg = _('Unexpected CLI out: %s') % out
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+        return status
+
+    def _wait_for_lun_status(self, lun_id, expected_status):
+        """Wait until the LUN reaches one of the expected statuses."""
+        while True:
+            status = self._get_lun_status(lun_id)
+            if status in expected_status:
+                break
+            elif status == 'Fault':
+                err_msg = (_('_wait_for_lun_status: LUN %(lun_id)s '
+                             'status is %(status)s.')
+                           % {'lun_id': lun_id,
+                              'status': status})
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+            LOG.info('LUN %s is not ready, waiting 2s...', lun_id)
+            time.sleep(2)
+
+    def extend_volume(self, volume, new_size):
+        lun_id = self.check_volume_exist_on_array(volume)
+        if not lun_id:
+            msg = _("Volume %s does not exist on the array.") % volume['id']
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        extended_vol_name = self._name_translate(volume['name'])
+
+        added_vol_ids = self._get_extended_lun_member(lun_id)
+        added_vol_name = ('ext_' + extended_vol_name.split('_')[1] + '_' +
+                          six.text_type(len(added_vol_ids)))
+        added_vol_size = (
+            six.text_type(int(new_size) - int(volume['size'])) + 'G')
+
+        LOG.debug('extend_volume: extended volume name: %(extended_name)s '
+                  'new added volume name: %(added_name)s '
+                  'new added volume size: %(added_size)s'
+                  % {'extended_name': extended_vol_name,
+                     'added_name': added_vol_name,
+                     'added_size': added_vol_size})
+
+        parameters = self._parse_volume_type(volume)
+        if ('LUNType' in parameters and parameters['LUNType'] == 'Thin'):
+            err_msg = _("extend_volume: a Thin LUN can't be extended.")
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+        added_vol_id = self._create_volume(added_vol_name, added_vol_size,
+                                           parameters)
+        try:
+            # Source LUN must be 'Normal' to extend.
+            self._wait_for_lun_status(lun_id, ('Normal',))
+
+            # Added LUN must be 'Formatting' or 'Normal'.
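+            # before it can be joined to the extended LUN below.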
+ self._wait_for_lun_status(added_vol_id, ('Formatting', 'Normal')) + self._extend_volume(lun_id, added_vol_id) + except Exception: + with excutils.save_and_reraise_exception(): + self._delete_volume(added_vol_id) + + added_vol_ids.append(added_vol_id) + + def _extend_volume(self, extended_vol_id, added_vol_id): + cli_cmd = ('addluntoextlun -extlun %(extended_vol)s ' + '-lun %(added_vol)s' % {'extended_vol': extended_vol_id, + 'added_vol': added_vol_id}) + out = self._execute_cli(cli_cmd) + self._assert_cli_operate_out('_extend_volume', + ('Failed to extend volume %s' + % extended_vol_id), + cli_cmd, out) + + @utils.synchronized('huawei', external=False) + def create_snapshot(self, snapshot): + snapshot_name = self._name_translate(snapshot['name']) + volume_name = self._name_translate(snapshot['volume_name']) + + LOG.debug('create_snapshot: snapshot name: %(snapshot)s, ' + 'volume name: %(volume)s' + % {'snapshot': snapshot_name, + 'volume': volume_name}) + + if self._resource_pool_enough() is False: + err_msg = (_('create_snapshot: ' + 'Resource pool needs 1GB valid size at least.')) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + volume = snapshot['volume'] + lun_id = volume.get('provider_location', None) + if lun_id is None: + lun_id = self._get_lun_id(volume_name) + if lun_id is None: + LOG.error('create_snapshot: Volume %(name)s does not exist.', + {'name': volume_name}) + raise exception.VolumeNotFound(volume_id=volume_name) + + self._create_snapshot(snapshot_name, lun_id) + snapshot_id = self._get_snapshot_id(snapshot_name) + try: + self._active_snapshot(snapshot_id) + except Exception: + with excutils.save_and_reraise_exception(): + self._delete_snapshot(snapshot_id) + + return snapshot_id + + def _resource_pool_enough(self): + """Check whether resource pools' valid size is more than 1GB.""" + cli_cmd = 'showrespool' + out = self._execute_cli(cli_cmd) + try: + for line in out.split('\r\n')[6:-2]: + tmp_line = line.split() + if len(tmp_line) < 4: + continue + if float(tmp_line[3]) < 1024.0: + return False + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + return True + + def _create_snapshot(self, snapshotname, srclunid): + """Create a snapshot with snapshot name and source LUN ID.""" + cli_cmd = ('createsnapshot -lun %(lunid)s -n %(snapname)s' + % {'lunid': srclunid, + 'snapname': snapshotname}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_create_snapshot', + ('Failed to create snapshot %s' + % snapshotname), + cli_cmd, out) + + def _get_snapshot_id(self, snapshotname): + cli_cmd = 'showsnapshot' + out = self._execute_cli(cli_cmd) + if re.search('Snapshot Information', out): + try: + for line in out.split('\r\n')[6:-2]: + emp_line = line.split() + if len(emp_line) < 2: + continue + if emp_line[0] == snapshotname: + return emp_line[1] + except Exception: + err_msg = (_('CLI out is not normal. 
CLI out: %s') % out)
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+        return None
+
+    def _active_snapshot(self, snapshotid):
+        """Run CLI command to activate a snapshot."""
+        cli_cmd = ('actvsnapshot -snapshot %(snapshotid)s'
+                   % {'snapshotid': snapshotid})
+        out = self._execute_cli(cli_cmd)
+
+        self._assert_cli_operate_out('_active_snapshot',
+                                     ('Failed to activate snapshot %s'
+                                      % snapshotid),
+                                     cli_cmd, out)
+
+    def delete_snapshot(self, snapshot):
+        snapshot_name = self._name_translate(snapshot['name'])
+        volume_name = self._name_translate(snapshot['volume_name'])
+
+        LOG.debug('delete_snapshot: snapshot name: %(snapshot)s, '
+                  'volume name: %(volume)s' % {'snapshot': snapshot_name,
+                                               'volume': volume_name})
+
+        self.update_login_info()
+        snapshot_id = snapshot.get('provider_location', None)
+        if ((snapshot_id is not None) and
+                self._check_snapshot_created(snapshot_id)):
+            # Deleting a snapshot that is the source of a LUNcopy is not
+            # allowed.
+            if self._snapshot_in_luncopy(snapshot_id):
+                err_msg = (_('delete_snapshot: Cannot delete snapshot %s '
+                             'because it is a source LUN of a LUNcopy.')
+                           % snapshot_name)
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+
+            self._delete_snapshot(snapshot_id)
+        else:
+            err_msg = (_('delete_snapshot: Snapshot %(snap)s does not exist.')
+                       % {'snap': snapshot_name})
+            LOG.warning(err_msg)
+
+    def _check_snapshot_created(self, snapshot_id):
+        cli_cmd = 'showsnapshot -snapshot %(snap)s' % {'snap': snapshot_id}
+        out = self._execute_cli(cli_cmd)
+        if re.search('Snapshot Information', out):
+            return True
+        elif re.search('Current LUN is not a LUN snapshot', out):
+            return False
+        else:
+            msg = (_("Failed to check whether the snapshot is created. "
+                     "CLI out: %s") % out)
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def _snapshot_in_luncopy(self, snapshot_id):
+        for name in self.luncopy_list:
+            if name.startswith(VOL_AND_SNAP_NAME_PREFIX + snapshot_id):
+                return True
+        return False
+
+    def _delete_snapshot(self, snapshotid):
+        """Send CLI commands to delete a snapshot.
+
+        First disable the snapshot, then delete it.
+ + """ + + cli_cmd = ('disablesnapshot -snapshot %(snapshotid)s' + % {'snapshotid': snapshotid}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_delete_snapshot', + ('Failed to disable snapshot %s' + % snapshotid), + cli_cmd, out) + + cli_cmd = ('delsnapshot -snapshot %(snapshotid)s' + % {'snapshotid': snapshotid}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_delete_snapshot', + ('Failed to delete snapshot %s' + % snapshotid), + cli_cmd, out) + + def _assert_cli_out(self, condition, func, msg, cmd, cliout): + """Assertion for CLI query out.""" + if not condition: + err_msg = (_('%(func)s: %(msg)s\nCLI command: %(cmd)s\n' + 'CLI out: %(out)s') % {'func': func, + 'msg': msg, + 'cmd': cmd, + 'out': cliout}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def _assert_cli_operate_out(self, func, msg, cmd, cliout): + """Assertion for CLI out string: command operates successfully.""" + condition = (re.search('command operates successfully', cliout) + or re.search('The name exists already', cliout)) + self._assert_cli_out(condition, func, msg, cmd, cliout) + + def _is_lun_normal(self, lun_id): + """Check whether the LUN is normal.""" + cli_cmd = ('showlun -lun %s' % lun_id) + out = self._execute_cli(cli_cmd) + if re.search('LUN Information', out): + try: + line = out.split('\r\n')[7] + line = line.replace('Not format', 'Notformat') + status = line.split()[2] + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + LOG.debug('LUN: %(lun_id)s, current status: %(status)s' + % {'lun_id': lun_id, + 'status': status}) + if (status == 'Normal') or (status == 'Formatting'): + return True + + return False + + def map_volume(self, host_id, lun_id): + """Map a volume to a host.""" + # Map a LUN to a host if not mapped. + + hostlun_id = None + map_info = self.get_host_map_info(host_id) + # Make sure the host LUN ID starts from 1. + new_hostlun_id = 1 + new_hostlunid_found = False + if map_info: + for maping in map_info: + if maping[2] == lun_id: + hostlun_id = maping[4] + break + elif not new_hostlunid_found: + if new_hostlun_id < int(maping[4]): + new_hostlunid_found = True + else: + new_hostlun_id = int(maping[4]) + 1 + + if not hostlun_id: + cli_cmd = ('addhostmap -host %(host_id)s -devlun %(lunid)s ' + '-hostlun %(hostlunid)s' + % {'host_id': host_id, + 'lunid': lun_id, + 'hostlunid': new_hostlun_id}) + out = self._execute_cli(cli_cmd) + # Check whether the hostlunid has already been assigned. + condition = re.search(HOST_LUN_ERR_MSG, out) + while condition: + new_hostlun_id = new_hostlun_id + 1 + cli_cmd = ('addhostmap -host %(host_id)s -devlun %(lunid)s ' + '-hostlun %(hostlunid)s' + % {'host_id': host_id, + 'lunid': lun_id, + 'hostlunid': new_hostlun_id}) + out = self._execute_cli(cli_cmd) + condition = re.search(HOST_LUN_ERR_MSG, out) + + msg = ('Failed to map LUN %s to host %s. host LUN ID: %s' + % (lun_id, host_id, new_hostlun_id)) + self._assert_cli_operate_out('map_volume', msg, cli_cmd, out) + + hostlun_id = new_hostlun_id + + return hostlun_id + + def add_host(self, host_name, host_ip, initiator=None): + """Create a host and add it to hostgroup.""" + # Create an OpenStack hostgroup if not created before. 
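+        # All hosts created by this driver share the single
+        # 'HostGroup_OpenStack' hostgroup.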
+ hostgroup_name = HOST_GROUP_NAME + self.hostgroup_id = self._get_hostgroup_id(hostgroup_name) + if self.hostgroup_id is None: + try: + self._create_hostgroup(hostgroup_name) + self.hostgroup_id = self._get_hostgroup_id(hostgroup_name) + except Exception: + self.hostgroup_id = self._get_hostgroup_id(hostgroup_name) + if self.hostgroup_id is None: + err_msg = (_('error to create hostgroup: %s') + % hostgroup_name) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + # Create a host and add it to the hostgroup. + # Check the old host name to support the upgrade from grizzly to + # higher versions. + if initiator: + old_host_name = HOST_NAME_PREFIX + six.text_type(hash(initiator)) + old_host_id = self._get_host_id(old_host_name, self.hostgroup_id) + if old_host_id is not None: + return old_host_id + + if host_name and (len(host_name) > 26): + host_name = six.text_type(hash(host_name)) + host_name = HOST_NAME_PREFIX + host_name + host_id = self._get_host_id(host_name, self.hostgroup_id) + if host_id is None: + os_type = self.get_conf_host_os_type(host_ip) + try: + self._create_host(host_name, self.hostgroup_id, os_type) + host_id = self._get_host_id(host_name, self.hostgroup_id) + except Exception: + host_id = self._get_host_id(host_name, self.hostgroup_id) + if host_id is None: + err_msg = (_('error to create host: %s') % host_name) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + return host_id + + def _get_hostgroup_id(self, groupname): + """Get the given hostgroup ID. + + If the hostgroup not found, return None. + + """ + + cli_cmd = 'showhostgroup' + out = self._execute_cli(cli_cmd) + if re.search('Host Group Information', out): + try: + for line in out.split('\r\n')[6:-2]: + tmp_line = line.split() + if len(tmp_line) < 2: + continue + if tmp_line[1] == groupname: + return tmp_line[0] + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + return None + + def _create_hostgroup(self, hostgroupname): + """Run CLI command to create host group.""" + cli_cmd = 'createhostgroup -n %(name)s' % {'name': hostgroupname} + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_create_hostgroup', + ('Failed to Create hostgroup %s.' + % hostgroupname), + cli_cmd, out) + + def _get_host_id(self, hostname, hostgroupid): + """Get the given host ID.""" + cli_cmd = 'showhost -group %(groupid)s' % {'groupid': hostgroupid} + out = self._execute_cli(cli_cmd) + if re.search('Host Information', out): + try: + for line in out.split('\r\n')[6:-2]: + tmp_line = line.split() + if len(tmp_line) < 2: + continue + if tmp_line[1] == hostname: + return tmp_line[0] + except Exception: + err_msg = (_('CLI out is not normal. 
CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + return None + + def _create_host(self, hostname, hostgroupid, type): + """Run CLI command to add host.""" + cli_cmd = ('addhost -group %(groupid)s -n %(hostname)s -t %(type)s' + % {'groupid': hostgroupid, + 'hostname': hostname, + 'type': type}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_create_host', + 'Failed to create host %s' % hostname, + cli_cmd, out) + + def get_host_port_info(self, hostid): + """Run CLI command to get host port information.""" + cli_cmd = ('showhostport -host %(hostid)s' % {'hostid': hostid}) + out = self._execute_cli(cli_cmd) + if re.search('Host Port Information', out): + return [line.split() for line in out.split('\r\n')[6:-2]] + else: + return None + + def get_host_map_info(self, hostid): + """Get map information of the given host.""" + + cli_cmd = 'showhostmap -host %(hostid)s' % {'hostid': hostid} + out = self._execute_cli(cli_cmd) + if re.search('Map Information', out): + mapinfo = [] + try: + for line in out.split('\r\n')[6:-2]: + new_line = line.split() + if len(new_line) < 5: + continue + mapinfo.append(new_line) + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + # Sorted by host LUN ID. + return sorted(mapinfo, key=lambda x: int(x[4])) + else: + return None + + def _get_host_map_info_by_lunid(self, lunid): + """Get map information of the given host.""" + + cli_cmd = 'showhostmap -lun %(lunid)s' % {'lunid': lunid} + out = self._execute_cli(cli_cmd) + if re.search('Map Information', out): + mapinfo = [] + try: + for line in out.split('\r\n')[6:-2]: + new_line = line.split() + if len(new_line) < 5: + continue + mapinfo.append(new_line) + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + return mapinfo + else: + return None + + def get_lun_details(self, lun_id): + cli_cmd = 'showlun -lun %s' % lun_id + out = self._execute_cli(cli_cmd) + lun_details = {} + if re.search('LUN Information', out): + try: + for line in out.split('\r\n')[4:-2]: + line = line.split('|') + key = ''.join(line[0].strip().split()) + if len(line) < 2: + continue + val = line[1].strip() + lun_details[key] = val + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + return lun_details + + def change_lun_ctr(self, lun_id, ctr): + LOG.debug('change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s.' + % {'lun': lun_id, 'ctr': ctr}) + + cli_cmd = 'chglun -lun %s -c %s' % (lun_id, ctr) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('change_lun_ctr', + 'Failed to change owning controller for ' + 'LUN %s' % lun_id, + cli_cmd, out) + + def get_host_id(self, host_name, initiator=None): + # Check the old host name to support the upgrade from grizzly to + # higher versions. 
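+        # Grizzly-era hosts were named after a hash of the initiator,
+        # so that name is looked up first.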
+ host_id = None + if initiator: + old_host_name = HOST_NAME_PREFIX + str(hash(initiator)) + host_id = self._get_host_id(old_host_name, self.hostgroup_id) + if host_id is None: + if host_name and (len(host_name) > 26): + host_name = str(hash(host_name)) + host_name = HOST_NAME_PREFIX + host_name + host_id = self._get_host_id(host_name, self.hostgroup_id) + if host_id is None: + LOG.warning('remove_map: Host %s does not exist.', + host_name) + return None + + return host_id + + def remove_map(self, lun_id, host_id): + """Remove host map.""" + if host_id is None: + return + map_id = None + map_info = self.get_host_map_info(host_id) + if map_info: + for maping in map_info: + if maping[2] == lun_id: + map_id = maping[0] + break + if map_id is not None: + try: + self._delete_map(map_id) + except Exception: + map_info = self.get_host_map_info(host_id) + if map_info and [x for x in map_info if x[2] == lun_id]: + err_msg = (_('remove_map: Failed to delete host map to ' + 'volume %s.') % lun_id) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + else: + LOG.warning(('remove_map: No map between host %(host_id)s and ' + 'volume %(volume)s.') % {'host_id': host_id, + 'volume': lun_id}) + + def _delete_map(self, mapid, attempts=5): + """Run CLI command to remove map.""" + cli_cmd = 'delhostmap -force -map %(mapid)s' % {'mapid': mapid} + while True: + out = self._execute_cli(cli_cmd) + + # We retry to delete host map 10s later if there are + # IOs accessing the system. + if re.search('command operates successfully', out): + break + else: + if (re.search('there are IOs accessing the system', out) and + (attempts > 0)): + + LOG.debug('_delete_map: There are IOs accessing ' + 'the system. Retry to delete host map ' + '%(mapid)s 10s later.' % {'mapid': mapid}) + + time.sleep(10) + attempts -= 1 + continue + else: + err_msg = (_('_delete_map: Failed to delete host map ' + '%(mapid)s.\nCLI out: %(out)s') + % {'mapid': mapid, + 'times': attempts, + 'out': out}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def find_chap_info(self, iscsi_conf, initiator_name): + """Find CHAP info from xml.""" + chapinfo = None + default_chapinfo = None + chap = [] + + for ini in iscsi_conf.get('Initiator', []): + if ini.get('Name') == initiator_name: + chapinfo = ini.get('CHAPinfo') + break + else: + default_chapinfo = ini.get('CHAPinfo') + + if default_chapinfo: + chap = default_chapinfo.split(';') + default_chapinfo = [] + if len(chap) > 1: + default_chapinfo.append(chap[0]) + default_chapinfo.append(chap[1]) + + return default_chapinfo + + if chapinfo: + chap = chapinfo.split(';') + chapinfo = [] + if len(chap) > 1: + chapinfo.append(chap[0]) + chapinfo.append(chap[1]) + + return chapinfo + + def delete_hostport(self, portid, attempts=5): + """Run CLI command to delete host port.""" + cli_cmd = ('delhostport -force -p %(portid)s' % {'portid': portid}) + while True: + out = self._execute_cli(cli_cmd) + # We retry to delete host 10s later if there are + # IOs accessing the system. + if (re.search('the initiator has requests to be performed', out) + and (attempts > 0)): + LOG.debug('delhostport: There are IOs accessing ' + 'the system. Retry to delete host port ' + '%(portid)s 10s later.', {'portid': portid}) + + time.sleep(10) + attempts -= 1 + continue + else: + self._assert_cli_operate_out( + 'delete_hostport', + 'Failed to delete host port %s.' 
% portid, + cli_cmd, out) + break + + def delete_host(self, hostid): + """Run CLI command to delete host.""" + cli_cmd = ('delhost -force -host %(hostid)s' % {'hostid': hostid}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('delete_host', + 'Failed to delete host. %s.' % hostid, + cli_cmd, out) + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, run update the stats first. + """ + if refresh: + self._update_volume_stats() + + return self._stats + + def _update_pool_info(self, pool_name, pool_type, pool_dev, disk_info): + pool_info = {} + pool_info['pool_name'] = pool_name + pool_info['QoS_support'] = False + pool_info['reserved_percentage'] = 0 + pool_info['total_capacity_gb'] = 0.0 + pool_info['free_capacity_gb'] = 0.0 + key = 'TotalCapacity(MB)' + + pool_details = self.get_pool_details(pool_type, pool_dev[0]) + if 'Thin' == pool_type: + pool_info['free_capacity_gb'] = (float(pool_dev[4]) / 1024) + pool_info['total_capacity_gb'] = (float(pool_details[key]) / 1024) + pool_info['thin_provisioning_support'] = True + elif 'Thick' == pool_type: + pool_info['free_capacity_gb'] = (float(pool_dev[3]) / 1024) + pool_info['total_capacity_gb'] = (float(pool_details[key]) / 1024) + pool_info['thick_provisioning_support'] = True + + if not self._is_pool_normal(pool_details, disk_info): + pool_info['free_capacity_gb'] = 0.0 + pool_info['total_capacity_gb'] = 0.0 + + return pool_info + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + + LOG.debug("_update_volume_stats: Updating volume stats.") + self.update_login_info() + params_conf = self._parse_conf_lun_params() + pools_conf = params_conf['StoragePool'] + + data = {} + data['vendor_name'] = 'Huawei' + data['pools'] = [] + thick_pools = self._get_dev_pool_info('Thick') + thin_pools = self._get_dev_pool_info('Thin') + disk_info = self._get_disk_info() + + for pool_conf in pools_conf: + is_find = False + for pool_dev in thick_pools: + if pool_dev[5] == pool_conf: + pool_info = self._update_pool_info(pool_conf, + 'Thick', + pool_dev, + disk_info) + data['pools'].append(pool_info) + is_find = True + + for pool_dev in thin_pools: + if pool_dev[1] == pool_conf: + pool_info = self._update_pool_info(pool_conf, + 'Thin', + pool_dev, + disk_info) + data['pools'].append(pool_info) + is_find = True + + if is_find is not True: + pool_info = self._update_pool_info(pool_conf, '', + pool_dev, disk_info) + + self._stats = data + + def _get_dev_pool_info(self, pooltype): + """Get pools information created in storage device. + + Return a list whose elements are also list. 
+ + """ + + cli_cmd = ('showpool' if pooltype == 'Thin' else 'showrg') + out = self._execute_cli(cli_cmd) + + test = (re.search('Pool Information', out) or + re.search('RAID Group Information', out)) + if test: + pool = out.split('\r\n')[6:-2] + return [line.split() for line in pool] + + return [] + + def get_pool_details(self, pooltype, pool_id): + cli_cmd = ('showpool -pool ' if pooltype == 'Thin' else 'showrg -rg ') + cli_cmd += '%s' % pool_id + out = self._execute_cli(cli_cmd) + + test = (re.search('Pool Information', out) or + re.search('RAID Group Information', out)) + self._assert_cli_out(test, 'get_pool_details', + 'No pool details found.', cli_cmd, out) + + lun_details = {} + try: + for line in out.split('\r\n')[4:-2]: + line = line.split('|') + key = ''.join(line[0].strip().split()) + if len(line) < 2: + continue + val = line[1].strip() + lun_details[key] = val + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + return lun_details + + def _is_pool_normal(self, pool_details, disk_info): + if pool_details['Status'] != 'Normal': + LOG.warning('%s status is not normal', pool_details['Name']) + return False + + disk_list = pool_details['MemberDiskList'].split(';') + for disk in disk_list: + if (disk and not self._is_disk_available(disk, disk_info)): + LOG.warning('disk %(disk)s in pool %(pool)s' + ' is not available', + {'disk': disk, 'pool': pool_details['Name']}) + return False + + return True + + def _get_disk_info(self, info_type='logic'): + cli_cmd = 'showdisk -' + info_type + out = self._execute_cli(cli_cmd) + + test = re.search('Disk Information', out) + self._assert_cli_out(test, '_get_disk_info', + 'No disk information found.', cli_cmd, out) + return out + + def _is_disk_available(self, disk_name, disk_info): + pattern = r"\(%s\)\s+(Normal|Reconstructed)" % disk_name + if re.search(pattern, disk_info): + return True + + LOG.warning('disk (%s) status is unavailable', disk_name) + return False + + def _get_iscsi_tgt_port_info_ultrapath(self, port_ip_list): + """Get iSCSI Port information of storage device.""" + port_info_list = [] + cli_cmd = 'showiscsiip' + out = self._execute_cli(cli_cmd) + if re.search('iSCSI IP Information', out): + try: + for line in out.split('\r\n')[6:-2]: + tmp_line = line.split() + if len(tmp_line) < 4: + continue + if tmp_line[3] in port_ip_list: + port_info_list.append(tmp_line) + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + if port_info_list: + return port_info_list + + err_msg = _('All target IPs are not available,' + 'all target IPs is not configured in array') + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def get_tgt_iqn_ultrapath(self, port_ip_list): + + LOG.debug('get_tgt_iqn_ultrapath: iSCSI list is %s.' % port_ip_list) + + cli_cmd = 'showiscsitgtname' + out = self._execute_cli(cli_cmd) + + self._assert_cli_out(re.search('ISCSI Name', out), + 'get_tgt_iqn', + 'Failed to get iSCSI target iqn.', + cli_cmd, out) + + lines = out.split('\r\n') + try: + index = lines[4].index('iqn') + iqn_prefix = lines[4][index:].strip() + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + # Here we make sure port_info won't be None. 
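+        # Each entry appended below is (target iqn, target IP, controller).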
+ ret_info = [] + port_info_list = self._get_iscsi_tgt_port_info_ultrapath(port_ip_list) + for port_info in port_info_list: + ctr = ('0' if port_info[0] == 'A' else '1') + interface = '0' + port_info[1] + port = '0' + port_info[2][1:] + iqn_suffix = ctr + '02' + interface + port + # iqn_suffix should not start with 0 + while(True): + if iqn_suffix.startswith('0'): + iqn_suffix = iqn_suffix[1:] + else: + break + + iqn = iqn_prefix + ':' + iqn_suffix + ':' + port_info[3] + + LOG.debug('_get_tgt_iqn: iSCSI target iqn is %s.' % iqn) + ret_info.append((iqn, port_info[3], port_info[0])) + + return ret_info + + def get_tgt_iqn(self, port_ip): + """Run CLI command to get target iSCSI iqn. + + The iqn is formed with three parts: + iSCSI target name + iSCSI port info + iSCSI IP + + """ + + LOG.debug('get_tgt_iqn: iSCSI IP is %s.' % port_ip) + + cli_cmd = 'showiscsitgtname' + out = self._execute_cli(cli_cmd) + + self._assert_cli_out(re.search('ISCSI Name', out), + 'get_tgt_iqn', + 'Failed to get iSCSI target %s iqn.' % port_ip, + cli_cmd, out) + + lines = out.split('\r\n') + try: + index = lines[4].index('iqn') + iqn_prefix = lines[4][index:].strip() + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + # Here we make sure port_info won't be None. + port_info = self._get_iscsi_tgt_port_info(port_ip) + ctr = ('0' if port_info[0] == 'A' else '1') + interface = '0' + port_info[1] + port = '0' + port_info[2][1:] + iqn_suffix = ctr + '02' + interface + port + # iqn_suffix should not start with 0 + while(True): + if iqn_suffix.startswith('0'): + iqn_suffix = iqn_suffix[1:] + else: + break + + iqn = iqn_prefix + ':' + iqn_suffix + ':' + port_info[3] + + LOG.debug('get_tgt_iqn: iSCSI target iqn is %s.' % iqn) + + return (iqn, port_info[0]) + + def _get_iscsi_tgt_port_info(self, port_ip): + """Get iSCSI Port information of storage device.""" + cli_cmd = 'showiscsiip' + out = self._execute_cli(cli_cmd) + if re.search('iSCSI IP Information', out): + for line in out.split('\r\n')[6:-2]: + tmp_line = line.split() + if len(tmp_line) < 4: + continue + if tmp_line[3] == port_ip: + return tmp_line + + err_msg = _('_get_iscsi_tgt_port_info: Failed to get iSCSI port ' + 'info. Please make sure the iSCSI port IP %s is ' + 'configured in array.') % port_ip + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def add_iscsi_port_to_host(self, hostid, connector, + chapinfo=None, multipathtype=0,): + """Add an iSCSI port to the given host. + + First, add an initiator if needed, the initiator is equivalent to + an iSCSI port. Then, add the initiator to host if not added before. + + """ + + initiator = connector['initiator'] + # Add an iSCSI initiator. + if not self._initiator_added(initiator): + self._add_initiator(initiator) + # Add the initiator to host if not added before. 
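+        # The port name embeds a hash of the initiator IQN, mirroring the
+        # naming scheme used for hosts.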
+ port_name = HOST_PORT_PREFIX + six.text_type(hash(initiator)) + portadded = False + hostport_info = self.get_host_port_info(hostid) + if hostport_info: + for hostport in hostport_info: + if hostport[2] == initiator: + portadded = True + break + + if chapinfo: + if self._chapuser_added_to_array(initiator, chapinfo[0]): + self.change_chapuser_password(chapinfo) + else: + self.add_chapuser_to_array(chapinfo) + if not self._chapuser_added_to_initiator(initiator, chapinfo[0]): + self.add_chapuser_to_ini(chapinfo, initiator) + + self.active_chap(initiator) + + if not portadded: + cli_cmd = ('addhostport -host %(id)s -type 5 ' + '-info %(info)s -n %(name)s -mtype %(multype)s' + % {'id': hostid, + 'info': initiator, + 'name': port_name, + 'multype': multipathtype}) + out = self._execute_cli(cli_cmd) + + msg = ('Failed to add iSCSI port %(port)s to host %(host)s' + % {'port': port_name, + 'host': hostid}) + self._assert_cli_operate_out('add_iscsi_port_to_host', + msg, cli_cmd, out) + + def _initiator_added(self, ininame): + """Check whether the initiator is already added.""" + cli_cmd = 'showiscsiini -ini %(name)s' % {'name': ininame} + out = self._execute_cli(cli_cmd) + return (True if re.search('Initiator Information', out) else False) + + def _add_initiator(self, ininame): + """Add a new initiator to storage device.""" + cli_cmd = 'addiscsiini -n %(name)s' % {'name': ininame} + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_add_iscsi_host_port', + 'Failed to add initiator %s' % ininame, + cli_cmd, out) + + def _chapuser_added_to_array(self, initiator, chapuser_name): + """Check whether the chapuser is already added to array.""" + cli_cmd = ('showchapuser') + out = self._execute_cli(cli_cmd) + if re.search('Chap User Information', out): + try: + for line in out.split('\r\n')[4:-2]: + tmp_line = line.split() + if chapuser_name in tmp_line: + return True + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + return False + + def _chapuser_added_to_initiator(self, initiator, chapuser_name): + """Check whether the chapuser is already added to initiator.""" + cli_cmd = ('showchapuser -ini %(name)s' % {'name': initiator}) + out = self._execute_cli(cli_cmd) + if re.search('Chap User Information', out): + try: + for line in out.split('\r\n')[4:-2]: + tmp_line = line.split() + if chapuser_name in tmp_line: + return True + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + return False + + def is_initiator_used_chap(self, initiator): + """Check whether the initiator is used chap.""" + cli_cmd = 'showiscsiini -ini %(name)s' % {'name': initiator} + out = self._execute_cli(cli_cmd) + if re.search('Initiator Information', out): + try: + for line in out.split('\r\n'): + tmp_line = line.split() + if 'Enabled' in tmp_line: + return True + except Exception: + err_msg = (_('CLI out is not normal. CLI out: %s') % out) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + return False + + def change_chapuser_password(self, chapinfo): + # change chapuser password. 
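+        # The configured CHAP password is passed as both the old and the
+        # new password, so this re-asserts the configured credentials.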
+        LOG.info('Change CHAP password, chapuser is: %s.', chapinfo[0])
+        cli_cmd = ('chgchapuserpwd -n %(chapuser)s '
+                   '-npwd %(new_password)s '
+                   '-opwd %(old_password)s' %
+                   {'chapuser': chapinfo[0],
+                    'new_password': chapinfo[1],
+                    'old_password': chapinfo[1]})
+        out = self._execute_cli(cli_cmd)
+
+        if 'Invalid user name or password' in out:
+            LOG.error('Failed to change CHAP password, chapuser is: %s.',
+                      chapinfo[0])
+            msg = 'Failed to change the CHAP password.'
+            # Passing None instead of cli_cmd keeps the CHAP password out
+            # of the assertion output.
+            self._assert_cli_operate_out('change_chapuser_password',
+                                         msg, None, out)
+
+    def add_chapuser_to_array(self, chapinfo):
+        """Add a CHAP user to the array."""
+        LOG.info('Add CHAP user, chapuser is: %s.', chapinfo[0])
+        cli_cmd = ('addchapuser -n %(username)s -pwd %(password)s' %
+                   {'username': chapinfo[0],
+                    'password': chapinfo[1]})
+        out = self._execute_cli(cli_cmd)
+
+        if 'The CHAP user exists already' in out:
+            LOG.warning('The CHAP user %s exists already.',
+                        chapinfo[0])
+        else:
+            msg = 'Failed to add the CHAP user.'
+            self._assert_cli_operate_out('add_chapuser_to_array',
+                                         msg, None, out)
+
+    def add_chapuser_to_ini(self, chapinfo, initiator):
+        """Add the CHAP user to an initiator."""
+        cli_cmd = ('addchapusertoini -chapuser %(username)s '
+                   '-ini %(initiator)s'
+                   % {'username': chapinfo[0],
+                      'initiator': initiator})
+        out = self._execute_cli(cli_cmd)
+
+        msg = 'Failed to add the CHAP user to the initiator.'
+        self._assert_cli_operate_out('add_chapuser_to_ini',
+                                     msg, cli_cmd, out)
+
+    def active_chap(self, initiator):
+        """Enable CHAP for the initiator."""
+        cli_cmd = ('chginichapstatus -ini %(initiator)s '
+                   '-st %(chap_status)s'
+                   % {'initiator': initiator,
+                      'chap_status': 1})  # 1 enables CHAP.
+        out = self._execute_cli(cli_cmd)
+
+        msg = 'Failed to change the CHAP status.'
+        self._assert_cli_operate_out('active_chap',
+                                     msg, cli_cmd, out)
+
+    def _remove_chap(self, initiator, chapinfo):
+        """Disable CHAP for the initiator and remove its CHAP user."""
+        cli_cmd = ('chginichapstatus -ini %(initiator)s '
+                   '-st %(chap_status)s' %
+                   {'initiator': initiator,
+                    'chap_status': 0})  # 0 disables CHAP.
+        out = self._execute_cli(cli_cmd)
+
+        msg = 'Failed to change the CHAP status.'
+        self._assert_cli_operate_out('_remove_chap',
+                                     msg, cli_cmd, out)
+
+        # Remove the CHAP user from the initiator.
+        cli_cmd = ('rmchapuserfromini -chapuser %(username)s '
+                   '-ini %(initiator)s' %
+                   {'username': chapinfo[0],
+                    'initiator': initiator})
+        out = self._execute_cli(cli_cmd)
+
+        msg = 'Failed to remove the CHAP user from the initiator.'
+        self._assert_cli_operate_out('_remove_chap',
+                                     msg, cli_cmd, out)
+
+    def get_connected_free_wwns(self):
+        """Get free connected FC port WWNs.
+
+        If no new ports are connected, return an empty list.
+        """
+        cli_cmd = 'showfreeport'
+        out = self._execute_cli(cli_cmd)
+        wwns = []
+        if re.search('Host Free Port Information', out):
+            for line in out.split('\r\n')[6:-2]:
+                tmp_line = line.split()
+                if len(tmp_line) < 5:
+                    continue
+                if (tmp_line[1] == 'FC') and (tmp_line[4] == 'Connected'):
+                    wwns.append(tmp_line[0])
+
+        return list(set(wwns))
+
+    def add_fc_port_to_host(self, hostid, wwn, multipathtype=0):
+        """Add an FC port to the host."""
+        portname = HOST_PORT_PREFIX + wwn
+        cli_cmd = ('addhostport -host %(id)s -type 1 '
+                   '-wwn %(wwn)s -n %(name)s -mtype %(multype)s'
+                   % {'id': hostid,
+                      'wwn': wwn,
+                      'name': portname,
+                      'multype': multipathtype})
+        out = self._execute_cli(cli_cmd)
+
+        msg = ('Failed to add FC port %(port)s to host %(host)s.'
+               % {'port': portname, 'host': hostid})
+        self._assert_cli_operate_out('add_fc_port_to_host', msg, cli_cmd, out)
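+    # A hypothetical sketch of the `showhostpath` output expected by
+    # get_host_port_details() below: 'key | value' rows per path, with
+    # blocks separated by lines containing no '|', e.g.
+    #
+    #     Host ID        | 1
+    #     Target WWN     | 2011001b329c2fff
+    #     ------------------------------------
+    #     Host ID        | 1
+    #
+    # Each block is collected into a dict whose keys have internal
+    # whitespace removed, e.g. 'Host ID' -> 'HostID'.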
+    def get_host_port_details(self, host_id):
+        """Get multipath details for the given host."""
+        cli_cmd = 'showhostpath -host %s' % host_id
+        out = self._execute_cli(cli_cmd)
+
+        self._assert_cli_out(re.search('Multi Path Information', out),
+                             'get_host_port_details',
+                             'Failed to get host port details.',
+                             cli_cmd, out)
+
+        port_details = []
+        tmp_details = {}
+        for line in out.split('\r\n')[4:-2]:
+            line = line.split('|')
+            # Separator between multiple path detail blocks, usually a
+            # line of '-'.
+            if len(line) == 1:
+                port_details.append(tmp_details)
+                tmp_details = {}
+                continue
+            key = ''.join(line[0].strip().split())
+            val = line[1].strip()
+            tmp_details[key] = val
+        port_details.append(tmp_details)
+        return port_details
+
+    def get_all_fc_ports_from_array(self):
+        """Get the WWNs of all 'Up' FC host ports on the array."""
+        cli_cmd = 'showport -logic 1'
+        out = self._execute_cli(cli_cmd)
+        fc_ports = []
+        # NOTE: the controller IDs 'A' and 'B' are assumed here.
+        contrs = ('A', 'B')
+        if re.search('Port Information', out):
+            test_list = out.split('\r\n')
+            for line in test_list[6:-2]:
+                tmp_line = line.split()
+                if len(tmp_line) < 10:
+                    continue
+                if (tmp_line[6] == 'FC' and tmp_line[0] in contrs and
+                        tmp_line[9] == 'Up'):
+                    cmd = ('showport -c %(contr)s -e %(enclu)s -mt 3 '
+                           '-module %(module)s -p %(pr_id)s -pt 1'
+                           % {'contr': tmp_line[0],
+                              'enclu': tmp_line[1],
+                              'module': tmp_line[4],
+                              'pr_id': tmp_line[5]})
+                    res = self._execute_cli(cmd)
+                    if re.search('Port Information', res):
+                        tmp_list = res.split('\r\n')
+                        for li in tmp_list[6:-2]:
+                            tmp_li = li.split()
+                            if tmp_li[0] == 'WWN(MAC)':
+                                fc_ports.append(tmp_li[2])
+                                break
+        return fc_ports
+
+    def get_fc_ports_from_contr(self, contr):
+        """Get the WWNs of all 'Up' FC host ports on one controller."""
+        cli_cmd = 'showport -logic 1'
+        out = self._execute_cli(cli_cmd)
+        fc_ports = []
+        if re.search('Port Information', out):
+            test_list = out.split('\r\n')
+            for line in test_list[6:-2]:
+                tmp_line = line.split()
+                if len(tmp_line) < 10:
+                    continue
+                if (tmp_line[6] == 'FC' and tmp_line[0] == contr and
+                        tmp_line[9] == 'Up'):
+                    cmd = ('showport -c %(contr)s -e %(enclu)s -mt 3 -module '
+                           '%(module)s -p %(pr_id)s -pt 1'
+                           % {'contr': contr,
+                              'enclu': tmp_line[1],
+                              'module': tmp_line[4],
+                              'pr_id': tmp_line[5]})
+                    res = self._execute_cli(cmd)
+                    if re.search('Port Information', res):
+                        tmp_list = res.split('\r\n')
+                        for li in tmp_list[6:-2]:
+                            tmp_li = li.split()
+                            if tmp_li[0] == 'WWN(MAC)':
+                                fc_ports.append(tmp_li[2])
+                                break
+        return fc_ports
+
+    def ensure_fc_initiator_added(self, initiator_name, hostid):
+        # There is no CLI command to query host initiators already in use,
+        # so just add; there is no need to check whether it was added before.
+        self._add_fc_initiator_to_array(initiator_name)
+        self._add_fc_port_to_host(hostid, initiator_name)
+
+    def _add_fc_initiator_to_array(self, initiator_name):
+        cli_cmd = 'addofflinewwpn -t 1 -wwpn %s' % initiator_name
+        self._execute_cli(cli_cmd)
+
+    def _add_fc_port_to_host(self, hostid, wwn, multipathtype=0):
+        """Add an FC port to the host without asserting on the CLI output."""
+        portname = HOST_PORT_PREFIX + wwn
+        cli_cmd = ('addhostport -host %(id)s -type 1 '
+                   '-wwn %(wwn)s -n %(name)s -mtype %(multype)s'
+                   % {'id': hostid,
+                      'wwn': wwn,
+                      'name': portname,
+                      'multype': multipathtype})
+        self._execute_cli(cli_cmd)
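+    # One plausible attach-time flow using the FC helpers above (a sketch
+    # only, not necessarily the driver's exact logic; host_id and wwn are
+    # hypothetical):
+    #
+    #     free_wwns = self.get_connected_free_wwns()
+    #     for wwn in free_wwns:
+    #         self.add_fc_port_to_host(host_id, wwn)
+    #     port_details = self.get_host_port_details(host_id)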