diff --git a/Cinder/Newton/huawei_conf.py b/Cinder/Newton/huawei_conf.py index b60c635..d7ff363 100644 --- a/Cinder/Newton/huawei_conf.py +++ b/Cinder/Newton/huawei_conf.py @@ -560,19 +560,19 @@ def _replication_pair_sync_speed(self, xml_root): def _get_local_minimum_fc_initiator(self, xml_root): text = xml_root.findtext('FC/MinOnlineFCInitiator') minimum_fc_initiator = constants.DEFAULT_MINIMUM_FC_INITIATOR_ONLINE + if text and not text.isdigit(): + msg = (_("Invalid FC MinOnlineFCInitiator '%s', " + "MinOnlineFCInitiator must be a digit.") % text) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + if text and text.strip() and text.strip().isdigit(): try: minimum_fc_initiator = int(text.strip()) - if minimum_fc_initiator < 0: - msg = (_("Minimum FC initiator number %(num)s cannot" - " be set to a negative number.") - % {"num": minimum_fc_initiator}) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) except Exception as err: msg = (_("Minimum FC initiator number %(num)s is set" " too large, reason is %(err)s") - % {"num": minimum_fc_initiator, "err": err}) + % {"num": text.strip(), "err": err}) LOG.error(msg) raise exception.InvalidInput(reason=msg) setattr(self.conf, 'min_fc_ini_online', minimum_fc_initiator) diff --git a/Cinder/Ocata/huawei_conf.py b/Cinder/Ocata/huawei_conf.py index b60c635..d7ff363 100644 --- a/Cinder/Ocata/huawei_conf.py +++ b/Cinder/Ocata/huawei_conf.py @@ -560,19 +560,19 @@ def _replication_pair_sync_speed(self, xml_root): def _get_local_minimum_fc_initiator(self, xml_root): text = xml_root.findtext('FC/MinOnlineFCInitiator') minimum_fc_initiator = constants.DEFAULT_MINIMUM_FC_INITIATOR_ONLINE + if text and not text.isdigit(): + msg = (_("Invalid FC MinOnlineFCInitiator '%s', " + "MinOnlineFCInitiator must be a digit.") % text) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + if text and text.strip() and text.strip().isdigit(): try: minimum_fc_initiator = int(text.strip()) - if minimum_fc_initiator < 0: - msg 
= (_("Minimum FC initiator number %(num)s cannot" - " be set to a negative number.") - % {"num": minimum_fc_initiator}) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) except Exception as err: msg = (_("Minimum FC initiator number %(num)s is set" " too large, reason is %(err)s") - % {"num": minimum_fc_initiator, "err": err}) + % {"num": text.strip(), "err": err}) LOG.error(msg) raise exception.InvalidInput(reason=msg) setattr(self.conf, 'min_fc_ini_online', minimum_fc_initiator) diff --git a/Cinder/Pike/huawei_conf.py b/Cinder/Pike/huawei_conf.py index b60c635..d7ff363 100644 --- a/Cinder/Pike/huawei_conf.py +++ b/Cinder/Pike/huawei_conf.py @@ -560,19 +560,19 @@ def _replication_pair_sync_speed(self, xml_root): def _get_local_minimum_fc_initiator(self, xml_root): text = xml_root.findtext('FC/MinOnlineFCInitiator') minimum_fc_initiator = constants.DEFAULT_MINIMUM_FC_INITIATOR_ONLINE + if text and not text.isdigit(): + msg = (_("Invalid FC MinOnlineFCInitiator '%s', " + "MinOnlineFCInitiator must be a digit.") % text) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + if text and text.strip() and text.strip().isdigit(): try: minimum_fc_initiator = int(text.strip()) - if minimum_fc_initiator < 0: - msg = (_("Minimum FC initiator number %(num)s cannot" - " be set to a negative number.") - % {"num": minimum_fc_initiator}) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) except Exception as err: msg = (_("Minimum FC initiator number %(num)s is set" " too large, reason is %(err)s") - % {"num": minimum_fc_initiator, "err": err}) + % {"num": text.strip(), "err": err}) LOG.error(msg) raise exception.InvalidInput(reason=msg) setattr(self.conf, 'min_fc_ini_online', minimum_fc_initiator) diff --git a/Cinder/Pike/huawei_driver.py b/Cinder/Pike/huawei_driver.py index 9310508..adb2d3f 100644 --- a/Cinder/Pike/huawei_driver.py +++ b/Cinder/Pike/huawei_driver.py @@ -2997,8 +2997,8 @@ def initialize_connection(self, volume, connector): iqns_in_host = 
( self.client.get_host_iscsi_initiators(host_id)) if not (wwns_in_host or iqns_in_host or - self.client.is_host_associated_to_hostgroup( - host_id)): + self.client.is_host_associated_to_hostgroup( + host_id)): self.client.remove_host(host_id) msg = ("There is an Fc initiator in an invalid " "state. If you want to continue to attach " diff --git a/Cinder/Queens/huawei_conf.py b/Cinder/Queens/huawei_conf.py index b60c635..d7ff363 100644 --- a/Cinder/Queens/huawei_conf.py +++ b/Cinder/Queens/huawei_conf.py @@ -560,19 +560,19 @@ def _replication_pair_sync_speed(self, xml_root): def _get_local_minimum_fc_initiator(self, xml_root): text = xml_root.findtext('FC/MinOnlineFCInitiator') minimum_fc_initiator = constants.DEFAULT_MINIMUM_FC_INITIATOR_ONLINE + if text and not text.isdigit(): + msg = (_("Invalid FC MinOnlineFCInitiator '%s', " + "MinOnlineFCInitiator must be a digit.") % text) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + if text and text.strip() and text.strip().isdigit(): try: minimum_fc_initiator = int(text.strip()) - if minimum_fc_initiator < 0: - msg = (_("Minimum FC initiator number %(num)s cannot" - " be set to a negative number.") - % {"num": minimum_fc_initiator}) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) except Exception as err: msg = (_("Minimum FC initiator number %(num)s is set" " too large, reason is %(err)s") - % {"num": minimum_fc_initiator, "err": err}) + % {"num": text.strip(), "err": err}) LOG.error(msg) raise exception.InvalidInput(reason=msg) setattr(self.conf, 'min_fc_ini_online', minimum_fc_initiator) diff --git a/Cinder/Rocky/huawei_base_driver.py b/Cinder/Rocky/huawei_base_driver.py index 08afaab..9384201 100644 --- a/Cinder/Rocky/huawei_base_driver.py +++ b/Cinder/Rocky/huawei_base_driver.py @@ -261,7 +261,7 @@ def _update_support_capability(self): if self.support_capability["Effective Capacity"]: self.support_capability["SmartDedupe[\s\S]*LUN"] = True 
self.support_capability["SmartCompression[\s\S]*LUN"] = True - del self.support_capability["Effective Capacity"] + del self.support_capability["Effective Capacity"] self._update_hypermetro_capability() self._update_replication_capability() @@ -720,7 +720,7 @@ def _change_same_host_lun_id(self, local_mapping, remote_mapping): if local_mapping['hostlun_id'] == remote_mapping['hostlun_id']: return local_mapping['hostlun_id'] - for i in xrange(1, 512): + for i in range(1, 512): if i in loc_aval_host_lun_ids and i in rmt_aval_host_lun_ids: same_host_lun_id = i break diff --git a/Cinder/Rocky/huawei_utils.py b/Cinder/Rocky/huawei_utils.py index 81d8373..6e7414d 100644 --- a/Cinder/Rocky/huawei_utils.py +++ b/Cinder/Rocky/huawei_utils.py @@ -425,7 +425,7 @@ def get_lun_info(client, volume): lun_info = client.get_lun_info_by_name(volume_name) if not lun_info and metadata.get('huawei_lun_id'): - lun_info = client.get_lun_info_by_id(metadata['huawei_lun_id']) + lun_info = client.get_lun_info_filter_id(metadata['huawei_lun_id']) if lun_info and ('huawei_lun_wwn' in metadata and lun_info.get('WWN') != metadata['huawei_lun_wwn']): diff --git a/Cinder/Rocky/rest_client.py b/Cinder/Rocky/rest_client.py index b2cfb87..fb034c5 100644 --- a/Cinder/Rocky/rest_client.py +++ b/Cinder/Rocky/rest_client.py @@ -173,6 +173,12 @@ def get_lun_info_by_id(self, lun_id): _assert_result(result, 'Get lun info by id %s error.', lun_id) return result['data'] + def get_lun_info_filter_id(self, lun_id): + result = self.get("?filter=ID::%(lun_id)s", lun_id=lun_id) + _assert_result(result, 'Get lun info filter id %s error.', lun_id) + if result.get('data'): + return result['data'][0] + def get_lun_host_lun_id(self, host_id, lun_id): result = self.get( "/associate?ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%(id)s", id=host_id) diff --git a/Cinder/Stein/huawei_base_driver.py b/Cinder/Stein/huawei_base_driver.py index 08afaab..9384201 100644 --- a/Cinder/Stein/huawei_base_driver.py +++ 
b/Cinder/Stein/huawei_base_driver.py @@ -261,7 +261,7 @@ def _update_support_capability(self): if self.support_capability["Effective Capacity"]: self.support_capability["SmartDedupe[\s\S]*LUN"] = True self.support_capability["SmartCompression[\s\S]*LUN"] = True - del self.support_capability["Effective Capacity"] + del self.support_capability["Effective Capacity"] self._update_hypermetro_capability() self._update_replication_capability() @@ -720,7 +720,7 @@ def _change_same_host_lun_id(self, local_mapping, remote_mapping): if local_mapping['hostlun_id'] == remote_mapping['hostlun_id']: return local_mapping['hostlun_id'] - for i in xrange(1, 512): + for i in range(1, 512): if i in loc_aval_host_lun_ids and i in rmt_aval_host_lun_ids: same_host_lun_id = i break diff --git a/Cinder/Stein/huawei_utils.py b/Cinder/Stein/huawei_utils.py index 81d8373..6e7414d 100644 --- a/Cinder/Stein/huawei_utils.py +++ b/Cinder/Stein/huawei_utils.py @@ -425,7 +425,7 @@ def get_lun_info(client, volume): lun_info = client.get_lun_info_by_name(volume_name) if not lun_info and metadata.get('huawei_lun_id'): - lun_info = client.get_lun_info_by_id(metadata['huawei_lun_id']) + lun_info = client.get_lun_info_filter_id(metadata['huawei_lun_id']) if lun_info and ('huawei_lun_wwn' in metadata and lun_info.get('WWN') != metadata['huawei_lun_wwn']): diff --git a/Cinder/Stein/rest_client.py b/Cinder/Stein/rest_client.py index b2cfb87..fb034c5 100644 --- a/Cinder/Stein/rest_client.py +++ b/Cinder/Stein/rest_client.py @@ -173,6 +173,12 @@ def get_lun_info_by_id(self, lun_id): _assert_result(result, 'Get lun info by id %s error.', lun_id) return result['data'] + def get_lun_info_filter_id(self, lun_id): + result = self.get("?filter=ID::%(lun_id)s", lun_id=lun_id) + _assert_result(result, 'Get lun info filter id %s error.', lun_id) + if result.get('data'): + return result['data'][0] + def get_lun_host_lun_id(self, host_id, lun_id): result = self.get( 
"/associate?ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%(id)s", id=host_id) diff --git a/Cinder/Train/__init__.py b/Cinder/Train/__init__.py new file mode 100644 index 0000000..51eff13 --- /dev/null +++ b/Cinder/Train/__init__.py @@ -0,0 +1 @@ +"""Version: 2.2.4""" diff --git a/Cinder/Train/constants.py b/Cinder/Train/constants.py new file mode 100644 index 0000000..4eaf905 --- /dev/null +++ b/Cinder/Train/constants.py @@ -0,0 +1,164 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +STATUS_INITIALIZE = '0' +STATUS_HEALTH = '1' +LUN_TYPE = '11' +SNAPSHOT_TYPE = '27' +BLOCK_POOL_TYPE = '1' + +HOSTGROUP_PREFIX = 'OpenStack_HostGroup_' +LUNGROUP_PREFIX = 'OpenStack_LunGroup_' +MAPPING_VIEW_PREFIX = 'OpenStack_Mapping_View_' +PORTGROUP_PREFIX = 'OpenStack_PortGroup_' +QOS_NAME_PREFIX = 'OpenStack_' + +FC_PORT_CONNECTED = '10' +CAPACITY_UNIT = 1024 * 1024 * 2 +DEFAULT_WAIT_TIMEOUT = 3600 * 24 * 30 +DEFAULT_WAIT_INTERVAL = 5 +MAX_NAME_LENGTH = 31 +SOCKET_TIMEOUT = 52 +LOGIN_SOCKET_TIMEOUT = 4 +PWD_EXPIRED_OR_INITIAL = (3, 4) + +LUN_STATUS = (LUN_ONLINE, LUN_INITIALIZING, LUN_OFFLINE) = ('27', '53', '28') +SNAPSHOT_STATUS = ( + SNAPSHOT_INITIALIZING, SNAPSHOT_ACTIVATED, SNAPSHOT_UNACTIVATED +) = ('53', '43', '45') + +MIGRATION_STATUS_IN_PROCESS = ( + MIGRATION_NORMAL, MIGRATION_QUEUING, MIGRATION_MIGRATING +) = ('1', '37', '75') +MIGRATION_STATUS_COMPLETE = (MIGRATION_COMPLETE,) = ('76',) +LUNCOPY_STATUS_COMPLETE = (LUNCOPY_COMPLETE,) = ('40',) + +ERROR_CONNECT_TO_SERVER = -403 +ERROR_UNAUTHORIZED_TO_SERVER = -401 +OBJECT_NAME_ALREADY_EXIST = 1077948993 +OBJECT_ID_NOT_UNIQUE = 1077948997 +ERROR_VOLUME_NOT_EXIST = 1077939726 +ERROR_LUN_NOT_EXIST = 1077936859 +SNAPSHOT_NOT_EXIST = 1077937880 +OBJECT_NOT_EXIST = 1077948996 +HYPERMETRO_NOT_EXIST = 1077674242 +HYPERMETRO_NOT_IN_GROUP = 1077675021 +HYPERMETROGROUP_NOT_EXIST = 1077675010 +HYPERMETRO_ALREADY_IN_GROUP = 1077675038 +NO_HYPERMETRO_EXIST_IN_GROUP = 1077675022 +HOSTGROUP_NOT_IN_MAPPINGVIEW = 1073804552 +PORTGROUP_NOT_IN_MAPPINGVIEW = 1073804553 +LUNGROUP_NOT_IN_MAPPINGVIEW = 1073804554 +MIGRATION_NOT_EXIST = 1073806607 +LUNCOPY_NOT_EXIST = 50338560 +LUNCOPY_ALREADY_STOPPED = 1077950178 +LUNCOPY_COMPLETED = 1077950180 +PORTGROUP_NOT_EXIST = 1077951832 +HOSTGROUP_NOT_EXIST = 1077937500 +HOST_NOT_IN_HOSTGROUP = 1073745412 +PORT_NOT_IN_PORTGROUP = 1073807618 +INITIATOR_NOT_IN_HOST = 1077950342 +HOST_NOT_EXIST = 1077937498 +MAPPINGVIEW_NOT_EXIST = 1077951819 +HOST_ALREADY_IN_HOSTGROUP = 1077937501 
+PORT_ALREADY_IN_PORTGROUP = 1077951833 +HOSTGROUP_ALREADY_IN_MAPPINGVIEW = 1073804556 +PORTGROUP_ALREADY_IN_MAPPINGVIEW = 1073804558 +LUNGROUP_ALREADY_IN_MAPPINGVIEW = 1073804560 + +METRO_RUNNING_STATUS = (METRO_RUNNING_NORMAL, METRO_RUNNING_SYNC, + METRO_RUNNING_STOP, RUNNING_TO_BE_SYNC + ) = ('1', '23', '41', '100') +METRO_HEALTH_NORMAL = '1' + +THICK_LUNTYPE = '0' +THIN_LUNTYPE = '1' +LUN_TYPE_MAP = {'Thick': THICK_LUNTYPE, + 'Thin': THIN_LUNTYPE} + +QOS_INACTIVATED = '45' +LOWER_LIMIT_KEYS = ('MINIOPS', 'LATENCY', 'MINBANDWIDTH') +UPPER_LIMIT_KEYS = ('MAXIOPS', 'MAXBANDWIDTH') + +REPLICA_SYNC_MODEL = '1' +REPLICA_ASYNC_MODEL = '2' +REPLICA_SPEED = '2' +REPLICA_PERIOD = '3600' +REPLICA_SECOND_RO = '2' +REPLICA_SECOND_RW = '3' +REPLICA_CG_PERIOD = '60' + +REPLICA_RUNNING_STATUS_SYNC = '23' +REPLICA_RUNNING_STATUS_NORMAL = '1' +REPLICA_RUNNING_STATUS_SPLIT = '26' +REPLICA_RUNNING_STATUS_INTERRUPTED = '34' +REPLICA_SECRES_DATA_SYNC = '1' +REPLICA_SECRES_DATA_COMPLETE = '2' +REPLICA_HEALTH_STATUS_NORMAL = '1' + +REPLICATION_PAIR_NOT_EXIST = 1077937923 +REPLICATION_GROUP_NOT_EXIST = 1077937924 +REPLICATION_PAIR_NOT_GROUP_MEMBER = 1077937927 +REPLICATION_GROUP_IS_EMPTY = 1077937960 + +VALID_PRODUCT = ('V3', 'V5', '18000', 'Dorado') +TIER_DISK_TYPES = ('ssd', 'sas', 'nl_sas') + +AVAILABLE_FEATURE_STATUS = (1, 2) +CHECK_FEATURES = { + 'SmartTier': None, + 'SmartThin': None, + 'SmartQoS': 'ioclass', + 'SmartPartition': 'cachepartition', + 'SmartCache': 'smartcachepartition', + 'SmartMigration': 'LUN_MIGRATION', + 'HyperMetro': 'HyperMetroPair', + 'HyperReplication': 'REPLICATIONPAIR', + 'HyperSnap': 'snapshot', + 'HyperCopy': 'LUNCOPY', + 'SmartDedupe[\s\S]*LUN': None, + 'SmartCompression[\s\S]*LUN': None, + 'Effective Capacity': None, +} + +LUN_COPY_SPEED_TYPES = ( + LUN_COPY_SPEED_LOW, + LUN_COPY_SPEED_MEDIUM, + LUN_COPY_SPEED_HIGH, + LUN_COPY_SPEED_HIGHEST +) = ('1', '2', '3', '4') +DEFAULT_CLONE_MODE = "luncopy" + +HYPER_SYNC_SPEED_TYPES = ( + HYPER_SYNC_SPEED_LOW, 
+ HYPER_SYNC_SPEED_MEDIUM, + HYPER_SYNC_SPEED_HIGH, + HYPER_SYNC_SPEED_HIGHEST +) = ('1', '2', '3', '4') + +REPLICA_SYNC_SPEED_TYPES = ( + REPLICA_SYNC_SPEED_LOW, + REPLICA_SYNC_SPEED_MEDIUM, + REPLICA_SYNC_SPEED_HIGH, + REPLICA_SYNC_SPEED_HIGHEST +) = ('1', '2', '3', '4') + +CLONE_STATUS_HEALTH = '0' +CLONE_STATUS_COMPLETE = (CLONE_COMPLETE,) = ('2',) +CLONE_PAIR_NOT_EXIST = "1073798147" +SUPPORT_CLONE_PAIR_VERSION = "V600R003C00" +GET_PATCH_NUM = 100 + +DEFAULT_MINIMUM_FC_INITIATOR_ONLINE = 0 diff --git a/Cinder/Train/huawei_base_driver.py b/Cinder/Train/huawei_base_driver.py new file mode 100644 index 0000000..9384201 --- /dev/null +++ b/Cinder/Train/huawei_base_driver.py @@ -0,0 +1,771 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import re +import six +import uuid + +from oslo_config import cfg +from oslo_log import log as logging + +from cinder import exception +from cinder.i18n import _ +from cinder import objects +from cinder.objects import fields + +from cinder.volume.drivers.huawei import constants +from cinder.volume.drivers.huawei import huawei_conf +from cinder.volume.drivers.huawei import huawei_flow +from cinder.volume.drivers.huawei import huawei_utils +from cinder.volume.drivers.huawei import hypermetro +from cinder.volume.drivers.huawei import replication +from cinder.volume.drivers.huawei import rest_client + +LOG = logging.getLogger(__name__) + +huawei_opts = [ + cfg.StrOpt('cinder_huawei_conf_file', + default='/etc/cinder/cinder_huawei_conf.xml', + help='The configuration file for Huawei driver.'), + cfg.DictOpt('hypermetro_device', + secret=True, + help='To represent a hypermetro target device, which takes ' + 'standard dict config form: hypermetro_device = ' + 'key1:value1,key2:value2...'), +] + +CONF = cfg.CONF +CONF.register_opts(huawei_opts) + + +class HuaweiBaseDriver(object): + VERSION = "1.0.0" + + def __init__(self, *args, **kwargs): + super(HuaweiBaseDriver, self).__init__(*args, **kwargs) + + if not self.configuration: + msg = _('Configuration is not found.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + self.configuration.append_config_values(huawei_opts) + + self.active_backend_id = kwargs.get('active_backend_id') + self.conf = huawei_conf.HuaweiConf(self.configuration) + self.local_cli = None + self.hypermetro_rmt_cli = None + self.replication_rmt_cli = None + self.support_capability = {} + + def do_setup(self, context): + self.conf.update_config_value() + + self.local_cli = rest_client.RestClient( + self.configuration.san_address, + self.configuration.san_user, + self.configuration.san_password, + self.configuration.vstore_name, + self.configuration.ssl_cert_verify, + self.configuration.ssl_cert_path) + self.local_cli.login() + + if 
self.configuration.hypermetro: + self.hypermetro_rmt_cli = rest_client.RestClient( + self.configuration.hypermetro['san_address'], + self.configuration.hypermetro['san_user'], + self.configuration.hypermetro['san_password'], + self.configuration.hypermetro['vstore_name'], + ) + self.hypermetro_rmt_cli.login() + + if self.configuration.replication: + self.replication_rmt_cli = rest_client.RestClient( + self.configuration.replication['san_address'], + self.configuration.replication['san_user'], + self.configuration.replication['san_password'], + self.configuration.replication['vstore_name'], + ) + self.replication_rmt_cli.login() + + def check_for_setup_error(self): + def _check_storage_pools(client, config_pools): + pools = client.get_all_pools() + pool_names = [p['NAME'] for p in pools if + p.get('USAGETYPE', constants.BLOCK_POOL_TYPE) == + constants.BLOCK_POOL_TYPE] + + for pool_name in config_pools: + if pool_name not in pool_names: + msg = _('Storage pool %s does not exist.') % pool_name + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + _check_storage_pools(self.local_cli, self.configuration.storage_pools) + if self.configuration.hypermetro: + _check_storage_pools( + self.hypermetro_rmt_cli, + self.configuration.hypermetro['storage_pools']) + if self.configuration.replication: + _check_storage_pools( + self.replication_rmt_cli, + self.configuration.replication['storage_pools']) + + # If host is failed-over, switch the local and remote client. 
+ if (self.configuration.replication and self.active_backend_id == + self.configuration.replication['backend_id']): + self._switch_replication_clients() + + def backup_use_temp_snapshot(self): + return self.configuration.safe_get("backup_use_temp_snapshot") + + def create_export(self, context, volume, connector=None): + pass + + def ensure_export(self, context, volume): + pass + + def remove_export(self, context, volume): + pass + + def create_export_snapshot(self, context, snapshot, connector): + pass + + def remove_export_snapshot(self, context, snapshot): + pass + + def _get_capacity(self, pool_info): + """Get free capacity and total capacity of the pool.""" + free = pool_info.get('DATASPACE', pool_info['USERFREECAPACITY']) + total = pool_info.get('USERTOTALCAPACITY') + return (float(total) / constants.CAPACITY_UNIT, + float(free) / constants.CAPACITY_UNIT) + + def _get_disk_type(self, pool_info): + """Get disk type of the pool.""" + pool_disks = [] + for i, x in enumerate(constants.TIER_DISK_TYPES): + if (pool_info.get('TIER%dCAPACITY' % i) and + pool_info.get('TIER%dCAPACITY' % i) != '0'): + pool_disks.append(x) + + if len(pool_disks) > 1: + pool_disks = ['mix'] + + return pool_disks[0] if pool_disks else None + + def _get_smarttier(self, disk_type): + return disk_type is not None and disk_type == 'mix' + + def _update_pool_stats(self): + pools = [] + for pool_name in self.configuration.storage_pools: + pool = { + 'pool_name': pool_name, + 'reserved_percentage': + self.configuration.reserved_percentage, + 'max_over_subscription_ratio': + self.configuration.max_over_subscription_ratio, + 'smartpartition': + self.support_capability['SmartPartition'], + 'smartcache': self.support_capability['SmartCache'], + 'QoS_support': self.support_capability['SmartQoS'], + 'thin_provisioning_support': + self.support_capability['SmartThin'], + 'thick_provisioning_support': True, + 'hypermetro': self.support_capability['HyperMetro'], + 'consistentcygroup_support': True, + 
'consistent_group_snapshot_enabled': + self.support_capability['HyperSnap'], + 'location_info': self.local_cli.device_id, + 'replication_enabled': + self.support_capability['HyperReplication'], + 'replication_type': ['sync', 'async'], + 'multiattach': True, + 'dedup': [self.support_capability['SmartDedupe[\s\S]*LUN'], + False], + 'compression': + [self.support_capability['SmartCompression[\s\S]*LUN'], + False], + 'huawei_controller': True, + 'huawei_application_type': False, + } + + if self.configuration.san_product == "Dorado": + pool['thick_provisioning_support'] = False + pool['huawei_application_type'] = True + + pool_info = self.local_cli.get_pool_by_name(pool_name) + if pool_info: + total_capacity, free_capacity = self._get_capacity(pool_info) + disk_type = self._get_disk_type(pool_info) + tier_support = self._get_smarttier(disk_type) + + pool['total_capacity_gb'] = total_capacity + pool['free_capacity_gb'] = free_capacity + pool['smarttier'] = (self.support_capability['SmartTier'] and + tier_support) + if disk_type: + pool['disk_type'] = disk_type + + pools.append(pool) + + return pools + + def _update_hypermetro_capability(self): + if self.hypermetro_rmt_cli: + feature_status = self.hypermetro_rmt_cli.get_feature_status() + if (feature_status.get('HyperMetro') not in + constants.AVAILABLE_FEATURE_STATUS): + self.support_capability['HyperMetro'] = False + else: + self.support_capability['HyperMetro'] = False + + def _update_replication_capability(self): + if self.replication_rmt_cli: + feature_status = self.replication_rmt_cli.get_feature_status() + if (feature_status.get('HyperReplication') not in + constants.AVAILABLE_FEATURE_STATUS): + self.support_capability['HyperReplication'] = False + else: + self.support_capability['HyperReplication'] = False + + def _update_support_capability(self): + feature_status = self.local_cli.get_feature_status() + + for c in constants.CHECK_FEATURES: + self.support_capability[c] = False + for f in feature_status: + if 
re.match(c, f): + self.support_capability[c] = ( + feature_status[f] in + constants.AVAILABLE_FEATURE_STATUS) + break + else: + if constants.CHECK_FEATURES[c]: + self.support_capability[c] = self.local_cli.check_feature( + constants.CHECK_FEATURES[c]) + + if self.support_capability["Effective Capacity"]: + self.support_capability["SmartDedupe[\s\S]*LUN"] = True + self.support_capability["SmartCompression[\s\S]*LUN"] = True + del self.support_capability["Effective Capacity"] + + self._update_hypermetro_capability() + self._update_replication_capability() + + LOG.debug('Update backend capabilities: %s.', self.support_capability) + + def _update_volume_stats(self): + self._update_support_capability() + pools = self._update_pool_stats() + + self._stats['pools'] = pools + self._stats['volume_backend_name'] = ( + self.configuration.safe_get('volume_backend_name') or + self.__class__.__name__) + self._stats['driver_version'] = self.VERSION + self._stats['vendor_name'] = 'Huawei' + self._stats['replication_enabled'] = ( + self.support_capability['HyperReplication']) + if self._stats['replication_enabled']: + self._stats['replication_targets'] = ( + [self.configuration.replication['backend_id']]) + + def get_volume_stats(self): + """Get volume status and reload huawei config file.""" + self.conf.update_config_value() + self._update_volume_stats() + + def create_volume(self, volume): + (lun_id, lun_wwn, hypermetro_id, replication_id + ) = huawei_flow.create_volume( + volume, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, self.configuration, + self.support_capability) + + model_update = huawei_utils.get_volume_model_update( + volume, huawei_lun_id=lun_id, huawei_lun_wwn=lun_wwn, + hypermetro_id=hypermetro_id, replication_id=replication_id, + huawei_sn=self.local_cli.device_id + ) + return model_update + + def delete_volume(self, volume): + try: + huawei_flow.delete_volume( + volume, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, 
self.configuration) + except Exception as exc: + if huawei_utils.is_not_exist_exc(exc): + return + LOG.exception('Delete volume %s failed.', volume.id) + raise + + def migrate_volume(self, ctxt, volume, host): + try: + huawei_flow.migrate_volume(volume, host, self.local_cli, + self.support_capability) + except Exception: + LOG.exception('Migrate volume %s by backend failed.', volume.id) + return False, {} + + return True, {} + + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): + new_name = huawei_utils.encode_name(volume.id) + org_metadata = huawei_utils.get_volume_private_data(volume) + new_metadata = huawei_utils.get_volume_private_data(new_volume) + + try: + if org_metadata.get('huawei_sn') == new_metadata.get('huawei_sn'): + self.local_cli.rename_lun(org_metadata['huawei_lun_id'], + new_name[:-4] + '-org') + self.local_cli.rename_lun(new_metadata['huawei_lun_id'], + new_name, description=volume.name) + except Exception: + LOG.exception('Unable to rename lun %(id)s to %(name)s.', + {'id': new_metadata['huawei_lun_id'], + 'name': new_name}) + name_id = new_volume.name_id + else: + LOG.info("Successfully rename lun %(id)s to %(name)s.", + {'id': new_metadata['huawei_lun_id'], + 'name': new_name}) + name_id = None + + return {'_name_id': name_id, + 'provider_location': huawei_utils.to_string(**new_metadata), + } + + def create_volume_from_snapshot(self, volume, snapshot): + (lun_id, lun_wwn, hypermetro_id, replication_id + ) = huawei_flow.create_volume_from_snapshot( + volume, snapshot, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, self.configuration, + self.support_capability) + + model_update = huawei_utils.get_volume_model_update( + volume, huawei_lun_id=lun_id, huawei_lun_wwn=lun_wwn, + hypermetro_id=hypermetro_id, replication_id=replication_id, + huawei_sn=self.local_cli.device_id + ) + return model_update + + def create_cloned_volume(self, volume, src_vref): + (lun_id, lun_wwn, hypermetro_id, 
replication_id + ) = huawei_flow.create_volume_from_volume( + volume, src_vref, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, self.configuration, + self.support_capability) + + model_update = huawei_utils.get_volume_model_update( + volume, huawei_lun_id=lun_id, huawei_lun_wwn=lun_wwn, + hypermetro_id=hypermetro_id, replication_id=replication_id, + huawei_sn=self.local_cli.device_id + ) + return model_update + + def extend_volume(self, volume, new_size): + huawei_flow.extend_volume( + volume, new_size, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, self.configuration) + + def create_snapshot(self, snapshot): + snapshot_id, snapshot_wwn = huawei_flow.create_snapshot( + snapshot, self.local_cli, self.support_capability) + self.local_cli.activate_snapshot(snapshot_id) + + location = huawei_utils.to_string( + huawei_snapshot_id=snapshot_id, + huawei_snapshot_wwn=snapshot_wwn) + return {'provider_location': location} + + def delete_snapshot(self, snapshot): + try: + huawei_flow.delete_snapshot(snapshot, self.local_cli) + except Exception as exc: + if huawei_utils.is_not_exist_exc(exc): + return + LOG.exception('Delete snapshot %s failed.', snapshot.id) + raise + + def retype(self, ctxt, volume, new_type, diff, host): + LOG.info('Start volume %(id)s retype. 
new_type: %(new_type)s, ' + 'diff: %(diff)s, host: %(host)s.', + {'id': volume.id, 'new_type': new_type, + 'diff': diff, 'host': host}) + + orig_lun_info = huawei_utils.get_lun_info(self.local_cli, volume) + if not orig_lun_info: + msg = _("Volume %s does not exist.") % volume.id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + new_opts = huawei_utils.get_volume_type_params(new_type) + if new_opts['compression'] is None: + new_opts['compression'] = (self.configuration.san_product + == "Dorado") + if new_opts['dedup'] is None: + new_opts['dedup'] = self.configuration.san_product == "Dorado" + + if huawei_utils.need_migrate(volume, host, new_opts, orig_lun_info): + hypermetro_id, replication_id = huawei_flow.retype_by_migrate( + volume, new_opts, host, self.local_cli, + self.hypermetro_rmt_cli, self.replication_rmt_cli, + self.configuration, self.support_capability) + else: + hypermetro_id, replication_id = huawei_flow.retype( + volume, new_opts, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, self.configuration, + self.support_capability) + + model_update = huawei_utils.get_volume_model_update( + volume, hypermetro_id=hypermetro_id, replication_id=replication_id) + + return True, model_update + + def manage_existing_get_size(self, volume, existing_ref): + lun_info = huawei_utils.get_external_lun_info(self.local_cli, + existing_ref) + if not lun_info: + msg = _("Lun %s to manage not exist.") % existing_ref + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + remainder = float(lun_info['CAPACITY']) % constants.CAPACITY_UNIT + if remainder > 0: + msg = _("LUN size must be times of 1GB.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + size = float(lun_info['CAPACITY']) / constants.CAPACITY_UNIT + return int(size) + + def manage_existing(self, volume, existing_ref): + (lun_id, lun_wwn, hypermetro_id, replication_id + ) = huawei_flow.manage_existing( + volume, existing_ref, 
self.local_cli, + self.hypermetro_rmt_cli, self.replication_rmt_cli, + self.configuration, self.support_capability) + + model_update = huawei_utils.get_volume_model_update( + volume, huawei_lun_id=lun_id, huawei_lun_wwn=lun_wwn, + hypermetro_id=hypermetro_id, replication_id=replication_id, + huawei_sn=self.local_cli.device_id + ) + return model_update + + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + snapshot_info = huawei_utils.get_external_snapshot_info( + self.local_cli, existing_ref) + if not snapshot_info: + msg = _("Snapshot %s not exist.") % existing_ref + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + remainder = float(snapshot_info['USERCAPACITY'] + ) % constants.CAPACITY_UNIT + if remainder > 0: + msg = _("Snapshot size must be times of 1GB.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + size = float(snapshot_info['USERCAPACITY']) / constants.CAPACITY_UNIT + return int(size) + + def manage_existing_snapshot(self, snapshot, existing_ref): + snapshot_id, snapshot_wwn = huawei_flow.manage_existing_snapshot( + snapshot, existing_ref, self.local_cli) + + location = huawei_utils.to_string( + huawei_snapshot_id=snapshot_id, + huawei_snapshot_wwn=snapshot_wwn) + return {'provider_location': location} + + def create_group(self, context, group): + huawei_flow.create_group( + group, self.local_cli, self.hypermetro_rmt_cli, + self.replication_rmt_cli, self.configuration, + self.support_capability) + return {'status': fields.GroupStatus.AVAILABLE} + + def create_group_from_src(self, context, group, volumes, + group_snapshot=None, snapshots=None, + source_group=None, source_vols=None): + model_update = self.create_group(context, group) + volumes_model_update = [] + delete_snapshots = False + + if not snapshots and source_vols: + snapshots = [] + for src_vol in source_vols: + vol_kwargs = { + 'id': src_vol.id, + 'provider_location': src_vol.provider_location, + } + snapshot_kwargs = {'id': 
six.text_type(uuid.uuid4()), + 'volume': objects.Volume(**vol_kwargs)} + snapshot = objects.Snapshot(**snapshot_kwargs) + snapshots.append(snapshot) + + snapshots_model_update = self._create_group_snapshot(snapshots) + for i, model in enumerate(snapshots_model_update): + snapshot = snapshots[i] + snapshot.provider_location = model['provider_location'] + + delete_snapshots = True + + if snapshots: + try: + for i, vol in enumerate(volumes): + snapshot = snapshots[i] + vol_model_update = self.create_volume_from_snapshot( + vol, snapshot) + vol_model_update.update({'id': vol.id}) + volumes_model_update.append(vol_model_update) + finally: + if delete_snapshots: + self._delete_group_snapshot(snapshots) + + return model_update, volumes_model_update + + def delete_group(self, context, group, volumes): + opts = huawei_utils.get_group_type_params(group) + + hypermetro_group = any(opt for opt in opts if opt.get('hypermetro')) + if hypermetro_group: + hypermetro_mgr = hypermetro.HuaweiHyperMetro( + self.local_cli, self.hypermetro_rmt_cli, + self.configuration.hypermetro) + hypermetro_mgr.delete_consistencygroup(group.id, volumes) + + replication_group = any(opt for opt in opts + if opt.get('replication_enabled')) + if replication_group: + replication_mgr = replication.ReplicationManager( + self.local_cli, self.replication_rmt_cli, + self.configuration.replication) + replication_mgr.delete_group(group.id, volumes) + + model_update = {'status': fields.GroupStatus.DELETED} + + volumes_model_update = [] + for volume in volumes: + update = {'id': volume.id} + try: + self.delete_volume(volume) + update['status'] = 'deleted' + except Exception: + update['status'] = 'error_deleting' + finally: + volumes_model_update.append(update) + + return model_update, volumes_model_update + + def update_group(self, context, group, + add_volumes=None, remove_volumes=None): + opts = huawei_utils.get_group_type_params(group) + + hypermetro_group = any(opt for opt in opts if opt.get('hypermetro')) + 
if hypermetro_group: + hypermetro_mgr = hypermetro.HuaweiHyperMetro( + self.local_cli, self.hypermetro_rmt_cli, + self.configuration.hypermetro) + hypermetro_mgr.update_consistencygroup( + group.id, add_volumes, remove_volumes) + + replication_group = any(opt for opt in opts + if opt.get('replication_enabled')) + if replication_group: + replication_mgr = replication.ReplicationManager( + self.local_cli, self.replication_rmt_cli, + self.configuration.replication) + replication_mgr.update_group( + group.id, add_volumes, remove_volumes) + + model_update = {'status': fields.GroupStatus.AVAILABLE} + + return model_update, None, None + + def create_group_snapshot(self, context, group_snapshot, snapshots): + try: + snapshots_model_update = self._create_group_snapshot(snapshots) + except Exception: + LOG.exception("Failed to create snapshots for group %s.", + group_snapshot.id) + raise + + model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} + return model_update, snapshots_model_update + + def _create_group_snapshot(self, snapshots): + snapshots_model_update = [] + created_snapshots = [] + + for snapshot in snapshots: + try: + snapshot_id, snapshot_wwn = huawei_flow.create_snapshot( + snapshot, self.local_cli, self.support_capability) + except Exception: + LOG.exception("Failed to create snapshot %s of group.", + snapshot.id) + for snap_id in created_snapshots: + self.local_cli.delete_snapshot(snap_id) + raise + + location = huawei_utils.to_string( + huawei_snapshot_id=snapshot_id, + huawei_snapshot_wwn=snapshot_wwn) + snap_model_update = { + 'id': snapshot.id, + 'status': fields.SnapshotStatus.AVAILABLE, + 'provider_location': location, + } + snapshots_model_update.append(snap_model_update) + created_snapshots.append(snapshot_id) + + try: + self.local_cli.activate_snapshot(created_snapshots) + except Exception: + LOG.exception("Failed to activate group snapshots %s.", + created_snapshots) + for snap_id in created_snapshots: + 
self.local_cli.delete_snapshot(snap_id) + raise + + return snapshots_model_update + + def delete_group_snapshot(self, context, group_snapshot, snapshots): + try: + snapshots_model_update = self._delete_group_snapshot(snapshots) + except Exception: + LOG.exception("Failed to delete snapshots for group %s.", + group_snapshot.id) + raise + + model_update = {'status': fields.GroupSnapshotStatus.DELETED} + return model_update, snapshots_model_update + + def _delete_group_snapshot(self, snapshots): + snapshots_model_update = [] + for snapshot in snapshots: + try: + self.delete_snapshot(snapshot) + snapshot_model = {'id': snapshot.id, + 'status': fields.SnapshotStatus.DELETED} + snapshots_model_update.append(snapshot_model) + except Exception: + LOG.exception("Failed to delete snapshot %s of group.", + snapshot.id) + raise + + return snapshots_model_update + + def failover_host(self, context, volumes, secondary_id=None, groups=None): + if secondary_id == 'default': + if not self.active_backend_id: + return None, [], [] + + volumes_update = huawei_flow.failback( + volumes, self.local_cli, self.replication_rmt_cli, + self.configuration) + secondary_id = '' + elif secondary_id in ( + None, self.configuration.replication['backend_id']): + if (self.active_backend_id == + self.configuration.replication['backend_id']): + # Already failover, return success + return self.active_backend_id, [], [] + + volumes_update = huawei_flow.failover( + volumes, self.local_cli, self.replication_rmt_cli, + self.configuration) + secondary_id = self.configuration.replication['backend_id'] + else: + msg = "Invalid secondary id %s." 
% secondary_id + raise exception.InvalidReplicationTarget(reason=msg) + + self.active_backend_id = secondary_id + self._switch_replication_clients() + + return secondary_id, volumes_update, [] + + def _switch_replication_clients(self): + self.local_cli, self.replication_rmt_cli = ( + self.replication_rmt_cli, self.local_cli) + (self.configuration.iscsi_info, + self.configuration.replication['iscsi_info']) = ( + self.configuration.replication['iscsi_info'], + self.configuration.iscsi_info + ) + + def _change_same_host_lun_id(self, local_mapping, remote_mapping): + loc_aval_host_lun_ids = local_mapping.get('aval_host_lun_ids', []) + rmt_aval_host_lun_ids = remote_mapping.get('aval_host_lun_ids', []) + + if local_mapping['hostlun_id'] == remote_mapping['hostlun_id']: + return local_mapping['hostlun_id'] + + for i in range(1, 512): + if i in loc_aval_host_lun_ids and i in rmt_aval_host_lun_ids: + same_host_lun_id = i + break + else: + same_host_lun_id = None + + if not same_host_lun_id: + msg = _("Can't find common host lun id for hypermetro volume.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.local_cli.change_hostlun_id( + local_mapping['mappingview_id'], local_mapping['lun_id'], + same_host_lun_id) + self.hypermetro_rmt_cli.change_hostlun_id( + remote_mapping['mappingview_id'], remote_mapping['lun_id'], + same_host_lun_id) + return same_host_lun_id + + def _merge_iscsi_mapping(self, local_mapping, remote_mapping, + same_host_lun_id): + local_mapping['target_iqns'].extend(remote_mapping['target_iqns']) + local_mapping['target_portals'].extend( + remote_mapping['target_portals']) + local_mapping['target_luns'] = [same_host_lun_id] * len( + local_mapping['target_portals']) + return local_mapping + + def _merge_fc_mapping(self, local_mapping, remote_mapping, + same_host_lun_id): + self._merge_ini_tgt_map(local_mapping['initiator_target_map'], + remote_mapping['initiator_target_map']) + local_mapping['target_lun'] = same_host_lun_id + 
local_mapping['target_wwn'] += remote_mapping['target_wwn'] + + return local_mapping + + def _merge_ini_tgt_map(self, loc, rmt): + for k in rmt: + loc[k] = loc.get(k, []) + rmt[k] + + def _is_volume_multi_attach_to_same_host(self, volume, connector): + attachments = volume.volume_attachment + if volume.multiattach and len(attachments) > 1 and sum( + 1 for a in attachments if a.connector == connector) > 1: + LOG.info("Volume is multi-attach and attached to the same host" + " multiple times") + return diff --git a/Cinder/Train/huawei_conf.py b/Cinder/Train/huawei_conf.py new file mode 100644 index 0000000..688bdd9 --- /dev/null +++ b/Cinder/Train/huawei_conf.py @@ -0,0 +1,516 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Set Huawei private configuration into Configuration object. + +For convenient access to the private configuration, we parse the Huawei +config file and set every property on the Configuration object as an attribute.
+""" + +import base64 +from defusedxml import ElementTree as ET +import os +import re +import six + +from oslo_log import log as logging + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.huawei import constants + +LOG = logging.getLogger(__name__) + + +class HuaweiConf(object): + def __init__(self, conf): + self.conf = conf + self.last_modify_time = None + + def update_config_value(self): + file_time = os.stat(self.conf.cinder_huawei_conf_file).st_mtime + if self.last_modify_time == file_time: + return + + self.last_modify_time = file_time + tree = ET.parse(self.conf.cinder_huawei_conf_file) + xml_root = tree.getroot() + self._encode_authentication(tree, xml_root) + + attr_funcs = ( + self._san_address, + self._san_user, + self._san_password, + self._san_vstore, + self._san_product, + self._ssl_cert_path, + self._ssl_cert_verify, + self._iscsi_info, + self._fc_info, + self._hyper_pair_sync_speed, + self._replication_pair_sync_speed, + self._hypermetro_devices, + self._replication_devices, + self._lun_type, + self._lun_write_type, + self._lun_prefetch, + self._storage_pools, + self._lun_copy_speed, + self._lun_copy_mode, + self._lun_copy_wait_interval, + self._lun_timeout, + self._get_minimum_fc_initiator, + ) + + for f in attr_funcs: + f(xml_root) + + def _encode_authentication(self, tree, xml_root): + name_node = xml_root.find('Storage/UserName') + pwd_node = xml_root.find('Storage/UserPassword') + vstore_node = xml_root.find('Storage/vStoreName') + + need_encode = False + if name_node is not None and not name_node.text.startswith('!$$$'): + encoded = base64.b64encode(six.b(name_node.text)).decode() + name_node.text = '!$$$' + encoded + need_encode = True + + if pwd_node is not None and not pwd_node.text.startswith('!$$$'): + encoded = base64.b64encode(six.b(pwd_node.text)).decode() + pwd_node.text = '!$$$' + encoded + need_encode = True + + if vstore_node is not None and not vstore_node.text.startswith('!$$$'): + encoded = 
base64.b64encode(six.b(vstore_node.text)).decode() + vstore_node.text = '!$$$' + encoded + need_encode = True + + if need_encode: + tree.write(self.conf.cinder_huawei_conf_file, 'UTF-8') + + def _san_address(self, xml_root): + text = xml_root.findtext('Storage/RestURL') + if not text: + msg = _("RestURL is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + addrs = list(set([x.strip() for x in text.split(';') if x.strip()])) + setattr(self.conf, 'san_address', addrs) + + def _san_user(self, xml_root): + text = xml_root.findtext('Storage/UserName') + if not text: + msg = _("UserName is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + user = base64.b64decode(six.b(text[4:])).decode() + setattr(self.conf, 'san_user', user) + + def _san_password(self, xml_root): + text = xml_root.findtext('Storage/UserPassword') + if not text: + msg = _("UserPassword is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + pwd = base64.b64decode(six.b(text[4:])).decode() + setattr(self.conf, 'san_password', pwd) + + def _san_vstore(self, xml_root): + vstore = None + text = xml_root.findtext('Storage/vStoreName') + if text: + vstore = base64.b64decode(six.b(text[4:])).decode() + setattr(self.conf, 'vstore_name', vstore) + + def _ssl_cert_path(self, xml_root): + text = xml_root.findtext('Storage/SSLCertPath') + setattr(self.conf, 'ssl_cert_path', text) + + def _ssl_cert_verify(self, xml_root): + value = False + text = xml_root.findtext('Storage/SSLCertVerify') + if text: + if text.lower() in ('true', 'false'): + value = text.lower() == 'true' + else: + msg = _("SSLCertVerify configured error.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + setattr(self.conf, 'ssl_cert_verify', value) + + def _set_extra_constants_by_product(self, product): + extra_constants = {} + if product == 'Dorado': + extra_constants['QOS_SPEC_KEYS'] = ( + 'maxIOPS', 'maxBandWidth', 'IOType') + 
extra_constants['QOS_IOTYPES'] = ('2',) + extra_constants['SUPPORT_LUN_TYPES'] = ('Thin',) + extra_constants['DEFAULT_LUN_TYPE'] = 'Thin' + extra_constants['SUPPORT_CLONE_MODE'] = ('fastclone', 'luncopy') + else: + extra_constants['QOS_SPEC_KEYS'] = ( + 'maxIOPS', 'minIOPS', 'minBandWidth', + 'maxBandWidth', 'latency', 'IOType') + extra_constants['QOS_IOTYPES'] = ('0', '1', '2') + extra_constants['SUPPORT_LUN_TYPES'] = ('Thick', 'Thin') + extra_constants['DEFAULT_LUN_TYPE'] = 'Thick' + extra_constants['SUPPORT_CLONE_MODE'] = ('luncopy',) + + for k in extra_constants: + setattr(constants, k, extra_constants[k]) + + def _san_product(self, xml_root): + text = xml_root.findtext('Storage/Product') + if not text: + msg = _("SAN product is not configured.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + product = text.strip() + if product not in constants.VALID_PRODUCT: + msg = _("Invalid SAN product %(text)s, SAN product must be " + "in %(valid)s.") % {'text': product, + 'valid': constants.VALID_PRODUCT} + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + self._set_extra_constants_by_product(product) + setattr(self.conf, 'san_product', product) + + def _lun_type(self, xml_root): + lun_type = constants.DEFAULT_LUN_TYPE + text = xml_root.findtext('LUN/LUNType') + if text: + lun_type = text.strip() + if lun_type not in constants.LUN_TYPE_MAP: + msg = _("Invalid lun type %s is configured.") % lun_type + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if lun_type not in constants.SUPPORT_LUN_TYPES: + msg = _("%(array)s array requires %(valid)s lun type, " + "but %(conf)s is specified." 
+ ) % {'array': self.conf.san_product, + 'valid': constants.SUPPORT_LUN_TYPES, + 'conf': lun_type} + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + setattr(self.conf, 'lun_type', constants.LUN_TYPE_MAP[lun_type]) + + def _lun_write_type(self, xml_root): + text = xml_root.findtext('LUN/WriteType') + if text: + write_type = text.strip() + if write_type: + setattr(self.conf, 'write_type', write_type) + + def _lun_prefetch(self, xml_root): + node = xml_root.find('LUN/Prefetch') + if node is not None: + if 'Type' in node.attrib: + prefetch_type = node.attrib['Type'].strip() + setattr(self.conf, 'prefetch_type', prefetch_type) + + if 'Value' in node.attrib: + prefetch_value = node.attrib['Value'].strip() + setattr(self.conf, 'prefetch_value', prefetch_value) + + def _storage_pools(self, xml_root): + text = xml_root.findtext('LUN/StoragePool') + if not text: + msg = _('Storage pool is not configured.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + pools = set(x.strip() for x in text.split(';') if x.strip()) + if not pools: + msg = _('No valid storage pool configured.') + LOG.error(msg) + raise exception.InvalidInput(msg) + + setattr(self.conf, 'storage_pools', list(pools)) + + def _iscsi_info(self, xml_root): + iscsi_info = {} + text = xml_root.findtext('iSCSI/DefaultTargetIP') + if text: + iscsi_info['default_target_ips'] = [ + ip.strip() for ip in text.split() if ip.strip()] + + initiators = {} + nodes = xml_root.findall('iSCSI/Initiator') + for node in nodes or []: + if 'Name' in node.attrib: + initiators[node.attrib['Name']] = node.attrib + if 'HostName' in node.attrib: + initiators[node.attrib['HostName']] = node.attrib + + if nodes and not initiators: + msg = _("Name or HostName must be set one") + LOG.error(msg) + raise exception.InvalidInput(msg) + + iscsi_info['initiators'] = initiators + self._check_hostname_regex_config(iscsi_info) + setattr(self.conf, 'iscsi_info', iscsi_info) + + def _fc_info(self, xml_root): + fc_info = {} + 
initiators = {} + nodes = xml_root.findall('FC/Initiator') + for node in nodes or []: + if 'Name' in node.attrib: + initiators[node.attrib['Name']] = node.attrib + if 'HostName' in node.attrib: + initiators[node.attrib['HostName']] = node.attrib + + if nodes and not initiators: + msg = _("Name or HostName must be set one") + LOG.error(msg) + raise exception.InvalidInput(msg) + + fc_info['initiators'] = initiators + self._check_hostname_regex_config(fc_info) + setattr(self.conf, 'fc_info', fc_info) + + def _check_hostname_regex_config(self, info): + for item in info['initiators'].keys(): + ini = info['initiators'][item] + if ini.get("HostName"): + try: + if ini.get("HostName") == '*': + continue + re.compile(ini['HostName']) + except Exception as err: + msg = _('Invalid initiator configuration. ' + 'Reason: %s.') % err + LOG.error(msg) + raise exception.InvalidInput(msg) + + def _convert_one_iscsi_info(self, ini_text): + # get initiator configure attr list + attr_list = re.split('[{;}]', ini_text) + + # get initiator configures + ini = {} + for attr in attr_list: + if not attr: + continue + + pair = attr.split(':', 1) + if pair[0] == 'CHAPinfo': + value = pair[1].replace('#', ';', 1) + else: + value = pair[1] + ini[pair[0]] = value + if 'Name' not in ini and 'HostName' not in ini: + msg = _('Name or HostName must be specified for' + ' initiator.') + LOG.error(msg) + raise exception.InvalidInput(msg) + + return ini + + def _parse_remote_initiator_info(self, dev, ini_type): + ini_info = {'default_target_ips': []} + + if dev.get('iscsi_default_target_ip'): + ini_info['default_target_ips'] = dev[ + 'iscsi_default_target_ip'].split(';') + + initiators = {} + if ini_type in dev: + # Analyze initiators configure text, convert to: + # [{'Name':'xxx'}, {'Name':'xxx','CHAPinfo':'mm-usr#mm-pwd'}] + ini_list = re.split('\n', dev[ini_type]) + + for text in ini_list: + ini = self._convert_one_iscsi_info(text.strip()) + if 'Name' in ini: + initiators[ini['Name']] = ini + if 
'HostName' in ini: + initiators[ini['HostName']] = ini + + if ini_list and not initiators: + msg = _("Name or HostName must be set one") + LOG.error(msg) + raise exception.InvalidInput(msg) + + ini_info['initiators'] = initiators + self._check_hostname_regex_config(ini_info) + return ini_info + + def _hypermetro_devices(self, xml_root): + dev = self.conf.safe_get('hypermetro_device') + config = {} + + if dev: + config = { + 'san_address': dev['san_address'].split(';'), + 'san_user': dev['san_user'], + 'san_password': dev['san_password'], + 'vstore_name': dev.get('vstore_name'), + 'metro_domain': dev['metro_domain'], + 'storage_pools': dev['storage_pool'].split(';')[:1], + 'iscsi_info': self._parse_remote_initiator_info( + dev, 'iscsi_info'), + 'fc_info': self._parse_remote_initiator_info( + dev, 'fc_info'), + 'sync_speed': self.conf.hyper_sync_speed, + 'metro_sync_completed': dev['metro_sync_completed'] + if 'metro_sync_completed' in dev else "True" + } + + setattr(self.conf, 'hypermetro', config) + + def _replication_devices(self, xml_root): + replication_devs = self.conf.safe_get('replication_device') + config = {} + + if replication_devs: + dev = replication_devs[0] + config = { + 'backend_id': dev['backend_id'], + 'san_address': dev['san_address'].split(';'), + 'san_user': dev['san_user'], + 'san_password': dev['san_password'], + 'vstore_name': dev.get('vstore_name'), + 'storage_pools': dev['storage_pool'].split(';')[:1], + 'iscsi_info': self._parse_remote_initiator_info( + dev, 'iscsi_info'), + 'fc_info': self._parse_remote_initiator_info( + dev, 'fc_info'), + 'sync_speed': self.conf.replica_sync_speed, + } + + setattr(self.conf, 'replication', config) + + def _lun_copy_speed(self, xml_root): + text = xml_root.findtext('LUN/LUNCopySpeed') + if text and text.strip() not in constants.LUN_COPY_SPEED_TYPES: + msg = (_("Invalid LUNCopySpeed '%(text)s', LUNCopySpeed must " + "be between %(low)s and %(high)s.") + % {"text": text, "low": constants.LUN_COPY_SPEED_LOW, 
+ "high": constants.LUN_COPY_SPEED_HIGHEST}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if not text: + speed = constants.LUN_COPY_SPEED_MEDIUM + else: + speed = text.strip() + setattr(self.conf, 'lun_copy_speed', int(speed)) + + def _lun_copy_mode(self, xml_root): + clone_mode = constants.DEFAULT_CLONE_MODE + text = xml_root.findtext('LUN/LUNCloneMode') + if text: + clone_mode = text.strip() + if clone_mode not in constants.SUPPORT_CLONE_MODE: + msg = _("%(array)s array requires %(valid)s lun type, " + "but %(conf)s is specified." + ) % {'array': self.conf.san_product, + 'valid': constants.SUPPORT_CLONE_MODE, + 'conf': clone_mode} + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + setattr(self.conf, 'clone_mode', clone_mode) + + def _hyper_pair_sync_speed(self, xml_root): + text = xml_root.findtext('LUN/HyperSyncSpeed') + if text and text.strip() not in constants.HYPER_SYNC_SPEED_TYPES: + msg = (_("Invalid HyperSyncSpeed '%(text)s', HyperSyncSpeed must " + "be between %(low)s and %(high)s.") + % {"text": text, "low": constants.HYPER_SYNC_SPEED_LOW, + "high": constants.HYPER_SYNC_SPEED_HIGHEST}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if not text: + speed = constants.HYPER_SYNC_SPEED_MEDIUM + else: + speed = text.strip() + setattr(self.conf, 'hyper_sync_speed', int(speed)) + + def _replication_pair_sync_speed(self, xml_root): + text = xml_root.findtext('LUN/ReplicaSyncSpeed') + if text and text.strip() not in constants.HYPER_SYNC_SPEED_TYPES: + msg = (_("Invalid ReplicaSyncSpeed '%(text)s', ReplicaSyncSpeed " + "must be between %(low)s and %(high)s.") + % {"text": text, "low": constants.REPLICA_SYNC_SPEED_LOW, + "high": constants.REPLICA_SYNC_SPEED_HIGHEST}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if not text: + speed = constants.REPLICA_SYNC_SPEED_MEDIUM + else: + speed = text.strip() + setattr(self.conf, 'replica_sync_speed', int(speed)) + + def _lun_copy_wait_interval(self, 
xml_root): + text = xml_root.findtext('LUN/LUNcopyWaitInterval') + + if text and not text.isdigit(): + msg = (_("Invalid LUN_Copy_Wait_Interval '%s', " + "LUN_Copy_Wait_Interval must be a digit.") + % text) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL + setattr(self.conf, 'lun_copy_wait_interval', int(interval)) + + def _lun_timeout(self, xml_root): + text = xml_root.findtext('LUN/Timeout') + + if text and not text.isdigit(): + msg = (_("Invalid LUN timeout '%s', LUN timeout must be a digit.") + % text) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + interval = text.strip() if text else constants.DEFAULT_WAIT_TIMEOUT + setattr(self.conf, 'lun_timeout', int(interval)) + + def _get_minimum_fc_initiator(self, xml_root): + text = xml_root.findtext('FC/MinOnlineFCInitiator') + minimum_fc_initiator = constants.DEFAULT_MINIMUM_FC_INITIATOR_ONLINE + + if text and not text.isdigit(): + msg = (_("Invalid FC MinOnlineFCInitiator '%s', " + "MinOnlineFCInitiator must be a digit.") % text) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if text and text.strip() and text.strip().isdigit(): + try: + minimum_fc_initiator = int(text.strip()) + except Exception as err: + msg = (_("Minimum FC initiator number %(num)s is set" + " too large, reason is %(err)s") + % {"num": text.strip(), "err": err}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + setattr(self.conf, 'min_fc_ini_online', + minimum_fc_initiator) diff --git a/Cinder/Train/huawei_driver.py b/Cinder/Train/huawei_driver.py new file mode 100644 index 0000000..71df409 --- /dev/null +++ b/Cinder/Train/huawei_driver.py @@ -0,0 +1,250 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from cinder import coordination +from cinder import exception +from cinder.i18n import _ +from cinder import interface + +from cinder.volume import driver +from cinder.volume.drivers.huawei import constants +from cinder.volume.drivers.huawei import huawei_base_driver +from cinder.volume.drivers.huawei import huawei_flow +from cinder.volume.drivers.huawei import huawei_utils +from cinder.zonemanager import utils as zm_utils + + +LOG = logging.getLogger(__name__) + + +@interface.volumedriver +class HuaweiISCSIDriver(huawei_base_driver.HuaweiBaseDriver, + driver.ISCSIDriver): + def __init__(self, *args, **kwargs): + super(HuaweiISCSIDriver, self).__init__(*args, **kwargs) + + def get_volume_stats(self, refresh=False): + if not self._stats or refresh: + super(HuaweiISCSIDriver, self).get_volume_stats() + self._stats['storage_protocol'] = 'iSCSI' + + return self._stats + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def initialize_connection(self, volume, connector): + LOG.info('Initialize iscsi connection for volume %(id)s, ' + 'connector info %(conn)s.', + {'id': volume.id, 'conn': connector}) + metadata = huawei_utils.get_volume_private_data(volume) + if metadata.get('hypermetro'): + if not connector.get('multipath'): + msg = _("Mapping hypermetro volume must use multipath.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if not self.hypermetro_rmt_cli: + msg = _("Mapping hypermetro volume requires remote.") + LOG.error(msg) + raise 
exception.VolumeBackendAPIException(data=msg) + + local_mapping = huawei_flow.initialize_iscsi_connection( + volume, constants.LUN_TYPE, connector, self.local_cli, + self.configuration) + if metadata.get('hypermetro'): + hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume) + if not hypermetro: + msg = _("Mapping hypermetro remote volume error.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + remote_mapping = huawei_flow.initialize_remote_iscsi_connection( + hypermetro['ID'], connector, self.hypermetro_rmt_cli, + self.configuration) + + same_host_lun_id = self._change_same_host_lun_id( + local_mapping, remote_mapping) + mapping_info = self._merge_iscsi_mapping( + local_mapping, remote_mapping, same_host_lun_id) + else: + mapping_info = local_mapping + + mapping_info.pop('aval_host_lun_ids', None) + conn = {'driver_volume_type': 'iscsi', + 'data': mapping_info} + LOG.info('Initialize iscsi connection successfully: %s.', conn) + return conn + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def terminate_connection(self, volume, connector, **kwargs): + LOG.info('Terminate iscsi connection for volume %(id)s, ' + 'connector info %(conn)s.', + {'id': volume.id, 'conn': connector}) + if self._is_volume_multi_attach_to_same_host(volume, connector): + return + + metadata = huawei_utils.get_volume_private_data(volume) + if metadata.get('hypermetro'): + hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume) + if hypermetro: + huawei_flow.terminate_remote_iscsi_connection( + hypermetro['ID'], connector, self.hypermetro_rmt_cli) + + huawei_flow.terminate_iscsi_connection( + volume, constants.LUN_TYPE, connector, self.local_cli) + LOG.info('Terminate iscsi connection successfully.') + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def initialize_connection_snapshot(self, snapshot, connector, **kwargs): + LOG.info('Initialize iscsi connection for snapshot %(id)s, ' + 'connector info 
%(conn)s.', + {'id': snapshot.id, 'conn': connector}) + mapping_info = huawei_flow.initialize_iscsi_connection( + snapshot, constants.SNAPSHOT_TYPE, connector, self.local_cli, + self.configuration) + + mapping_info.pop('aval_host_lun_ids', None) + conn = {'driver_volume_type': 'iscsi', + 'data': mapping_info} + LOG.info('Initialize iscsi connection successfully: %s.', conn) + return conn + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def terminate_connection_snapshot(self, snapshot, connector, **kwargs): + LOG.info('Terminate iscsi connection for snapshot %(id)s, ' + 'connector info %(conn)s.', + {'id': snapshot.id, 'conn': connector}) + huawei_flow.terminate_iscsi_connection( + snapshot, constants.SNAPSHOT_TYPE, connector, self.local_cli) + LOG.info('Terminate iscsi connection successfully.') + + +@interface.volumedriver +class HuaweiFCDriver(huawei_base_driver.HuaweiBaseDriver, + driver.FibreChannelDriver): + def __init__(self, *args, **kwargs): + super(HuaweiFCDriver, self).__init__(*args, **kwargs) + self.fc_san = zm_utils.create_lookup_service() + + def get_volume_stats(self, refresh=False): + if not self._stats or refresh: + super(HuaweiFCDriver, self).get_volume_stats() + self._stats['storage_protocol'] = 'FC' + + return self._stats + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def initialize_connection(self, volume, connector): + LOG.info('Initialize FC connection for volume %(id)s, ' + 'connector info %(conn)s.', + {'id': volume.id, 'conn': connector}) + + metadata = huawei_utils.get_volume_private_data(volume) + if metadata.get('hypermetro'): + if not connector.get('multipath'): + msg = _("Mapping hypermetro volume must use multipath.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if not self.hypermetro_rmt_cli: + msg = _("Mapping hypermetro volume requires remote.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + local_mapping = 
huawei_flow.initialize_fc_connection( + volume, constants.LUN_TYPE, connector, self.fc_san, self.local_cli, + self.configuration) + if metadata.get('hypermetro'): + hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume) + if not hypermetro: + msg = _("Mapping hypermetro remote volume error.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + remote_mapping = huawei_flow.initialize_remote_fc_connection( + hypermetro['ID'], connector, self.fc_san, + self.hypermetro_rmt_cli, self.configuration) + same_host_lun_id = self._change_same_host_lun_id( + local_mapping, remote_mapping) + mapping_info = self._merge_fc_mapping( + local_mapping, remote_mapping, same_host_lun_id) + else: + mapping_info = local_mapping + + mapping_info.pop('aval_host_lun_ids', None) + conn = {'driver_volume_type': 'fibre_channel', + 'data': mapping_info} + LOG.info('Initialize FC connection successfully: %s.', conn) + zm_utils.add_fc_zone(conn) + return conn + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def terminate_connection(self, volume, connector, **kwargs): + LOG.info('Terminate FC connection for volume %(id)s, ' + 'connector info %(conn)s.', + {'id': volume.id, 'conn': connector}) + if self._is_volume_multi_attach_to_same_host(volume, connector): + return + + metadata = huawei_utils.get_volume_private_data(volume) + if metadata.get('hypermetro'): + hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume) + if hypermetro: + rmt_ini_tgt_map = huawei_flow.terminate_remote_fc_connection( + hypermetro['ID'], connector, self.fc_san, + self.hypermetro_rmt_cli) + + loc_ini_tgt_map = huawei_flow.terminate_fc_connection( + volume, constants.LUN_TYPE, connector, self.fc_san, self.local_cli) + if metadata.get('hypermetro'): + self._merge_ini_tgt_map(loc_ini_tgt_map, rmt_ini_tgt_map) + + conn = {'driver_volume_type': 'fibre_channel', + 'data': {'initiator_target_map': loc_ini_tgt_map}, + } + LOG.info('Terminate FC connection successfully: 
%s.', conn) + zm_utils.remove_fc_zone(conn) + return conn + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def initialize_connection_snapshot(self, snapshot, connector, **kwargs): + LOG.info('Initialize FC connection for snapshot %(id)s, ' + 'connector info %(conn)s.', + {'id': snapshot.id, 'conn': connector}) + mapping_info = huawei_flow.initialize_fc_connection( + snapshot, constants.SNAPSHOT_TYPE, connector, self.fc_san, + self.local_cli) + + mapping_info.pop('aval_host_lun_ids', None) + conn = {'driver_volume_type': 'fibre_channel', + 'data': mapping_info} + LOG.info('Initialize FC connection successfully: %s.', conn) + zm_utils.add_fc_zone(conn) + return conn + + @coordination.synchronized('huawei-mapping-{connector[host]}') + def terminate_connection_snapshot(self, snapshot, connector, **kwargs): + LOG.info('Terminate FC connection for snapshot %(id)s, ' + 'connector info %(conn)s.', + {'id': snapshot.id, 'conn': connector}) + ini_tgt_map = huawei_flow.terminate_fc_connection( + snapshot, constants.SNAPSHOT_TYPE, connector, self.fc_san, + self.local_cli) + + conn = {'driver_volume_type': 'fibre_channel', + 'data': {'initiator_target_map': ini_tgt_map}, + } + LOG.info('Terminate FC connection successfully: %s.', conn) + zm_utils.remove_fc_zone(conn) + return conn diff --git a/Cinder/Train/huawei_flow.py b/Cinder/Train/huawei_flow.py new file mode 100644 index 0000000..339c628 --- /dev/null +++ b/Cinder/Train/huawei_flow.py @@ -0,0 +1,2553 @@ +# Copyright (c) 2017 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ipaddress +import json +import six +import uuid + +from oslo_log import log as logging +from oslo_utils import strutils + +import taskflow.engines +from taskflow.patterns import linear_flow +from taskflow import task +from taskflow.types import failure + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.huawei import constants +from cinder.volume.drivers.huawei import huawei_utils +from cinder.volume.drivers.huawei import hypermetro +from cinder.volume.drivers.huawei import replication +from cinder.volume.drivers.huawei import smartx +from cinder.volume import volume_utils + +LOG = logging.getLogger(__name__) + + +class LunOptsCheckTask(task.Task): + default_provides = 'opts' + + def __init__(self, client, feature_support, new_opts=None, + *args, **kwargs): + super(LunOptsCheckTask, self).__init__(*args, **kwargs) + self.client = client + self.feature_support = feature_support + self.new_opts = new_opts + + def execute(self, volume): + if self.new_opts: + opts = self.new_opts + else: + opts = huawei_utils.get_volume_params(volume) + + if opts['hypermetro'] and opts['replication_enabled']: + msg = _("Hypermetro and replication cannot be " + "specified at the same time.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + feature_pairs = ( + ('qos', 'SmartQoS'), + ('smartcache', 'SmartCache'), + ('smartpartition', 'SmartPartition'), + ('hypermetro', 'HyperMetro'), + ('replication_enabled', 'HyperReplication'), + ('policy', 'SmartTier'), + ('dedup', 'SmartDedupe[\s\S]*LUN'), + ('compression', 
'SmartCompression[\s\S]*LUN'), + ) + + for feature in feature_pairs: + if opts.get(feature[0]) and not self.feature_support[feature[1]]: + msg = _("Huawei storage doesn't support %s.") % feature[1] + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if opts.get('smartcache'): + smartcache = smartx.SmartCache(self.client) + smartcache.check_cache_valid(opts['cachename']) + + if opts.get('smartpartition'): + smartpartition = smartx.SmartPartition(self.client) + smartpartition.check_partition_valid(opts['partitionname']) + + return opts + + +class CreateLunTask(task.Task): + default_provides = ('lun_id', 'lun_info') + + def __init__(self, client, configuration, feature_support, + *args, **kwargs): + super(CreateLunTask, self).__init__(*args, **kwargs) + self.client = client + self.configuration = configuration + self.feature_support = feature_support + + def _get_lun_application_name(self, opts, lun_params): + if opts.get('applicationname') is not None: + workload_type_id = self.client.get_workload_type_id( + opts['applicationname']) + if workload_type_id: + lun_params['WORKLOADTYPEID'] = workload_type_id + else: + msg = _("The workload type %s is not exist. 
Please create it " + "on the array") % opts['applicationname'] + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + return lun_params + + def execute(self, volume, opts, src_size=None): + pool_name = volume_utils.extract_host(volume.host, level='pool') + pool_id = self.client.get_pool_id(pool_name) + if not pool_id: + msg = _("Pool %s doesn't exist in storage.") % pool_name + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + lun_params = { + 'NAME': huawei_utils.encode_name(volume.id), + 'PARENTID': pool_id, + 'DESCRIPTION': volume.name, + 'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type), + 'CAPACITY': int(int(src_size) * constants.CAPACITY_UNIT if src_size + else int(volume.size) * constants.CAPACITY_UNIT), + } + + if opts.get('controllername'): + controller = self.client.get_controller_id(opts['controllername']) + if controller: + lun_params['OWNINGCONTROLLER'] = controller + if hasattr(self.configuration, 'write_type'): + lun_params['WRITEPOLICY'] = self.configuration.write_type + if hasattr(self.configuration, 'prefetch_type'): + lun_params['PREFETCHPOLICY'] = self.configuration.prefetch_type + if hasattr(self.configuration, 'prefetch_value'): + lun_params['PREFETCHVALUE'] = self.configuration.prefetch_value + if opts.get('policy'): + lun_params['DATATRANSFERPOLICY'] = opts['policy'] + + if opts.get('dedup') is not None: + lun_params['ENABLESMARTDEDUP'] = opts['dedup'] + elif not self.feature_support['SmartDedupe[\s\S]*LUN']: + lun_params['ENABLESMARTDEDUP'] = False + + if opts.get('compression') is not None: + lun_params['ENABLECOMPRESSION'] = opts['compression'] + elif not self.feature_support['SmartCompression[\s\S]*LUN']: + lun_params['ENABLECOMPRESSION'] = False + + lun_params = self._get_lun_application_name(opts, lun_params) + + lun = self.client.create_lun(lun_params) + return lun['ID'], lun + + def revert(self, result, **kwargs): + if isinstance(result, failure.Failure): + return + 
self.client.delete_lun(result[0]) + + +class WaitLunOnlineTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(WaitLunOnlineTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, lun_id): + huawei_utils.wait_lun_online(self.client, lun_id) + + +class AddQoSTask(task.Task): + default_provides = 'qos_id' + + def __init__(self, client, *args, **kwargs): + super(AddQoSTask, self).__init__(*args, **kwargs) + self.smartqos = smartx.SmartQos(client) + + def execute(self, lun_id, opts): + if opts.get('qos'): + qos_id = self.smartqos.add(opts['qos'], lun_id) + return qos_id + + def revert(self, result, lun_id, **kwargs): + if isinstance(result, failure.Failure): + return + if result: + self.smartqos.remove(result, lun_id) + + +class AddCacheTask(task.Task): + default_provides = 'cache_id' + + def __init__(self, client, *args, **kwargs): + super(AddCacheTask, self).__init__(*args, **kwargs) + self.smartcache = smartx.SmartCache(client) + + def execute(self, lun_id, opts): + if opts.get('smartcache'): + cache_id = self.smartcache.add(opts['cachename'], lun_id) + return cache_id + + def revert(self, result, lun_id, **kwargs): + if isinstance(result, failure.Failure): + return + if result: + self.smartcache.remove(result, lun_id) + + +class AddPartitionTask(task.Task): + default_provides = 'partition_id' + + def __init__(self, client, *args, **kwargs): + super(AddPartitionTask, self).__init__(*args, **kwargs) + self.smartpartition = smartx.SmartPartition(client) + + def execute(self, lun_id, opts): + if opts.get('smartpartition'): + partition_id = self.smartpartition.add( + opts['partitionname'], lun_id) + return partition_id + + def revert(self, result, lun_id, **kwargs): + if isinstance(result, failure.Failure): + return + if result: + self.smartpartition.remove(result, lun_id) + + +class CreateHyperMetroTask(task.Task): + default_provides = 'hypermetro_id' + + def __init__(self, local_cli, remote_cli, config, is_sync=True, + 
*args, **kwargs): + super(CreateHyperMetroTask, self).__init__(*args, **kwargs) + self.hypermetro = hypermetro.HuaweiHyperMetro( + local_cli, remote_cli, config) + self.loc_client = local_cli + self.rmt_client = remote_cli + self.sync = is_sync + + def execute(self, volume, lun_id, lun_info, opts): + metadata = huawei_utils.get_volume_private_data(volume) + + if opts.get('hypermetro') and not metadata.get('hypermetro'): + lun_keys = ('CAPACITY', 'ALLOCTYPE', 'PREFETCHPOLICY', + 'PREFETCHVALUE', 'WRITEPOLICY', 'DATATRANSFERPOLICY') + lun_params = {k: lun_info[k] for k in lun_keys if k in lun_info} + lun_params['NAME'] = huawei_utils.encode_name(volume.id) + lun_params['DESCRIPTION'] = volume.name + if (lun_info.get("WORKLOADTYPENAME") and + lun_info.get("WORKLOADTYPEID")): + workload_type_name = self.loc_client.get_workload_type_name( + lun_info['WORKLOADTYPEID']) + rmt_workload_type_id = self.rmt_client.get_workload_type_id( + workload_type_name) + if rmt_workload_type_id: + lun_params['WORKLOADTYPEID'] = rmt_workload_type_id + else: + msg = _("The workload type %s is not exist. 
Please create " + "it on the array") % workload_type_name + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + hypermetro_id = self.hypermetro.create_hypermetro( + lun_id, lun_params, self.sync) + elif not opts.get('hypermetro') and metadata.get('hypermetro'): + hypermetro_id = None + else: + hypermetro = huawei_utils.get_hypermetro(self.loc_client, volume) + hypermetro_id = hypermetro['ID'] if hypermetro else None + + return hypermetro_id + + def revert(self, result, volume, **kwargs): + if isinstance(result, failure.Failure): + return + if result: + self.hypermetro.delete_hypermetro(volume) + + +class AddHyperMetroGroupTask(task.Task): + def __init__(self, local_cli, remote_cli, config, *args, **kwargs): + super(AddHyperMetroGroupTask, self).__init__(*args, **kwargs) + self.hypermetro = hypermetro.HuaweiHyperMetro( + local_cli, remote_cli, config) + + def execute(self, volume, hypermetro_id): + if volume.group_id and hypermetro_id: + self.hypermetro.add_hypermetro_to_group( + volume.group_id, hypermetro_id) + + +class CreateReplicationTask(task.Task): + default_provides = 'replication_id' + + def __init__(self, local_cli, remote_cli, config, *args, **kwargs): + super(CreateReplicationTask, self).__init__(*args, **kwargs) + self.replication = replication.ReplicationManager( + local_cli, remote_cli, config) + self.loc_client = local_cli + self.rmt_client = remote_cli + + def execute(self, volume, lun_id, lun_info, opts): + data = huawei_utils.get_replication_data(volume) + pair_id = data.get('pair_id') + + if opts.get('replication_enabled') and not pair_id: + lun_keys = ('CAPACITY', 'ALLOCTYPE', 'PREFETCHPOLICY', + 'PREFETCHVALUE', 'WRITEPOLICY', 'DATATRANSFERPOLICY') + lun_params = {k: lun_info[k] for k in lun_keys if k in lun_info} + lun_params['NAME'] = huawei_utils.encode_name(volume.id) + lun_params['DESCRIPTION'] = volume.name + if (lun_info.get("WORKLOADTYPENAME") and + lun_info.get("WORKLOADTYPEID")): + workload_type_name = 
self.loc_client.get_workload_type_name( + lun_info['WORKLOADTYPEID']) + rmt_workload_type_id = self.rmt_client.get_workload_type_id( + workload_type_name) + if rmt_workload_type_id: + lun_params['WORKLOADTYPEID'] = rmt_workload_type_id + else: + msg = _("The workload type %s is not exist. Please create " + "it on the array") % workload_type_name + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + pair_id = self.replication.create_replica( + lun_id, lun_params, opts['replication_type']) + elif not opts.get('replication_enabled') and pair_id: + pair_id = None + + return pair_id + + def revert(self, result, **kwargs): + if isinstance(result, failure.Failure): + return + if result: + self.replication.delete_replica(result) + + +class AddReplicationGroupTask(task.Task): + def __init__(self, local_cli, remote_cli, config, *args, **kwargs): + super(AddReplicationGroupTask, self).__init__(*args, **kwargs) + self.replication = replication.ReplicationManager( + local_cli, remote_cli, config) + + def execute(self, volume, replication_id): + if volume.group_id and replication_id: + self.replication.add_replication_to_group( + volume.group_id, replication_id) + + +class CheckLunExistTask(task.Task): + default_provides = ('lun_info', 'lun_id') + + def __init__(self, client, *args, **kwargs): + super(CheckLunExistTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, volume): + lun_info = huawei_utils.get_lun_info(self.client, volume) + if not lun_info: + msg = _("Volume %s does not exist.") % volume.id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return lun_info, lun_info['ID'] + + +class GetLunIDTask(task.Task): + default_provides = 'lun_id' + + def __init__(self, client, *args, **kwargs): + super(GetLunIDTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, volume): + lun_info = huawei_utils.get_lun_info(self.client, volume) + if not lun_info: + LOG.error("Volume %s does not 
exist.", volume.id) + return None + + return lun_info['ID'] + + +class CheckLunMappedTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(CheckLunMappedTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, lun_info): + if lun_info.get('EXPOSEDTOINITIATOR') == 'true': + msg = _("LUN %s has been mapped to host. Now force to " + "delete it") % lun_info['ID'] + LOG.warning(msg) + huawei_utils.remove_lun_from_lungroup(self.client, lun_info["ID"]) + + +class DeleteHyperMetroTask(task.Task): + def __init__(self, local_cli, remote_cli, config, *args, **kwargs): + super(DeleteHyperMetroTask, self).__init__(*args, **kwargs) + self.hypermetro = hypermetro.HuaweiHyperMetro( + local_cli, remote_cli, config) + + def execute(self, volume, opts=None): + metadata = huawei_utils.get_volume_private_data(volume) + + if ((not opts or not opts.get('hypermetro')) + and metadata.get('hypermetro')): + self.hypermetro.delete_hypermetro(volume) + + +class DeleteReplicationTask(task.Task): + def __init__(self, local_cli, remote_cli, config, *args, **kwargs): + super(DeleteReplicationTask, self).__init__(*args, **kwargs) + self.replication = replication.ReplicationManager( + local_cli, remote_cli, config) + + def execute(self, volume, opts=None): + data = huawei_utils.get_replication_data(volume) + pair_id = data.get('pair_id') + if (not opts or not opts.get('replication_enabled')) and pair_id: + self.replication.delete_replica(pair_id) + + +class DeleteQoSTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(DeleteQoSTask, self).__init__(*args, **kwargs) + self.smartqos = smartx.SmartQos(client) + + def execute(self, lun_info): + qos_id = lun_info.get('IOCLASSID') + if qos_id: + self.smartqos.remove(qos_id, lun_info['ID']) + + +class DeleteCacheTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(DeleteCacheTask, self).__init__(*args, **kwargs) + self.smartcache = smartx.SmartCache(client) + + def 
execute(self, lun_info): + cache_id = lun_info.get('SMARTCACHEPARTITIONID') + if cache_id: + self.smartcache.remove(cache_id, lun_info['ID']) + + +class DeletePartitionTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(DeletePartitionTask, self).__init__(*args, **kwargs) + self.smartpartition = smartx.SmartPartition(client) + + def execute(self, lun_info): + partition_id = lun_info.get('CACHEPARTITIONID') + if partition_id: + self.smartpartition.remove(partition_id, lun_info['ID']) + + +class DeleteLunTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(DeleteLunTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, lun_id): + self.client.delete_lun(lun_id) + + +class CreateMigratedLunTask(task.Task): + default_provides = ('tgt_lun_id', 'tgt_lun_info') + + def __init__(self, client, host, feature_support, *args, **kwargs): + super(CreateMigratedLunTask, self).__init__(*args, **kwargs) + self.client = client + self.host = host + self.feature_support = feature_support + + def execute(self, lun_info, opts=None): + if not self.feature_support['SmartMigration']: + msg = _("Huawei storage doesn't support SmartMigration.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + target_device = self.host['capabilities']['location_info'] + if target_device != self.client.device_id: + msg = _("Migrate target %(tgt)s is not the same storage as " + "%(org)s.") % {'tgt': target_device, + 'org': self.client.device_id} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + pool_name = self.host['capabilities']['pool_name'] + pool_id = self.client.get_pool_id(pool_name) + if not pool_id: + msg = _("Pool %s doesn't exist in storage.") % pool_name + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if opts: + new_lun_type = opts.get('LUNType') + tier_policy = opts.get('policy') + else: + new_lun_type = None + tier_policy = None + + lun_keys = 
('DESCRIPTION', 'ALLOCTYPE', 'CAPACITY', 'WRITEPOLICY', + 'PREFETCHPOLICY', 'PREFETCHVALUE', 'DATATRANSFERPOLICY', + 'OWNINGCONTROLLER') + lun_params = {k: lun_info[k] for k in lun_keys if k in lun_info} + lun_params['NAME'] = lun_info['NAME'][:-4] + '-mig' + lun_params['PARENTID'] = pool_id + if new_lun_type: + lun_params['ALLOCTYPE'] = new_lun_type + if tier_policy: + lun_params['DATATRANSFERPOLICY'] = tier_policy + if lun_info.get("WORKLOADTYPENAME") and lun_info.get( + "WORKLOADTYPEID"): + lun_params["WORKLOADTYPEID"] = lun_info["WORKLOADTYPEID"] + + lun = self.client.create_lun(lun_params) + return lun['ID'], lun + + def revert(self, result, **kwargs): + if isinstance(result, failure.Failure): + return + self.client.delete_lun(result[0]) + + +class CreateMigrateTask(task.Task): + default_provides = 'migration_id' + + def __init__(self, client, *args, **kwargs): + super(CreateMigrateTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, src_lun_id, tgt_lun_id): + migration = self.client.create_lun_migration(src_lun_id, tgt_lun_id) + return migration['ID'] + + def revert(self, result, **kwargs): + if isinstance(result, failure.Failure): + return + self.client.delete_lun_migration(result) + + +class WaitMigrateDoneTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(WaitMigrateDoneTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, migration_id, tgt_lun_id): + def _migrate_done(): + migration = self.client.get_lun_migration(migration_id) + if (migration['RUNNINGSTATUS'] in + constants.MIGRATION_STATUS_IN_PROCESS): + return False + elif (migration['RUNNINGSTATUS'] in + constants.MIGRATION_STATUS_COMPLETE): + return True + else: + msg = _("Migration %s error.") % migration_id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + huawei_utils.wait_for_condition(_migrate_done, + constants.DEFAULT_WAIT_INTERVAL, + constants.DEFAULT_WAIT_TIMEOUT) + 
self.client.delete_lun_migration(migration_id) + self.client.delete_lun(tgt_lun_id) + + +class CheckSnapshotExistTask(task.Task): + default_provides = ('snapshot_info', 'snapshot_id') + + def __init__(self, client, *args, **kwargs): + super(CheckSnapshotExistTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, snapshot): + snapshot_info = huawei_utils.get_snapshot_info(self.client, snapshot) + if not snapshot_info: + msg = _("Snapshot %s does not exist.") % snapshot.id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return snapshot_info, snapshot_info['ID'] + + +class GetSnapshotIDTask(task.Task): + default_provides = 'snapshot_id' + + def __init__(self, client, *args, **kwargs): + super(GetSnapshotIDTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, snapshot): + snapshot_info = huawei_utils.get_snapshot_info(self.client, snapshot) + if not snapshot_info: + LOG.error("Snapshot %s does not exist.", snapshot.id) + return None + + return snapshot_info['ID'] + + +class CreateLunCopyTask(task.Task): + default_provides = 'luncopy_id' + + def __init__(self, client, feature_support, configuration, + *args, **kwargs): + super(CreateLunCopyTask, self).__init__(*args, **kwargs) + self.client = client + self.feature_support = feature_support + self.configuration = configuration + + def execute(self, volume, snapshot_id, lun_id): + if not self.feature_support['HyperCopy']: + msg = _("Huawei storage doesn't support HyperCopy.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + copy_name = huawei_utils.encode_name(volume.id) + metadata = huawei_utils.get_volume_private_data(volume) + copyspeed = metadata.get('copyspeed') + if not copyspeed: + copyspeed = self.configuration.lun_copy_speed + elif copyspeed not in constants.LUN_COPY_SPEED_TYPES: + msg = (_("LUN copy speed is: %(speed)s. 
It should be between " + "%(low)s and %(high)s.") + % {"speed": copyspeed, + "low": constants.LUN_COPY_SPEED_LOW, + "high": constants.LUN_COPY_SPEED_HIGH}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + luncopy_id = self.client.create_luncopy( + copy_name, snapshot_id, lun_id, copyspeed) + return luncopy_id + + def revert(self, result, **kwargs): + if isinstance(result, failure.Failure): + return + self.client.delete_luncopy(result) + + +class WaitLunCopyDoneTask(task.Task): + def __init__(self, client, configuration, *args, **kwargs): + super(WaitLunCopyDoneTask, self).__init__(*args, **kwargs) + self.client = client + self.configuration = configuration + + def execute(self, luncopy_id): + self.client.start_luncopy(luncopy_id) + + def _luncopy_done(): + luncopy = self.client.get_luncopy_info(luncopy_id) + if luncopy['HEALTHSTATUS'] != constants.STATUS_HEALTH: + msg = _("Luncopy %s is abnormal.") % luncopy_id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return (luncopy['RUNNINGSTATUS'] in + constants.LUNCOPY_STATUS_COMPLETE) + huawei_utils.wait_for_condition( + _luncopy_done, self.configuration.lun_copy_wait_interval, + self.configuration.lun_timeout) + + self.client.delete_luncopy(luncopy_id) + + +class CreateClonePairTask(task.Task): + default_provides = 'clone_pair_id' + + def __init__(self, client, feature_support, configuration, + *args, **kwargs): + super(CreateClonePairTask, self).__init__(*args, **kwargs) + self.client = client + self.feature_support = feature_support + self.configuration = configuration + + def execute(self, source_id, target_id): + clone_speed = self.configuration.lun_copy_speed + clone_pair_id = self.client.create_clone_pair( + source_id, target_id, clone_speed) + return clone_pair_id + + def revert(self, result, **kwargs): + if isinstance(result, failure.Failure): + return + self.client.delete_clone_pair(result) + + +class WaitClonePairDoneTask(task.Task): + def __init__(self, 
client, configuration, *args, **kwargs): + super(WaitClonePairDoneTask, self).__init__(*args, **kwargs) + self.client = client + self.configuration = configuration + + def execute(self, clone_pair_id): + def _clone_pair_done(): + clone_pair_info = self.client.get_clone_pair_info(clone_pair_id) + if clone_pair_info['copyStatus'] != constants.CLONE_STATUS_HEALTH: + msg = _("ClonePair %s is abnormal.") % clone_pair_id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return (clone_pair_info['syncStatus'] in + constants.CLONE_STATUS_COMPLETE) + + self.client.sync_clone_pair(clone_pair_id) + huawei_utils.wait_for_condition( + _clone_pair_done, self.configuration.lun_copy_wait_interval, + self.configuration.lun_timeout) + self.client.delete_clone_pair(clone_pair_id) + + +class CreateLunCloneTask(task.Task): + default_provides = 'lun_id', 'lun_info' + + def __init__(self, client, *args, **kwargs): + super(CreateLunCloneTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, volume, src_id): + name = huawei_utils.encode_name(volume.id) + lun_info = self.client.create_lunclone(src_id, name) + lun_id = lun_info["ID"] + try: + expected_size = int(volume.size) * constants.CAPACITY_UNIT + if int(lun_info['CAPACITY']) < expected_size: + self.client.extend_lun(lun_id, expected_size) + + self.client.split_lunclone(lun_id) + except Exception: + LOG.exception('Split clone lun %s error.', lun_id) + self.client.delete_lun(lun_id) + raise + + lun_info = self.client.get_lun_info_by_id(lun_id) + return lun_info['ID'], lun_info + + +class LunClonePreCheckTask(task.Task): + def __init__(self, *args, **kwargs): + super(LunClonePreCheckTask, self).__init__(*args, **kwargs) + + @staticmethod + def execute(volume, src_volume): + if volume.volume_type_id != src_volume.volume_type_id: + msg = _("Volume type must be the same as source " + "for fast clone.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + +class 
CreateSnapshotTask(task.Task): + default_provides = 'snapshot_id' + + def __init__(self, client, feature_support, *args, **kwargs): + super(CreateSnapshotTask, self).__init__(*args, **kwargs) + self.client = client + self.feature_support = feature_support + + def execute(self, snapshot): + if not self.feature_support['HyperSnap']: + msg = _("Huawei storage doesn't support snapshot.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + lun_info = huawei_utils.get_lun_info(self.client, snapshot.volume) + if not lun_info: + msg = _("Source volume %s to create snapshot does not exist." + ) % snapshot.volume.id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + name = huawei_utils.encode_name(snapshot.id) + snapshot_info = self.client.create_snapshot( + lun_info['ID'], name, snapshot.id) + return snapshot_info['ID'] + + def revert(self, result, **kwargs): + if isinstance(result, failure.Failure): + return + self.client.delete_snapshot(result) + + +class CreateTempSnapshotTask(task.Task): + default_provides = 'snapshot_id' + + def __init__(self, client, feature_support, *args, **kwargs): + super(CreateTempSnapshotTask, self).__init__(*args, **kwargs) + self.client = client + self.feature_support = feature_support + + def execute(self, src_id): + if not self.feature_support['HyperSnap']: + msg = _("Huawei storage doesn't support snapshot.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + snap_id = six.text_type(uuid.uuid4()) + name = huawei_utils.encode_name(snap_id) + snapshot_info = self.client.create_snapshot(src_id, name, snap_id) + return snapshot_info['ID'] + + def revert(self, result, **kwargs): + if isinstance(result, failure.Failure): + return + self.client.delete_snapshot(result) + + +class ActiveSnapshotTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(ActiveSnapshotTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, snapshot_id): + 
self.client.activate_snapshot(snapshot_id) + + def revert(self, snapshot_id): + self.client.stop_snapshot(snapshot_id) + + +class WaitSnapshotReadyTask(task.Task): + default_provides = 'snapshot_wwn' + + def __init__(self, client, *args, **kwargs): + super(WaitSnapshotReadyTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, snapshot_id): + def _snapshot_ready(): + self.snapshot = self.client.get_snapshot_info_by_id(snapshot_id) + if self.snapshot['HEALTHSTATUS'] != constants.STATUS_HEALTH: + msg = _("Snapshot %s is fault.") % snapshot_id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return not (self.snapshot['RUNNINGSTATUS'] == + constants.SNAPSHOT_INITIALIZING) + + huawei_utils.wait_for_condition(_snapshot_ready, + constants.DEFAULT_WAIT_INTERVAL, + constants.DEFAULT_WAIT_INTERVAL * 10) + return self.snapshot['WWN'] + + +class DeleteSnapshotTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(DeleteSnapshotTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, snapshot_info): + if snapshot_info['RUNNINGSTATUS'] == constants.SNAPSHOT_ACTIVATED: + self.client.stop_snapshot(snapshot_info['ID']) + self.client.delete_snapshot(snapshot_info['ID']) + + +class DeleteTempSnapshotTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(DeleteTempSnapshotTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, snapshot_id): + self.client.stop_snapshot(snapshot_id) + self.client.delete_snapshot(snapshot_id) + + +class ExtendVolumeTask(task.Task): + default_provides = 'lun_info' + + def __init__(self, client, *args, **kwargs): + super(ExtendVolumeTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, lun_id, new_size): + lun_info = self.client.get_lun_info_by_id(lun_id) + if int(lun_info['CAPACITY']) < new_size: + self.client.extend_lun(lun_id, new_size) + LOG.info('Extend LUN %(id)s to size 
%(new_size)s.', + {'id': lun_id, + 'new_size': new_size}) + lun_info = self.client.get_lun_info_by_id(lun_id) + return lun_info + + +class ExtendHyperMetroTask(task.Task): + def __init__(self, local_cli, remote_cli, config, *args, **kwargs): + super(ExtendHyperMetroTask, self).__init__(*args, **kwargs) + self.hypermetro = hypermetro.HuaweiHyperMetro( + local_cli, remote_cli, config) + self.local_cli = local_cli + + def execute(self, volume, new_size): + metadata = huawei_utils.get_volume_private_data(volume) + if not metadata.get('hypermetro'): + return + + hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume) + if not hypermetro: + msg = _('Volume %s is not in hypermetro pair') % volume.id + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.hypermetro.extend_hypermetro(hypermetro['ID'], new_size) + + +class ExtendReplicationTask(task.Task): + def __init__(self, local_cli, remote_cli, config, *args, **kwargs): + super(ExtendReplicationTask, self).__init__(*args, **kwargs) + self.replication = replication.ReplicationManager( + local_cli, remote_cli, config) + + def execute(self, volume, new_size): + data = huawei_utils.get_replication_data(volume) + pair_id = data.get('pair_id') + if pair_id: + self.replication.extend_replica(pair_id, new_size) + + +class UpdateLunTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(UpdateLunTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, lun_info, opts): + data = {} + compression_check = lun_info.get('ENABLECOMPRESSION') == 'true' + if not opts['compression'] and compression_check: + data["ENABLECOMPRESSION"] = 'false' + + dedup_check = lun_info.get('ENABLESMARTDEDUP') == 'true' + if not opts['dedup'] and dedup_check: + data["ENABLESMARTDEDUP"] = 'false' + + if (opts.get('policy') and + opts['policy'] != lun_info.get('DATATRANSFERPOLICY')): + data["DATATRANSFERPOLICY"] = opts['policy'] + + if data: + self.client.update_lun(lun_info['ID'], 
data) + + +class UpdateQoSTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(UpdateQoSTask, self).__init__(*args, **kwargs) + self.client = client + self.smartqos = smartx.SmartQos(client) + + def execute(self, lun_info, opts): + qos_id = lun_info.get('IOCLASSID') + if opts.get('qos'): + if qos_id: + self.smartqos.update(qos_id, opts['qos'], lun_info['ID']) + else: + self.smartqos.add(opts['qos'], lun_info['ID']) + elif qos_id: + self.smartqos.remove(qos_id, lun_info['ID']) + + +class UpdateCacheTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(UpdateCacheTask, self).__init__(*args, **kwargs) + self.smartcache = smartx.SmartCache(client) + + def execute(self, lun_info, opts): + cache_id = lun_info.get('SMARTCACHEPARTITIONID') + if opts.get('smartcache'): + if cache_id: + self.smartcache.update( + cache_id, opts['cachename'], lun_info['ID']) + else: + self.smartcache.add(opts['cachename'], lun_info['ID']) + elif cache_id: + self.smartcache.remove(cache_id, lun_info['ID']) + + +class UpdatePartitionTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(UpdatePartitionTask, self).__init__(*args, **kwargs) + self.smartpartition = smartx.SmartPartition(client) + + def execute(self, lun_info, opts): + partition_id = lun_info.get('CACHEPARTITIONID') + if opts.get('smartpartition'): + if partition_id: + self.smartpartition.update( + partition_id, opts['partitionname'], lun_info['ID']) + else: + self.smartpartition.add(opts['partitionname'], lun_info['ID']) + elif partition_id: + self.smartpartition.remove(partition_id, lun_info['ID']) + + +class ManageVolumePreCheckTask(task.Task): + default_provides = ('lun_info', 'lun_id') + + def __init__(self, client, volume, existing_ref, configuration, + *args, **kwargs): + super(ManageVolumePreCheckTask, self).__init__(*args, **kwargs) + self.client = client + self.volume = volume + self.existing_ref = existing_ref + self.configuration = configuration + + def 
_get_external_lun(self): + lun_info = huawei_utils.get_external_lun_info( + self.client, self.existing_ref) + if not lun_info: + msg = _('External lun %s not exist.') % self.existing_ref + LOG.error(msg) + raise exception.ManageExistingInvalidReference( + existing_ref=self.existing_ref, reason=msg) + + return lun_info + + def _check_lun_abnormal(self, lun_info, *args): + return lun_info['HEALTHSTATUS'] != constants.STATUS_HEALTH + + def _check_pool_inconsistency(self, lun_info, *args): + pool = volume_utils.extract_host(self.volume.host, 'pool') + return pool != lun_info['PARENTNAME'] + + def _check_lun_in_use(self, lun_info, *args): + return (lun_info.get('ISADD2LUNGROUP') == 'true' or + lun_info.get('EXPOSEDTOINITIATOR') == 'true') + + def _check_lun_in_hypermetro(self, lun_info, *args): + rss = {} + if 'HASRSSOBJECT' in lun_info: + rss = json.loads(lun_info['HASRSSOBJECT']) + return rss.get('HyperMetro') == 'TRUE' + + def _check_lun_in_replication(self, lun_info, *args): + rss = {} + if 'HASRSSOBJECT' in lun_info: + rss = json.loads(lun_info['HASRSSOBJECT']) + return rss.get('RemoteReplication') == 'TRUE' + + def _check_lun_in_splitmirror(self, lun_info, *args): + rss = {} + if 'HASRSSOBJECT' in lun_info: + rss = json.loads(lun_info['HASRSSOBJECT']) + return rss.get('SplitMirror') == 'TRUE' + + def _check_lun_in_hypermirror(self, lun_info, *args): + rss = {} + if 'HASRSSOBJECT' in lun_info: + rss = json.loads(lun_info['HASRSSOBJECT']) + return rss.get('LUNMirror') == 'TRUE' + + def _check_lun_in_luncopy(self, lun_info, *args): + rss = {} + if 'HASRSSOBJECT' in lun_info: + rss = json.loads(lun_info['HASRSSOBJECT']) + return rss.get('LunCopy') == 'TRUE' + + def _check_lun_in_migration(self, lun_info, *args): + rss = {} + if 'HASRSSOBJECT' in lun_info: + rss = json.loads(lun_info['HASRSSOBJECT']) + return rss.get('LunMigration') == 'TRUE' + + def _check_lun_not_common(self, lun_info, *args): + return (lun_info.get('MIRRORTYPE') != '0' or + lun_info.get('SUBTYPE') 
!= '0') + + def _check_lun_consistency(self, lun_info, opts): + return ('LUNType' in opts and + opts['LUNType'] != lun_info['ALLOCTYPE']) + + def _check_lun_dedup_consistency(self, lun_info, opts): + dedup_flag = False + if opts.get('dedup') is not None: + dedup_enabled = lun_info['ENABLESMARTDEDUP'] == 'true' + if opts['dedup'] != dedup_enabled: + dedup_flag = True + return dedup_flag + + def _check_lun_compresison_consistency(self, lun_info, opts): + compression_flag = False + if opts.get('compression') is not None: + compression_enabled = lun_info['ENABLECOMPRESSION'] == 'true' + if opts['compression'] != compression_enabled: + compression_flag = True + return compression_flag + + def execute(self, opts): + lun_info = self._get_external_lun() + + for i in dir(self): + if callable(getattr(self, i)) and i.startswith('_check_'): + func = getattr(self, i) + if func(lun_info, opts): + msg = _("Volume managing pre check %s failed." + ) % func.__name__ + LOG.error(msg) + raise exception.ManageExistingInvalidReference( + existing_ref=self.existing_ref, reason=msg) + + return lun_info, lun_info['ID'] + + +class ManageLunTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(ManageLunTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, volume, lun_info): + new_name = huawei_utils.encode_name(volume.id) + self.client.rename_lun(lun_info['ID'], new_name, volume.name) + + def revert(self, result, lun_info, **kwargs): + if isinstance(result, failure.Failure): + return + self.client.rename_lun(lun_info['ID'], lun_info['NAME'], + lun_info['DESCRIPTION']) + + +class ManageSnapshotPreCheckTask(task.Task): + default_provides = 'snapshot_info' + + def __init__(self, client, snapshot, existing_ref, *args, **kwargs): + super(ManageSnapshotPreCheckTask, self).__init__(*args, **kwargs) + self.client = client + self.snapshot = snapshot + self.existing_ref = existing_ref + + def _get_external_snapshot(self): + snapshot_info = 
huawei_utils.get_external_snapshot_info( + self.client, self.existing_ref) + if not snapshot_info: + msg = _('External snapshot %s not exist.') % self.existing_ref + LOG.error(msg) + raise exception.ManageExistingInvalidReference( + existing_ref=self.existing_ref, reason=msg) + + return snapshot_info + + def _check_snapshot_abnormal(self, snapshot_info): + return snapshot_info['HEALTHSTATUS'] != constants.STATUS_HEALTH + + def _check_snapshot_in_use(self, snapshot_info): + return snapshot_info.get('EXPOSEDTOINITIATOR') == 'true' + + def _check_parent_volume_inconsistency(self, snapshot_info): + parent_info = huawei_utils.get_lun_info( + self.client, self.snapshot.volume) + return (not parent_info or + snapshot_info.get('PARENTID') != parent_info['ID']) + + def execute(self): + snapshot_info = self._get_external_snapshot() + for i in dir(self): + if callable(getattr(self, i)) and i.startswith('_check_'): + func = getattr(self, i) + if func(snapshot_info): + msg = _("Snapshot managing pre check %s failed." 
+ ) % func.__name__ + LOG.error(msg) + raise exception.ManageExistingInvalidReference( + existing_ref=self.existing_ref, reason=msg) + + return snapshot_info + + +class ManageSnapshotTask(task.Task): + def __init__(self, client, *args, **kwargs): + super(ManageSnapshotTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, snapshot, snapshot_info): + new_name = huawei_utils.encode_name(snapshot.id) + data = {'NAME': new_name} + self.client.update_snapshot(snapshot_info['ID'], data) + + if (snapshot_info.get('RUNNINGSTATUS') == + constants.SNAPSHOT_UNACTIVATED): + self.client.activate_snapshot(snapshot_info['ID']) + + +class CreateHyperMetroGroupTask(task.Task): + def __init__(self, local_cli, remote_cli, config, feature_support, + *args, **kwargs): + super(CreateHyperMetroGroupTask, self).__init__(*args, **kwargs) + self.hypermetro = hypermetro.HuaweiHyperMetro( + local_cli, remote_cli, config) + self.feature_support = feature_support + + def execute(self, group, opts): + if any(opt for opt in opts if opt['hypermetro']): + if not self.feature_support['HyperMetro']: + msg = _("Huawei storage doesn't support HyperMetro.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.hypermetro.create_consistencygroup(group.id) + + def revert(self, result, group, **kwargs): + if isinstance(result, failure.Failure): + return + self.hypermetro.delete_consistencygroup(group.id, []) + + +class CreateReplicationGroupTask(task.Task): + def __init__(self, local_cli, remote_cli, config, feature_support, + *args, **kwargs): + super(CreateReplicationGroupTask, self).__init__(*args, **kwargs) + self.replication = replication.ReplicationManager( + local_cli, remote_cli, config) + self.feature_support = feature_support + + def execute(self, group, opts): + create_group = False + replication_type = set() + for opt in opts: + if opt['replication_enabled']: + create_group = True + replication_type.add(opt['replication_type']) + + if 
create_group: + if not self.feature_support['HyperReplication']: + msg = _("Huawei storage doesn't support HyperReplication.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if len(replication_type) != 1: + msg = _("Multiple replication types exist in group.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.replication.create_group(group.id, replication_type.pop()) + + def revert(self, result, group, **kwargs): + if isinstance(result, failure.Failure): + return + self.replication.delete_group(group.id, []) + + +class GetISCSIConnectionTask(task.Task): + default_provides = ('target_ips', 'target_iqns', 'target_eths', + 'config_info') + + def __init__(self, client, iscsi_info, *args, **kwargs): + super(GetISCSIConnectionTask, self).__init__(*args, **kwargs) + self.client = client + self.iscsi_info = iscsi_info + + def _get_config_target_ips(self, ini): + if ini and ini.get('TargetIP'): + target_ips = [ip.strip() for ip in ini['TargetIP'].split() + if ip.strip()] + else: + target_ips = self.iscsi_info['default_target_ips'] + return target_ips + + def _get_port_ip(self, port_id): + iqn_info = port_id.split(',', 1)[0] + return iqn_info.split(':', 5)[5] + + def _get_port_iqn(self, port_id): + iqn_info = port_id.split(',', 1)[0] + return iqn_info.split('+')[1] + + def execute(self, connector): + ip_iqn_map = {} + target_ports = self.client.get_iscsi_tgt_ports() + for port in target_ports: + ip = self._get_port_ip(port['ID']) + normalized_ip = ipaddress.ip_address(six.text_type(ip)).exploded + ip_iqn_map[normalized_ip] = (port['ID'], port['ETHPORTID']) + + config_info = huawei_utils.find_config_info(self.iscsi_info, + connector=connector) + + config_ips = self._get_config_target_ips(config_info) + LOG.info('Configured iscsi ips %s.', config_ips) + + target_ips = [] + target_iqns = [] + target_eths = [] + + for ip in config_ips: + ip_addr = ipaddress.ip_address(six.text_type(ip)) + normalized_ip = ip_addr.exploded + 
if normalized_ip in ip_iqn_map: + if ip_addr.version == 6: + target_ips.append('[' + ip_addr.compressed + ']') + else: + target_ips.append(ip_addr.compressed) + + iqn = self._get_port_iqn(ip_iqn_map[normalized_ip][0]) + target_iqns.append(iqn) + target_eths.append(ip_iqn_map[normalized_ip][1]) + + if not target_ips or not target_iqns or not target_eths: + msg = _('Get iSCSI target ip&iqnð error.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.info('Get iscsi target_ips: %s, target_iqns: %s, target_eths: %s.', + target_ips, target_iqns, target_eths) + + return target_ips, target_iqns, target_eths, config_info + + +class CreateHostTask(task.Task): + default_provides = 'host_id' + + def __init__(self, client, *args, **kwargs): + super(CreateHostTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, connector): + orig_host_name = connector['host'] + host_id = huawei_utils.get_host_id(self.client, orig_host_name) + if not host_id: + host_name = huawei_utils.encode_host_name(orig_host_name) + host_id = self.client.create_host(host_name, orig_host_name) + return host_id + + +class AddISCSIInitiatorTask(task.Task): + default_provides = 'chap_info' + + def __init__(self, client, iscsi_info, *args, **kwargs): + super(AddISCSIInitiatorTask, self).__init__(*args, **kwargs) + self.client = client + self.iscsi_info = iscsi_info + + def _get_chap_info(self, config): + chap_config = config.get('CHAPinfo') + if not chap_config: + return {} + + chap_name, chap_password = chap_config.split(';') + return {'CHAPNAME': chap_name, + 'CHAPPASSWORD': chap_password} + + def _get_alua_info(self, config): + alua_info = {'MULTIPATHTYPE': '0'} + if config.get('ALUA'): + alua_info['MULTIPATHTYPE'] = config['ALUA'] + + if alua_info['MULTIPATHTYPE'] == '1': + for k in ('FAILOVERMODE', 'SPECIALMODETYPE', 'PATHTYPE'): + if config.get(k): + alua_info[k] = config[k] + + return alua_info + + def execute(self, connector, host_id, config_info): 
+ initiator = connector['initiator'] + self.client.add_iscsi_initiator(initiator) + + alua_info = self._get_alua_info(config_info) + self.client.associate_iscsi_initiator_to_host( + initiator, host_id, alua_info) + + chap_info = self._get_chap_info(config_info) + ini_info = self.client.get_iscsi_initiator(initiator) + if (ini_info['USECHAP'] == 'true' and not chap_info) or ( + ini_info['USECHAP'] == 'false' and chap_info): + self.client.update_iscsi_initiator_chap(initiator, chap_info) + + return chap_info + + +class CreateHostGroupTask(task.Task): + default_provides = 'hostgroup_id' + + def __init__(self, client, *args, **kwargs): + super(CreateHostGroupTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, host_id): + hostgroup_name = constants.HOSTGROUP_PREFIX + host_id + hostgroup_id = self.client.create_hostgroup(hostgroup_name) + self.client.associate_host_to_hostgroup(hostgroup_id, host_id) + return hostgroup_id + + +class CreateLunGroupTask(task.Task): + default_provides = 'lungroup_id' + + def __init__(self, client, *args, **kwargs): + super(CreateLunGroupTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, host_id, lun_id, lun_type): + lungroup_name = constants.LUNGROUP_PREFIX + host_id + lungroup_id = self.client.create_lungroup(lungroup_name) + self.client.associate_lun_to_lungroup(lungroup_id, lun_id, lun_type) + return lungroup_id + + def revert(self, result, lun_id, lun_type, **kwargs): + if isinstance(result, failure.Failure): + return + self.client.remove_lun_from_lungroup(result, lun_id, lun_type) + + +class CreateMappingViewTask(task.Task): + default_provides = ('mappingview_id', 'hostlun_id', 'aval_host_lun_ids') + + def __init__(self, client, *args, **kwargs): + super(CreateMappingViewTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, lun_id, lun_type, host_id, hostgroup_id, lungroup_id, + portgroup_id=None): + mappingview_name = 
constants.MAPPING_VIEW_PREFIX + host_id + mappingview_id = self.client.create_mappingview(mappingview_name) + self.client.associate_hostgroup_to_mappingview( + mappingview_id, hostgroup_id) + self.client.associate_lungroup_to_mappingview( + mappingview_id, lungroup_id) + if portgroup_id: + self.client.associate_portgroup_to_mappingview( + mappingview_id, portgroup_id) + + if lun_type == constants.LUN_TYPE: + hostlun_id = self.client.get_lun_host_lun_id(host_id, lun_id) + else: + hostlun_id = self.client.get_snapshot_host_lun_id(host_id, lun_id) + + mappingview_info = self.client.get_mappingview_by_id(mappingview_id) + aval_host_lun_ids = json.loads( + mappingview_info['AVAILABLEHOSTLUNIDLIST']) + return mappingview_id, hostlun_id, aval_host_lun_ids + + +class GetISCSIPropertiesTask(task.Task): + default_provides = 'mapping_info' + + def execute(self, connector, hostlun_id, target_iqns, target_ips, + chap_info, mappingview_id, aval_host_lun_ids, lun_id, + lun_info): + hostlun_id = int(hostlun_id) + mapping_info = { + 'target_discovered': False, + 'hostlun_id': hostlun_id, + 'mappingview_id': mappingview_id, + 'aval_host_lun_ids': aval_host_lun_ids, + 'lun_id': lun_id, + } + + if connector.get('multipath'): + mapping_info.update({ + 'target_iqns': target_iqns, + 'target_portals': ['%s:3260' % ip for ip in target_ips], + 'target_luns': [hostlun_id] * len(target_ips), + }) + else: + mapping_info.update({ + 'target_iqn': target_iqns[0], + 'target_portal': '%s:3260' % target_ips[0], + 'target_lun': hostlun_id, + }) + + if chap_info: + mapping_info['auth_method'] = 'CHAP' + mapping_info['auth_username'] = chap_info['CHAPNAME'] + mapping_info['auth_password'] = chap_info['CHAPPASSWORD'] + + if lun_info.get('ALLOCTYPE') == constants.THIN_LUNTYPE: + mapping_info['discard'] = True + + return mapping_info + + +class GetHyperMetroRemoteLunTask(task.Task): + default_provides = ('lun_id', 'lun_info') + + def __init__(self, client, hypermetro_id, *args, **kwargs): + 
super(GetHyperMetroRemoteLunTask, self).__init__(*args, **kwargs) + self.client = client + self.hypermetro_id = hypermetro_id + + def execute(self): + hypermetro_info = self.client.get_hypermetro_by_id(self.hypermetro_id) + remote_lun_id = hypermetro_info['LOCALOBJID'] + remote_lun_info = self.client.get_lun_info_by_id(remote_lun_id) + return remote_lun_id, remote_lun_info + + +class GetLunMappingTask(task.Task): + default_provides = ('mappingview_id', 'lungroup_id', 'hostgroup_id', + 'portgroup_id', 'host_id') + + def __init__(self, client, *args, **kwargs): + super(GetLunMappingTask, self).__init__(*args, **kwargs) + self.client = client + + def execute(self, connector): + host_name = connector['host'] + host_id = huawei_utils.get_host_id(self.client, host_name) + if not host_id: + LOG.warning('Host %s not exist, return success for ' + 'connection termination.', host_name) + return None, None, None, None, None + + mappingview_name = constants.MAPPING_VIEW_PREFIX + host_id + mappingview = self.client.get_mappingview_by_name(mappingview_name) + if not mappingview: + LOG.warning('Mappingview %s not exist, return success for ' + 'connection termination.', mappingview_name) + return None, None, None, None, host_id + + lungroup_id = self.client.get_lungroup_in_mappingview( + mappingview['ID']) + portgroup_id = self.client.get_portgroup_in_mappingview( + mappingview['ID']) + hostgroup_id = self.client.get_hostgroup_in_mappingview( + mappingview['ID']) + + return (mappingview['ID'], lungroup_id, hostgroup_id, portgroup_id, + host_id) + + +class ClearLunMappingTask(task.Task): + default_provides = 'ini_tgt_map' + + def __init__(self, client, fc_san=None, *args, **kwargs): + super(ClearLunMappingTask, self).__init__(*args, **kwargs) + self.client = client + self.fc_san = fc_san + + def _get_obj_count_of_lungroup(self, lungroup_id): + lun_count = self.client.get_lun_count_of_lungroup(lungroup_id) + snap_count = self.client.get_snapshot_count_of_lungroup(lungroup_id) + 
return lun_count + snap_count + + def _delete_portgroup(self, mappingview_id, portgroup_id): + self.client.remove_portgroup_from_mappingview( + mappingview_id, portgroup_id) + + eth_ports = self.client.get_eth_ports_in_portgroup(portgroup_id) + fc_ports = self.client.get_fc_ports_in_portgroup(portgroup_id) + for p in [p['ID'] for p in eth_ports] + [p['ID'] for p in fc_ports]: + self.client.remove_port_from_portgroup(portgroup_id, p) + self.client.delete_portgroup(portgroup_id) + + def _delete_lungroup(self, mappingview_id, lungroup_id): + self.client.remove_lungroup_from_mappingview( + mappingview_id, lungroup_id) + self.client.delete_lungroup(lungroup_id) + + def _delete_hostgroup(self, mappingview_id, hostgroup_id, host_id): + self.client.remove_hostgroup_from_mappingview( + mappingview_id, hostgroup_id) + self.client.remove_host_from_hostgroup(hostgroup_id, host_id) + self.client.delete_hostgroup(hostgroup_id) + + def _delete_host(self, host_id): + iscsi_initiators = self.client.get_host_iscsi_initiators(host_id) + for ini in iscsi_initiators: + self.client.remove_iscsi_initiator_from_host(ini) + + fc_initiators = self.client.get_host_fc_initiators(host_id) + for ini in fc_initiators: + self.client.remove_fc_initiator_from_host(ini) + + self.client.delete_host(host_id) + + def _get_ini_tgt_map(self, connector, host_id): + ini_tgt_map = {} + portgroup = self.client.get_portgroup_by_name( + constants.PORTGROUP_PREFIX + host_id) + if portgroup: + ports = self.client.get_fc_ports_in_portgroup(portgroup['ID']) + port_wwns = [p['WWN'] for p in ports] + wwns = map(lambda x: x.lower(), connector['wwpns']) + for wwn in wwns: + ini_tgt_map[wwn] = port_wwns + + return ini_tgt_map + + def execute(self, connector, lun_id, lun_type, host_id, mappingview_id, + lungroup_id, hostgroup_id, portgroup_id): + obj_count = 0 + if lun_id and lungroup_id: + self.client.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) + obj_count = self._get_obj_count_of_lungroup(lungroup_id) + + 
# If lungroup still has member objects, don't clear mapping relation. + if obj_count > 0: + LOG.info('Lungroup %(lg)s still has %(count)s members.', + {'lg': lungroup_id, 'count': obj_count}) + return {} + + ini_tgt_map = {} + if self.fc_san and host_id: + ini_tgt_map = self._get_ini_tgt_map(connector, host_id) + + if mappingview_id and portgroup_id: + self._delete_portgroup(mappingview_id, portgroup_id) + if mappingview_id and lungroup_id: + self._delete_lungroup(mappingview_id, lungroup_id) + if mappingview_id and hostgroup_id: + self._delete_hostgroup(mappingview_id, hostgroup_id, host_id) + if mappingview_id: + self.client.delete_mapping_view(mappingview_id) + if host_id: + self._delete_host(host_id) + + return ini_tgt_map + + +class GetFCConnectionTask(task.Task): + default_provides = ('ini_tgt_map', 'tgt_port_wwns') + + def __init__(self, client, fc_san, configuration, *args, **kwargs): + super(GetFCConnectionTask, self).__init__(*args, **kwargs) + self.client = client + self.fc_san = fc_san + self.configuration = configuration + + def _get_fc_ports(self, wwns): + contr_map = {} + slot_map = {} + port_map = {} + + fc_ports = self.client.get_fc_ports() + for port in fc_ports: + if port['RUNNINGSTATUS'] == constants.FC_PORT_CONNECTED: + contr = port['PARENTID'].split('.')[0] + slot = port['PARENTID'] + port_wwn = port['WWN'] + + if contr not in contr_map: + contr_map[contr] = [slot] + elif slot not in contr_map[contr]: + contr_map[contr].append(slot) + + if slot not in slot_map: + slot_map[slot] = [port_wwn] + elif port_wwn not in slot_map[slot]: + slot_map[slot].append(port_wwn) + + port_map[port_wwn] = { + 'id': port['ID'], + 'runspeed': int(port['RUNSPEED']), + 'slot': slot, + } + + fabrics = self._get_fabric(wwns, list(port_map.keys())) + if not fabrics: + msg = _("No valid fabric connection..") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return contr_map, slot_map, port_map, fabrics + + def _get_fabric(self, ini_port_wwns, 
tgt_port_wwns): + ini_tgt_map = self.fc_san.get_device_mapping_from_network( + ini_port_wwns, tgt_port_wwns) + + def _filter_not_connected_fabric(fabric_name, fabric): + ini_port_wwn_list = fabric.get('initiator_port_wwn_list') + tgt_port_wwn_list = fabric.get('target_port_wwn_list') + + if not ini_port_wwn_list or not tgt_port_wwn_list: + LOG.warning("Fabric %(fabric_name)s doesn't really " + "connect host and array: %(fabric)s.", + {'fabric_name': fabric_name, + 'fabric': fabric}) + return None + + return set(ini_port_wwn_list), set(tgt_port_wwn_list) + + valid_fabrics = [] + for fabric in ini_tgt_map: + pair = _filter_not_connected_fabric(fabric, ini_tgt_map[fabric]) + if pair: + valid_fabrics.append(pair) + + LOG.info("Got fabric: %s.", valid_fabrics) + return valid_fabrics + + def _count_port_weight(self, port): + port_bandwidth = port['runspeed'] + portgroup_ids = self.client.get_portgroup_by_port_id(port['id'], 212) + weight = 1.0 / port_bandwidth if port_bandwidth > 0 else 1.0 + + return len(portgroup_ids), weight + + def _select_port_per_fabric(self, port_map, candid_ports, used_slots): + used_slot_pairs = [] + other_slot_pairs = [] + for p in candid_ports: + weight = self._count_port_weight(port_map[p]) + + if port_map[p]['slot'] in used_slots: + used_slot_pairs.append((weight, p)) + else: + other_slot_pairs.append((weight, p)) + + new_port = None + if other_slot_pairs: + sorted_pairs = sorted(other_slot_pairs, key=lambda a: a[0]) + new_port = sorted_pairs[0][1] + if not new_port and used_slot_pairs: + sorted_pairs = sorted(used_slot_pairs, key=lambda a: a[0]) + new_port = sorted_pairs[0][1] + + return new_port + + def _select_ports_per_contr(self, fabrics, slots, slot_map, port_map): + contr_ports = set() + for slot in slots: + contr_ports.update(slot_map[slot]) + + if len(fabrics) == 1: + select_fabrics = fabrics * 2 + else: + select_fabrics = fabrics + + used_slots = set() + selected_ports = set() + for fabric in select_fabrics: + new_port = 
self._select_port_per_fabric(
+                port_map, fabric[1] & contr_ports, used_slots)
+            if new_port:
+                selected_ports.add(new_port)
+                used_slots.add(port_map[new_port]['slot'])
+
+        return selected_ports
+
+    def _get_ports_in_use(self, host_id):
+        portgroup = self.client.get_portgroup_by_name(
+            constants.PORTGROUP_PREFIX + host_id)
+        if not portgroup:
+            return []
+        ports = self.client.get_fc_ports_in_portgroup(portgroup['ID'])
+        return [p['WWN'] for p in ports]
+
+    def _get_fc_zone(self, wwns, host_id):
+        selected_ports = set()
+        ini_tgt_map = {}
+
+        used_ports = self._get_ports_in_use(host_id)
+        if not used_ports:
+            contr_map, slot_map, port_map, fabrics = self._get_fc_ports(wwns)
+            for contr in contr_map:
+                ports = self._select_ports_per_contr(
+                    fabrics, contr_map[contr], slot_map, port_map)
+                selected_ports.update(ports)
+
+            for fabric in fabrics:
+                for ini in fabric[0]:
+                    ini_tgt_map[ini] = list(selected_ports & fabric[1])
+
+        return ini_tgt_map, list(selected_ports) + used_ports
+
+    def _get_fc_link(self, wwns, host_id):
+        totals, frees = self.client.get_fc_initiators()
+        host_initiators = self.client.get_host_fc_initiators(host_id)
+        initiators = set(wwns) & set(totals)
+        invalids = initiators - set(host_initiators) - set(frees)
+        if invalids:
+            if (self.configuration.min_fc_ini_online ==
+                    constants.DEFAULT_MINIMUM_FC_INITIATOR_ONLINE):
+                msg = _("There are invalid initiators %s. If you want to "
+                        "continue to attach volume to host, configure "
+                        "MinFCIniOnline in the XML file.") % invalids
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+            initiators = (set(host_initiators) | set(frees)) & set(wwns)
+
+        if len(initiators) < self.configuration.min_fc_ini_online:
+            msg = (("The number of online fc initiator %(wwns)s less than"
+                    " the set number: %(set)s.")
+                   % {"wwns": initiators,
+                      "set": self.configuration.min_fc_ini_online})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        ini_tgt_map = {}
+        tgt_port_wwns = set()
+
+        for ini in initiators:
+            tgts = self.client.get_fc_target_wwpns(ini)
+            ini_tgt_map[ini] = tgts
+            tgt_port_wwns.update(tgts)
+
+        return ini_tgt_map, list(tgt_port_wwns)
+
+    def execute(self, connector, host_id):
+        wwns = [x.lower() for x in connector['wwpns']]
+
+        if self.fc_san:
+            ini_tgt_map, tgt_port_wwns = self._get_fc_zone(wwns, host_id)
+        else:
+            ini_tgt_map, tgt_port_wwns = self._get_fc_link(wwns, host_id)
+
+        if not tgt_port_wwns:
+            msg = _('No fc connection for wwns %s.') % wwns
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        return ini_tgt_map, tgt_port_wwns
+
+
+class AddFCInitiatorTask(task.Task):
+    def __init__(self, client, fc_info, *args, **kwargs):
+        super(AddFCInitiatorTask, self).__init__(*args, **kwargs)
+        self.client = client
+        self.fc_info = fc_info
+
+    def _get_alua_info(self, config):
+        alua_info = {'MULTIPATHTYPE': '0'}
+        if config.get('ALUA'):
+            alua_info['MULTIPATHTYPE'] = config['ALUA']
+
+        if alua_info['MULTIPATHTYPE'] == '1':
+            for k in ('FAILOVERMODE', 'SPECIALMODETYPE', 'PATHTYPE'):
+                if config.get(k):
+                    alua_info[k] = config[k]
+
+        return alua_info
+
+    def execute(self, host_id, ini_tgt_map):
+        for ini in ini_tgt_map:
+            self.client.add_fc_initiator(ini)
+
+            config_info = huawei_utils.find_config_info(self.fc_info,
+                                                        initiator=ini)
+            alua_info = self._get_alua_info(config_info)
+
self.client.associate_fc_initiator_to_host(host_id, ini, alua_info) + + +class CreateFCPortGroupTask(task.Task): + default_provides = 'portgroup_id' + + def __init__(self, client, fc_san, *args, **kwargs): + super(CreateFCPortGroupTask, self).__init__(*args, **kwargs) + self.client = client + self.fc_san = fc_san + + def _get_fc_ports(self): + port_map = {} + fc_ports = self.client.get_fc_ports() + for port in fc_ports: + port_map[port['WWN']] = port['ID'] + return port_map + + def _get_ports_to_add(self, ini_tgt_map): + ports = set() + for tgts in six.itervalues(ini_tgt_map): + ports |= set(tgts) + return ports + + def execute(self, host_id, ini_tgt_map): + if not self.fc_san: + return None + + portgroup_name = constants.PORTGROUP_PREFIX + host_id + portgroup_id = self.client.create_portgroup(portgroup_name) + port_map = self._get_fc_ports() + ports = self._get_ports_to_add(ini_tgt_map) + for port in ports: + self.client.add_port_to_portgroup(portgroup_id, port_map[port]) + return portgroup_id + + def revert(self, result, ini_tgt_map, **kwargs): + if isinstance(result, failure.Failure): + return + if result: + port_map = self._get_fc_ports() + ports = self._get_ports_to_add(ini_tgt_map) + for port in ports: + self.client.remove_port_from_portgroup(result, port_map[port]) + + +class GetFCPropertiesTask(task.Task): + default_provides = 'mapping_info' + + def execute(self, ini_tgt_map, tgt_port_wwns, hostlun_id, mappingview_id, + aval_host_lun_ids, lun_id, lun_info): + hostlun_id = int(hostlun_id) + mapping_info = { + 'hostlun_id': hostlun_id, + 'mappingview_id': mappingview_id, + 'aval_host_lun_ids': aval_host_lun_ids, + 'target_discovered': True, + 'target_wwn': tgt_port_wwns, + 'target_lun': hostlun_id, + 'initiator_target_map': ini_tgt_map, + 'lun_id': lun_id, + } + + if lun_info.get('ALLOCTYPE') == constants.THIN_LUNTYPE: + mapping_info['discard'] = True + + return mapping_info + + +class ClassifyVolumeTask(task.Task): + default_provides = ('normal_volumes', 
'replication_volumes') + + def execute(self, volumes): + normal_volumes = [] + replication_volumes = [] + + for v in volumes: + data = huawei_utils.to_dict(v.replication_driver_data) + if 'pair_id' in data: + replication_volumes.append(v) + else: + normal_volumes.append(v) + + return normal_volumes, replication_volumes + + +class FailoverVolumeTask(task.Task): + default_provides = 'volumes_update' + + def __init__(self, local_cli, remote_cli, config, *args, **kwargs): + super(FailoverVolumeTask, self).__init__(*args, **kwargs) + self.replication = replication.ReplicationManager( + local_cli, remote_cli, config) + + def _failover_normal_volumes(self, volumes): + volumes_update = [] + for v in volumes: + volume_update = {'volume_id': v.id, + 'updates': {'status': 'error'}} + volumes_update.append(volume_update) + + return volumes_update + + def execute(self, replication_volumes, normal_volumes): + volumes_update = self.replication.failover(replication_volumes) + volumes_update += self._failover_normal_volumes(normal_volumes) + return volumes_update + + +class FailbackVolumeTask(task.Task): + default_provides = 'volumes_update' + + def __init__(self, local_cli, remote_cli, config, *args, **kwargs): + super(FailbackVolumeTask, self).__init__(*args, **kwargs) + self.replication = replication.ReplicationManager( + local_cli, remote_cli, config) + + def _failback_normal_volumes(self, volumes): + volumes_update = [] + for v in volumes: + volume_update = {'volume_id': v.id, + 'updates': {'status': 'available'}} + volumes_update.append(volume_update) + + return volumes_update + + def execute(self, replication_volumes, normal_volumes): + volumes_update = self.replication.failback(replication_volumes) + volumes_update += self._failback_normal_volumes(normal_volumes) + return volumes_update + + +def create_volume(volume, local_cli, hypermetro_rmt_cli, replication_rmt_cli, + configuration, feature_support): + store_spec = {'volume': volume} + + work_flow = 
linear_flow.Flow('create_volume') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support), + CreateLunTask(local_cli, configuration, feature_support), + WaitLunOnlineTask(local_cli), + AddQoSTask(local_cli), + AddCacheTask(local_cli), + AddPartitionTask(local_cli), + CreateHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration.hypermetro, + is_sync=False), + AddHyperMetroGroupTask( + local_cli, hypermetro_rmt_cli, configuration.hypermetro), + CreateReplicationTask( + local_cli, replication_rmt_cli, configuration.replication), + AddReplicationGroupTask( + local_cli, replication_rmt_cli, configuration.replication), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + lun_id = engine.storage.fetch('lun_id') + lun_info = engine.storage.fetch('lun_info') + hypermetro_id = engine.storage.fetch('hypermetro_id') + replication_id = engine.storage.fetch('replication_id') + return lun_id, lun_info['WWN'], hypermetro_id, replication_id + + +def delete_volume(volume, local_cli, hypermetro_rmt_cli, replication_rmt_cli, + configuration): + store_spec = {'volume': volume} + work_flow = linear_flow.Flow('delete_volume') + work_flow.add( + CheckLunExistTask(local_cli), + CheckLunMappedTask(local_cli), + DeleteReplicationTask(local_cli, replication_rmt_cli, + configuration.replication), + DeleteHyperMetroTask(local_cli, hypermetro_rmt_cli, + configuration.hypermetro), + DeletePartitionTask(local_cli), + DeleteCacheTask(local_cli), + DeleteQoSTask(local_cli), + DeleteLunTask(local_cli), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + +def migrate_volume(volume, host, local_cli, feature_support): + store_spec = {'volume': volume} + + work_flow = linear_flow.Flow('migrate_volume') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support), + CheckLunExistTask(local_cli), + CreateMigratedLunTask(local_cli, host, feature_support), + WaitLunOnlineTask(local_cli, rebind={'lun_id': 'tgt_lun_id'}), + 
CreateMigrateTask(local_cli, rebind={'src_lun_id': 'lun_id'}), + WaitMigrateDoneTask(local_cli), + AddCacheTask(local_cli), + AddPartitionTask(local_cli), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + +def create_volume_from_snapshot( + volume, snapshot, local_cli, hypermetro_rmt_cli, replication_rmt_cli, + configuration, feature_support): + store_spec = {'volume': volume} + metadata = huawei_utils.get_volume_metadata(volume) + clone_pair_flag = huawei_utils.is_support_clone_pair(local_cli) + work_flow = linear_flow.Flow('create_volume_from_snapshot') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support), + CheckSnapshotExistTask(local_cli, inject={'snapshot': snapshot})) + + if (strutils.bool_from_string(metadata.get('fastclone', False)) or + (metadata.get('fastclone') is None and + configuration.clone_mode == "fastclone")): + work_flow.add( + LunClonePreCheckTask(inject={'src_volume': snapshot}), + CreateLunCloneTask(local_cli, + rebind={'src_id': 'snapshot_id'}), + ) + elif clone_pair_flag: + work_flow.add( + CreateLunTask(local_cli, configuration, feature_support, + inject={"src_size": snapshot.volume_size}), + WaitLunOnlineTask(local_cli), + CreateClonePairTask(local_cli, feature_support, configuration, + rebind={'source_id': 'snapshot_id', + 'target_id': 'lun_id'}), + WaitClonePairDoneTask(local_cli, configuration),) + else: + work_flow.add( + CreateLunTask(local_cli, configuration, feature_support), + WaitLunOnlineTask(local_cli), + CreateLunCopyTask(local_cli, feature_support, configuration), + WaitLunCopyDoneTask(local_cli, configuration),) + + work_flow.add( + ExtendVolumeTask(local_cli, inject={ + "new_size": int(volume.size) * constants.CAPACITY_UNIT}), + AddQoSTask(local_cli), + AddCacheTask(local_cli), + AddPartitionTask(local_cli), + CreateHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration.hypermetro), + AddHyperMetroGroupTask( + local_cli, hypermetro_rmt_cli, configuration.hypermetro), + 
CreateReplicationTask( + local_cli, replication_rmt_cli, configuration.replication), + AddReplicationGroupTask( + local_cli, replication_rmt_cli, configuration.replication),) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + lun_id = engine.storage.fetch('lun_id') + lun_info = engine.storage.fetch('lun_info') + hypermetro_id = engine.storage.fetch('hypermetro_id') + replication_id = engine.storage.fetch('replication_id') + return lun_id, lun_info['WWN'], hypermetro_id, replication_id + + +def create_volume_from_volume( + volume, src_volume, local_cli, hypermetro_rmt_cli, replication_rmt_cli, + configuration, feature_support): + store_spec = {'volume': volume} + metadata = huawei_utils.get_volume_metadata(volume) + clone_pair_flag = huawei_utils.is_support_clone_pair(local_cli) + work_flow = linear_flow.Flow('create_volume_from_volume') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support), + CheckLunExistTask(local_cli, provides=('src_lun_info', 'src_id'), + inject={'volume': src_volume}), + ) + + if (strutils.bool_from_string(metadata.get('fastclone', False)) or + (metadata.get('fastclone') is None and + configuration.clone_mode == "fastclone")): + work_flow.add( + LunClonePreCheckTask(inject={'src_volume': src_volume}), + CreateLunCloneTask(local_cli), + ) + elif clone_pair_flag: + work_flow.add( + CreateLunTask(local_cli, configuration, feature_support, + inject={"src_size": src_volume.size}), + WaitLunOnlineTask(local_cli), + CreateClonePairTask(local_cli, feature_support, configuration, + rebind={'source_id': 'src_id', + 'target_id': 'lun_id'}), + WaitClonePairDoneTask(local_cli, configuration),) + else: + work_flow.add( + CreateTempSnapshotTask(local_cli, feature_support), + WaitSnapshotReadyTask(local_cli), + ActiveSnapshotTask(local_cli), + CreateLunTask(local_cli, configuration, feature_support), + WaitLunOnlineTask(local_cli), + CreateLunCopyTask(local_cli, feature_support, configuration), + 
WaitLunCopyDoneTask(local_cli, configuration), + DeleteTempSnapshotTask(local_cli), + ) + + work_flow.add( + ExtendVolumeTask(local_cli, inject={ + "new_size": int(volume.size) * constants.CAPACITY_UNIT}), + AddQoSTask(local_cli), + AddCacheTask(local_cli), + AddPartitionTask(local_cli), + CreateHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration.hypermetro), + AddHyperMetroGroupTask( + local_cli, hypermetro_rmt_cli, configuration.hypermetro), + CreateReplicationTask( + local_cli, replication_rmt_cli, configuration.replication), + AddReplicationGroupTask( + local_cli, replication_rmt_cli, configuration.replication),) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + lun_id = engine.storage.fetch('lun_id') + lun_info = engine.storage.fetch('lun_info') + hypermetro_id = engine.storage.fetch('hypermetro_id') + replication_id = engine.storage.fetch('replication_id') + return lun_id, lun_info['WWN'], hypermetro_id, replication_id + + +def create_snapshot(snapshot, local_cli, feature_support): + store_spec = {'snapshot': snapshot} + + work_flow = linear_flow.Flow('create_snapshot') + work_flow.add( + CreateSnapshotTask(local_cli, feature_support), + WaitSnapshotReadyTask(local_cli), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + snapshot_id = engine.storage.fetch('snapshot_id') + snapshot_wwn = engine.storage.fetch('snapshot_wwn') + + return snapshot_id, snapshot_wwn + + +def delete_snapshot(snapshot, local_cli): + store_spec = {'snapshot': snapshot} + work_flow = linear_flow.Flow('delete_snapshot') + work_flow.add( + CheckSnapshotExistTask(local_cli), + DeleteSnapshotTask(local_cli), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + +def extend_volume(volume, new_size, local_cli, hypermetro_rmt_cli, + replication_rmt_cli, configuration): + store_spec = {'volume': volume, + 'new_size': int(new_size) * constants.CAPACITY_UNIT} + work_flow = 
linear_flow.Flow('extend_volume') + work_flow.add( + CheckLunExistTask(local_cli), + ExtendHyperMetroTask(local_cli, hypermetro_rmt_cli, configuration), + ExtendReplicationTask(local_cli, replication_rmt_cli, configuration), + ExtendVolumeTask(local_cli) + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + +def retype(volume, new_opts, local_cli, hypermetro_rmt_cli, + replication_rmt_cli, configuration, feature_support): + store_spec = {'volume': volume} + + work_flow = linear_flow.Flow('retype_volume') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support, new_opts), + CheckLunExistTask(local_cli), + UpdateLunTask(local_cli), + UpdateQoSTask(local_cli), + UpdateCacheTask(local_cli), + UpdatePartitionTask(local_cli), + DeleteHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration.hypermetro), + DeleteReplicationTask( + local_cli, replication_rmt_cli, configuration.replication), + CreateHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration.hypermetro), + CreateReplicationTask( + local_cli, replication_rmt_cli, configuration.replication), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + hypermetro_id = engine.storage.fetch('hypermetro_id') + replication_id = engine.storage.fetch('replication_id') + return hypermetro_id, replication_id + + +def retype_by_migrate(volume, new_opts, host, local_cli, hypermetro_rmt_cli, + replication_rmt_cli, configuration, feature_support): + store_spec = {'volume': volume} + + work_flow = linear_flow.Flow('retype_volume_by_migrate') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support, new_opts), + CheckLunExistTask(local_cli), + CreateMigratedLunTask(local_cli, host, feature_support), + WaitLunOnlineTask(local_cli, rebind={'lun_id': 'tgt_lun_id'}), + CreateMigrateTask(local_cli, rebind={'src_lun_id': 'lun_id'}), + WaitMigrateDoneTask(local_cli), + UpdateQoSTask(local_cli), + AddCacheTask(local_cli), + AddPartitionTask(local_cli), 
+ CreateHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration.hypermetro, + rebind={'lun_info': 'tgt_lun_info'}), + CreateReplicationTask( + local_cli, replication_rmt_cli, configuration.replication, + rebind={'lun_info': 'tgt_lun_info'}), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + hypermetro_id = engine.storage.fetch('hypermetro_id') + replication_id = engine.storage.fetch('replication_id') + return hypermetro_id, replication_id + + +def manage_existing(volume, existing_ref, local_cli, hypermetro_rmt_cli, + replication_rmt_cli, configuration, feature_support): + store_spec = {'volume': volume} + + work_flow = linear_flow.Flow('manage_volume') + work_flow.add( + LunOptsCheckTask(local_cli, feature_support), + ManageVolumePreCheckTask( + local_cli, volume, existing_ref, configuration), + ManageLunTask(local_cli), + UpdateQoSTask(local_cli), + UpdateLunTask(local_cli), + UpdateCacheTask(local_cli), + UpdatePartitionTask(local_cli), + DeleteHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration.hypermetro), + DeleteReplicationTask( + local_cli, replication_rmt_cli, configuration.replication), + CreateHyperMetroTask( + local_cli, hypermetro_rmt_cli, configuration.hypermetro), + CreateReplicationTask( + local_cli, replication_rmt_cli, configuration.replication), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + lun_info = engine.storage.fetch('lun_info') + hypermetro_id = engine.storage.fetch('hypermetro_id') + replication_id = engine.storage.fetch('replication_id') + return lun_info['ID'], lun_info['WWN'], hypermetro_id, replication_id + + +def manage_existing_snapshot(snapshot, existing_ref, local_cli): + store_spec = {'snapshot': snapshot} + + work_flow = linear_flow.Flow('manage_snapshot') + work_flow.add( + ManageSnapshotPreCheckTask(local_cli, snapshot, existing_ref), + ManageSnapshotTask(local_cli), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) 
+ engine.run() + + snapshot_info = engine.storage.fetch('snapshot_info') + return snapshot_info['ID'], snapshot_info['WWN'] + + +def create_group(group, local_cli, hypermetro_rmt_cli, replication_rmt_cli, + configuration, feature_support): + opts = huawei_utils.get_group_type_params(group) + store_spec = {'group': group, + 'opts': opts} + + work_flow = linear_flow.Flow('create_group') + work_flow.add( + CreateHyperMetroGroupTask( + local_cli, hypermetro_rmt_cli, configuration.hypermetro, + feature_support), + CreateReplicationGroupTask( + local_cli, replication_rmt_cli, configuration.replication, + feature_support), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + +def initialize_iscsi_connection(lun, lun_type, connector, client, + configuration): + store_spec = {'connector': connector, + 'lun': lun, + 'lun_type': lun_type} + work_flow = linear_flow.Flow('initialize_iscsi_connection') + + if lun_type == constants.LUN_TYPE: + work_flow.add(CheckLunExistTask(client, rebind={'volume': 'lun'})) + else: + work_flow.add( + CheckSnapshotExistTask( + client, provides=('snapshot_info', 'lun_id'), + rebind={'snapshot': 'lun'})) + + work_flow.add( + CreateHostTask(client), + GetISCSIConnectionTask(client, configuration.iscsi_info), + AddISCSIInitiatorTask(client, configuration.iscsi_info), + CreateHostGroupTask(client), + CreateLunGroupTask(client), + CreateMappingViewTask(client), + GetISCSIPropertiesTask(), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + return engine.storage.fetch('mapping_info') + + +def initialize_remote_iscsi_connection(hypermetro_id, connector, + client, configuration): + store_spec = {'connector': connector, + 'lun_type': constants.LUN_TYPE} + work_flow = linear_flow.Flow('initialize_remote_iscsi_connection') + + work_flow.add( + GetHyperMetroRemoteLunTask(client, hypermetro_id), + CreateHostTask(client), + GetISCSIConnectionTask(client, configuration.hypermetro['iscsi_info']), 
+ AddISCSIInitiatorTask(client, configuration.hypermetro['iscsi_info']), + CreateHostGroupTask(client), + CreateLunGroupTask(client), + CreateMappingViewTask(client), + GetISCSIPropertiesTask(client), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + return engine.storage.fetch('mapping_info') + + +def terminate_iscsi_connection(lun, lun_type, connector, client): + store_spec = {'connector': connector, + 'lun': lun, + 'lun_type': lun_type} + work_flow = linear_flow.Flow('terminate_iscsi_connection') + + if lun_type == constants.LUN_TYPE: + work_flow.add( + GetLunIDTask(client, rebind={'volume': 'lun'}), + ) + else: + work_flow.add( + GetSnapshotIDTask( + client, provides='lun_id', rebind={'snapshot': 'lun'}), + ) + + work_flow.add( + GetLunMappingTask(client), + ClearLunMappingTask(client), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + +def terminate_remote_iscsi_connection(hypermetro_id, connector, client): + store_spec = {'connector': connector} + work_flow = linear_flow.Flow('terminate_remote_iscsi_connection') + + work_flow.add( + GetHyperMetroRemoteLunTask(client, hypermetro_id), + GetLunMappingTask(client), + ClearLunMappingTask(client, inject={'lun_type': constants.LUN_TYPE}), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + +def initialize_fc_connection(lun, lun_type, connector, fc_san, client, + configuration): + store_spec = {'connector': connector, + 'lun': lun, + 'lun_type': lun_type} + work_flow = linear_flow.Flow('initialize_fc_connection') + + if lun_type == constants.LUN_TYPE: + work_flow.add(CheckLunExistTask(client, rebind={'volume': 'lun'})) + else: + work_flow.add( + CheckSnapshotExistTask( + client, provides=('snapshot_info', 'lun_id'), + rebind={'snapshot': 'lun'})) + + work_flow.add( + CreateHostTask(client), + GetFCConnectionTask(client, fc_san, configuration), + AddFCInitiatorTask(client, configuration.fc_info), + 
CreateHostGroupTask(client), + CreateLunGroupTask(client), + CreateFCPortGroupTask(client, fc_san), + CreateMappingViewTask(client), + GetFCPropertiesTask(), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + return engine.storage.fetch('mapping_info') + + +def initialize_remote_fc_connection(hypermetro_id, connector, fc_san, client, + configuration): + store_spec = {'connector': connector, + 'lun_type': constants.LUN_TYPE} + work_flow = linear_flow.Flow('initialize_remote_fc_connection') + + work_flow.add( + GetHyperMetroRemoteLunTask(client, hypermetro_id), + CreateHostTask(client), + GetFCConnectionTask(client, fc_san, configuration), + AddFCInitiatorTask(client, configuration.hypermetro['fc_info']), + CreateHostGroupTask(client), + CreateLunGroupTask(client), + CreateFCPortGroupTask(client, fc_san), + CreateMappingViewTask(client), + GetFCPropertiesTask(), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + return engine.storage.fetch('mapping_info') + + +def terminate_fc_connection(lun, lun_type, connector, fc_san, client): + store_spec = {'connector': connector, + 'lun': lun, + 'lun_type': lun_type} + work_flow = linear_flow.Flow('terminate_fc_connection') + + if lun_type == constants.LUN_TYPE: + work_flow.add( + GetLunIDTask(client, rebind={'volume': 'lun'}), + ) + else: + work_flow.add( + GetSnapshotIDTask( + client, provides='lun_id', rebind={'snapshot': 'lun'}), + ) + + work_flow.add( + GetLunMappingTask(client), + ClearLunMappingTask(client, fc_san), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + return engine.storage.fetch('ini_tgt_map') + + +def terminate_remote_fc_connection(hypermetro_id, connector, fc_san, client): + store_spec = {'connector': connector} + work_flow = linear_flow.Flow('terminate_remote_fc_connection') + + work_flow.add( + GetHyperMetroRemoteLunTask(client, hypermetro_id), + GetLunMappingTask(client), + 
ClearLunMappingTask(client, fc_san, + inject={'lun_type': constants.LUN_TYPE}), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + return engine.storage.fetch('ini_tgt_map') + + +def failover(volumes, local_cli, replication_rmt_cli, configuration): + store_spec = {'volumes': volumes} + work_flow = linear_flow.Flow('failover') + work_flow.add( + ClassifyVolumeTask(), + FailoverVolumeTask(local_cli, replication_rmt_cli, + configuration.replication), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + volumes_update = engine.storage.fetch('volumes_update') + return volumes_update + + +def failback(volumes, local_cli, replication_rmt_cli, configuration): + store_spec = {'volumes': volumes} + work_flow = linear_flow.Flow('failback') + work_flow.add( + ClassifyVolumeTask(), + FailbackVolumeTask(local_cli, replication_rmt_cli, + configuration.replication), + ) + + engine = taskflow.engines.load(work_flow, store=store_spec) + engine.run() + + volumes_update = engine.storage.fetch('volumes_update') + return volumes_update diff --git a/Cinder/Train/huawei_utils.py b/Cinder/Train/huawei_utils.py new file mode 100644 index 0000000..6e7414d --- /dev/null +++ b/Cinder/Train/huawei_utils.py @@ -0,0 +1,583 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import hashlib +import json +import re +import retrying +import six + +from oslo_log import log as logging +from oslo_utils import strutils + +from cinder import context +from cinder import exception +from cinder.i18n import _ +from cinder import objects +from cinder.objects import fields +from cinder.volume.drivers.huawei import constants +from cinder.volume import qos_specs +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + + +def encode_name(name): + encoded_name = hashlib.md5(name.encode('utf-8')).hexdigest() + prefix = name.split('-')[0] + '-' + postfix = encoded_name[:constants.MAX_NAME_LENGTH - len(prefix)] + return prefix + postfix + + +def old_encode_name(name): + pre_name = name.split("-")[0] + vol_encoded = six.text_type(hash(name)) + if vol_encoded.startswith('-'): + newuuid = pre_name + vol_encoded + else: + newuuid = pre_name + '-' + vol_encoded + return newuuid + + +def encode_host_name(name): + if name and len(name) > constants.MAX_NAME_LENGTH: + encoded_name = hashlib.md5(name.encode('utf-8')).hexdigest() + return encoded_name[:constants.MAX_NAME_LENGTH] + return name + + +def old_encode_host_name(name): + if name and len(name) > constants.MAX_NAME_LENGTH: + name = six.text_type(hash(name)) + return name + + +def wait_for_condition(func, interval, timeout): + def _retry_on_result(result): + return not result + + def _retry_on_exception(result): + return False + + r = retrying.Retrying(retry_on_result=_retry_on_result, + retry_on_exception=_retry_on_exception, + wait_fixed=interval * 1000, + stop_max_delay=timeout * 1000) + r.call(func) + + +def _get_volume_type(volume): + if volume.volume_type: + return volume.volume_type + if volume.volume_type_id: + return volume_types.get_volume_type(None, volume.volume_type_id) + + +def get_volume_params(volume): + volume_type = _get_volume_type(volume) + return get_volume_type_params(volume_type) + + +def get_volume_type_params(volume_type): + specs = {} + if 
isinstance(volume_type, dict) and volume_type.get('extra_specs'): + specs = volume_type['extra_specs'] + elif isinstance(volume_type, objects.VolumeType + ) and volume_type.extra_specs: + specs = volume_type.extra_specs + + vol_params = get_volume_params_from_specs(specs) + vol_params['qos'] = None + + if isinstance(volume_type, dict) and volume_type.get('qos_specs_id'): + vol_params['qos'] = _get_qos_specs(volume_type['qos_specs_id']) + elif isinstance(volume_type, objects.VolumeType + ) and volume_type.qos_specs_id: + vol_params['qos'] = _get_qos_specs(volume_type.qos_specs_id) + + LOG.info('volume opts %s.', vol_params) + return vol_params + + +def get_volume_params_from_specs(specs): + opts = _get_opts_from_specs(specs) + + _verify_smartcache_opts(opts) + _verify_smartpartition_opts(opts) + _verify_smartthin_opts(opts) + _verify_controller_opts(opts) + _verify_application_type_opts(opts) + + return opts + + +def _get_bool_param(k, v): + words = v.split() + if len(words) == 2 and words[0] == '': + return strutils.bool_from_string(words[1], strict=True) + + msg = _("%(k)s spec must be specified as %(k)s=' True' " + "or ' False'.") % {'k': k} + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +def _get_replication_type_param(k, v): + words = v.split() + if len(words) == 2 and words[0] == '': + REPLICA_SYNC_TYPES = {'sync': constants.REPLICA_SYNC_MODEL, + 'async': constants.REPLICA_ASYNC_MODEL} + sync_type = words[1].lower() + if sync_type in REPLICA_SYNC_TYPES: + return REPLICA_SYNC_TYPES[sync_type] + + msg = _("replication_type spec must be specified as " + "replication_type=' sync' or ' async'.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +def _get_string_param(k, v): + if not v: + msg = _("%s spec must be specified as a string.") % k + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + return v + + +def _get_opts_from_specs(specs): + """Get the well defined extra specs.""" + opts = {} + + opts_capability = { + 
'capabilities:smarttier': (_get_bool_param, False), + 'capabilities:smartcache': (_get_bool_param, False), + 'capabilities:smartpartition': (_get_bool_param, False), + 'capabilities:thin_provisioning_support': (_get_bool_param, False), + 'capabilities:thick_provisioning_support': (_get_bool_param, False), + 'capabilities:hypermetro': (_get_bool_param, False), + 'capabilities:replication_enabled': (_get_bool_param, False), + 'replication_type': (_get_replication_type_param, + constants.REPLICA_ASYNC_MODEL), + 'smarttier:policy': (_get_string_param, None), + 'smartcache:cachename': (_get_string_param, None), + 'smartpartition:partitionname': (_get_string_param, None), + 'huawei_controller:controllername': (_get_string_param, None), + 'capabilities:dedup': (_get_bool_param, None), + 'capabilities:compression': (_get_bool_param, None), + 'capabilities:huawei_controller': (_get_bool_param, False), + 'capabilities:huawei_application_type': (_get_bool_param, False), + 'huawei_application_type:applicationname': (_get_string_param, None), + } + + def _get_opt_key(spec_key): + key_split = spec_key.split(':') + if len(key_split) == 1: + return key_split[0] + else: + return key_split[1] + + for spec_key in opts_capability: + opt_key = _get_opt_key(spec_key) + opts[opt_key] = opts_capability[spec_key][1] + + for key, value in six.iteritems(specs): + if key not in opts_capability: + continue + + func = opts_capability[key][0] + opt_key = _get_opt_key(key) + opts[opt_key] = func(key, value) + + return opts + + +def _get_qos_specs(qos_specs_id): + ctxt = context.get_admin_context() + specs = qos_specs.get_qos_specs(ctxt, qos_specs_id) + if specs is None: + return {} + + if specs.get('consumer') == 'front-end': + return {} + + kvs = specs.get('specs', {}) + LOG.info('The QoS specs is: %s.', kvs) + + qos = {'IOTYPE': kvs.pop('IOType', None)} + + if qos['IOTYPE'] not in constants.QOS_IOTYPES: + msg = _('IOType must be in %(types)s.' 
+ ) % {'types': constants.QOS_IOTYPES} + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + for k, v in kvs.items(): + if k not in constants.QOS_SPEC_KEYS: + msg = _('QoS key %s is not valid.') % k + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if int(v) <= 0: + msg = _('QoS value for %s must > 0.') % k + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + qos[k.upper()] = v + + if len(qos) < 2: + msg = _('QoS policy must specify both IOType and one another ' + 'qos spec, got policy: %s.') % qos + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + qos_keys = set(qos.keys()) + if (qos_keys & set(constants.UPPER_LIMIT_KEYS) and + qos_keys & set(constants.LOWER_LIMIT_KEYS)): + msg = _('QoS policy upper limit and lower limit ' + 'conflict, QoS policy: %s.') % qos + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + return qos + + +def _verify_smartthin_opts(opts): + if (opts['thin_provisioning_support'] and + opts['thick_provisioning_support']): + msg = _('Cannot set thin and thick at the same time.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + elif opts['thin_provisioning_support']: + opts['LUNType'] = constants.THIN_LUNTYPE + elif opts['thick_provisioning_support']: + opts['LUNType'] = constants.THICK_LUNTYPE + + +def _verify_smartcache_opts(opts): + if opts['smartcache'] and not opts['cachename']: + msg = _('Cache name is not specified, please set ' + 'smartcache:cachename in extra specs.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +def _verify_application_type_opts(opts): + if opts['huawei_application_type'] and not opts['applicationname']: + msg = _('WorkloadType name is None, please set ' + 'huawei_application_type:applicationname in extra specs.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +def _verify_controller_opts(opts): + if opts['huawei_controller'] and not opts['controllername']: + msg = _('Controller name is None, please set ' + 
'huawei_controller:controllername in extra specs.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +def _verify_smartpartition_opts(opts): + if opts['smartpartition'] and not opts['partitionname']: + msg = _('Partition name is not specified, please set ' + 'smartpartition:partitionname in extra specs.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +def wait_lun_online(client, lun_id, wait_interval=None, wait_timeout=None): + def _lun_online(): + result = client.get_lun_info_by_id(lun_id) + if result['HEALTHSTATUS'] not in (constants.STATUS_HEALTH, + constants.STATUS_INITIALIZE): + err_msg = _('LUN %s is abnormal.') % lun_id + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + if result['RUNNINGSTATUS'] in (constants.LUN_INITIALIZING, + constants.STATUS_INITIALIZE): + return False + + return True + + if not wait_interval: + wait_interval = constants.DEFAULT_WAIT_INTERVAL + if not wait_timeout: + wait_timeout = wait_interval * 10 + + wait_for_condition(_lun_online, wait_interval, wait_timeout) + + +def is_not_exist_exc(exc): + msg = getattr(exc, 'msg', '') + return 'not exist' in msg + + +def to_string(**kwargs): + return json.dumps(kwargs) if kwargs else '' + + +def to_dict(text): + return json.loads(text) if text else {} + + +def get_volume_private_data(volume): + if not volume.provider_location: + return {} + + try: + info = json.loads(volume.provider_location) + except Exception: + LOG.exception("Decode provider_location error") + return {} + + if isinstance(info, dict): + info['hypermetro'] = (info.get('hypermetro_id') + or info.get('hypermetro')) + return info + + # To keep compatible with old driver version + return {'huawei_lun_id': six.text_type(info), + 'huawei_lun_wwn': volume.admin_metadata.get('huawei_lun_wwn'), + 'huawei_sn': volume.metadata.get('huawei_sn'), + 'hypermetro': True if volume.metadata.get( + 'hypermetro_id') else False, + } + + +def get_volume_metadata(volume): + if 
isinstance(volume, objects.Volume): + return volume.metadata + if volume.get('volume_metadata'): + return {item['key']: item['value'] for item in + volume['volume_metadata']} + return {} + + +def get_replication_data(volume): + if not volume.replication_driver_data: + return {} + + return json.loads(volume.replication_driver_data) + + +def get_snapshot_private_data(snapshot): + if not snapshot.provider_location: + return {} + + info = json.loads(snapshot.provider_location) + if isinstance(info, dict): + return info + + # To keep compatible with old driver version + return {'huawei_snapshot_id': six.text_type(info), + 'huawei_snapshot_wwn': snapshot.metadata.get( + 'huawei_snapshot_wwn'), + } + + +def get_external_lun_info(client, external_ref): + lun_info = None + if 'source-id' in external_ref: + lun = client.get_lun_info_by_id(external_ref['source-id']) + lun_info = client.get_lun_info_by_name(lun['NAME']) + elif 'source-name' in external_ref: + lun_info = client.get_lun_info_by_name(external_ref['source-name']) + + return lun_info + + +def get_external_snapshot_info(client, external_ref): + snapshot_info = None + if 'source-id' in external_ref: + snapshot_info = client.get_snapshot_info_by_id( + external_ref['source-id']) + elif 'source-name' in external_ref: + snapshot_info = client.get_snapshot_info_by_name( + external_ref['source-name']) + + return snapshot_info + + +def get_lun_info(client, volume): + metadata = get_volume_private_data(volume) + + volume_name = encode_name(volume.id) + lun_info = client.get_lun_info_by_name(volume_name) + + # If new encoded way not found, try the old encoded way. 
+ if not lun_info: + volume_name = old_encode_name(volume.id) + lun_info = client.get_lun_info_by_name(volume_name) + + if not lun_info and metadata.get('huawei_lun_id'): + lun_info = client.get_lun_info_filter_id(metadata['huawei_lun_id']) + + if lun_info and ('huawei_lun_wwn' in metadata and + lun_info.get('WWN') != metadata['huawei_lun_wwn']): + lun_info = None + + return lun_info + + +def get_snapshot_info(client, snapshot): + name = encode_name(snapshot.id) + snapshot_info = client.get_snapshot_info_by_name(name) + + # If new encoded way not found, try the old encoded way. + if not snapshot_info: + name = old_encode_name(snapshot.id) + snapshot_info = client.get_snapshot_info_by_name(name) + + return snapshot_info + + +def get_host_id(client, host_name): + encoded_name = encode_host_name(host_name) + host_id = client.get_host_id_by_name(encoded_name) + if encoded_name == host_name: + return host_id + + if not host_id: + encoded_name = old_encode_host_name(host_name) + host_id = client.get_host_id_by_name(encoded_name) + + return host_id + + +def get_hypermetro_group(client, group_id): + encoded_name = encode_name(group_id) + group = client.get_metrogroup_by_name(encoded_name) + if not group: + encoded_name = old_encode_name(group_id) + group = client.get_metrogroup_by_name(encoded_name) + return group + + +def get_replication_group(client, group_id): + encoded_name = encode_name(group_id) + group = client.get_replication_group_by_name(encoded_name) + if not group: + encoded_name = old_encode_name(group_id) + group = client.get_replication_group_by_name(encoded_name) + return group + + +def get_volume_model_update(volume, **kwargs): + private_data = get_volume_private_data(volume) + + if kwargs.get('hypermetro_id'): + private_data['hypermetro'] = True + elif 'hypermetro_id' in private_data: + private_data.pop('hypermetro_id') + private_data['hypermetro'] = False + + if 'huawei_lun_id' in kwargs: + private_data['huawei_lun_id'] = kwargs['huawei_lun_id'] + if 
'huawei_lun_wwn' in kwargs: + private_data['huawei_lun_wwn'] = kwargs['huawei_lun_wwn'] + if 'huawei_sn' in kwargs: + private_data['huawei_sn'] = kwargs['huawei_sn'] + + model_update = {'provider_location': to_string(**private_data)} + + if kwargs.get('replication_id'): + model_update['replication_driver_data'] = to_string( + pair_id=kwargs.get('replication_id')) + model_update['replication_status'] = fields.ReplicationStatus.ENABLED + else: + model_update['replication_driver_data'] = None + model_update['replication_status'] = fields.ReplicationStatus.DISABLED + + return model_update + + +def get_group_type_params(group): + opts = [] + for volume_type in group.volume_types: + opt = get_volume_type_params(volume_type) + opts.append(opt) + return opts + + +def get_hypermetro(client, volume): + lun_name = encode_name(volume.id) + hypermetro = client.get_hypermetro_by_lun_name(lun_name) + return hypermetro + + +def _set_config_info(ini, find_info, tmp_find_info): + if find_info is None and tmp_find_info: + find_info = tmp_find_info + + if ini: + config = ini + elif find_info: + config = find_info + else: + config = {} + return config + + +def find_config_info(config_info, connector=None, initiator=None): + if connector: + ini = config_info['initiators'].get(connector['initiator']) + else: + ini = config_info['initiators'].get(initiator) + + find_info = None + tmp_find_info = None + if not ini: + for item in config_info['initiators']: + ini_info = config_info['initiators'][item] + if ini_info.get('HostName'): + if ini_info.get('HostName') == '*': + tmp_find_info = ini_info + elif re.search(ini_info.get('HostName'), connector['host']): + find_info = ini_info + break + + return _set_config_info(ini, find_info, tmp_find_info) + + +def is_support_clone_pair(client): + array_info = client.get_array_info() + version_info = array_info['PRODUCTVERSION'] + if version_info >= constants.SUPPORT_CLONE_PAIR_VERSION: + return True + + +def need_migrate(volume, host, new_opts, 
orig_lun_info): + if volume.host != host['host']: + return True + elif ('LUNType' in new_opts and + new_opts['LUNType'] != orig_lun_info['ALLOCTYPE']): + return True + elif (new_opts['compression'] and + not (orig_lun_info.get('ENABLECOMPRESSION') == 'true')): + return True + elif (new_opts['dedup'] and + not (orig_lun_info.get('ENABLESMARTDEDUP') == 'true')): + return True + return False + + +def remove_lun_from_lungroup(client, lun_id): + lun_group_ids = client.get_lungroup_ids_by_lun_id(lun_id) + if lun_group_ids and len(lun_group_ids) == 1: + client.remove_lun_from_lungroup( + lun_group_ids[0], lun_id, constants.LUN_TYPE) diff --git a/Cinder/Train/hypermetro.py b/Cinder/Train/hypermetro.py new file mode 100644 index 0000000..421d697 --- /dev/null +++ b/Cinder/Train/hypermetro.py @@ -0,0 +1,338 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
from oslo_log import log as logging
from oslo_utils import strutils

import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow import task
from taskflow.types import failure

from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import huawei_utils


LOG = logging.getLogger(__name__)


class _CheckCreateConditionTask(task.Task):
    """Taskflow task: verify the hypermetro domain and remote pool exist."""

    default_provides = set(('domain_id', 'remote_pool_id'))

    def __init__(self, client, hypermetro_configs, *args, **kwargs):
        super(_CheckCreateConditionTask, self).__init__(*args, **kwargs)
        self.client = client
        self.hypermetro_configs = hypermetro_configs

    def execute(self):
        """Resolve domain and pool names to IDs; raise if either is missing."""
        domain_name = self.hypermetro_configs['metro_domain']
        domain_id = self.client.get_hypermetro_domain_id(domain_name)
        if not domain_id:
            msg = _("Hypermetro domain %s doesn't exist.") % domain_name
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # Get the remote pool info.
        hypermetro_pool = self.hypermetro_configs['storage_pools'][0]
        pool_id = self.client.get_pool_id(hypermetro_pool)
        if not pool_id:
            msg = _("Remote pool %s does not exist.") % hypermetro_pool
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        return {'domain_id': domain_id,
                'remote_pool_id': pool_id}


class _CreateRemoteLunTask(task.Task):
    """Taskflow task: create the remote LUN; delete it again on revert."""

    default_provides = set(('remote_lun_id',))

    def __init__(self, client, *args, **kwargs):
        super(_CreateRemoteLunTask, self).__init__(*args, **kwargs)
        self.client = client

    def execute(self, lun_params, remote_pool_id):
        # Create remote hypermetro lun.
        lun_params['PARENTID'] = remote_pool_id
        remote_lun = self.client.create_lun(lun_params)
        huawei_utils.wait_lun_online(self.client, remote_lun['ID'])
        return {'remote_lun_id': remote_lun['ID']}

    def revert(self, result, **kwargs):
        # A Failure result means execute() itself failed: nothing to clean up.
        if isinstance(result, failure.Failure):
            return
        self.client.delete_lun(result['remote_lun_id'])


class _CreateHyperMetroTask(task.Task):
    """Taskflow task: create the hypermetro pair and optionally sync it."""

    default_provides = set(('hypermetro_id',))

    def __init__(self, client, hypermetro_configs, is_sync, *args, **kwargs):
        super(_CreateHyperMetroTask, self).__init__(*args, **kwargs)
        self.client = client
        self.hypermetro_configs = hypermetro_configs
        self.sync = is_sync

    def _is_sync_completed(self, metro_id):
        """Poll helper: True when the pair is healthy and back to normal.

        Raises if the pair leaves the normal/syncing/to-be-synced states or
        turns unhealthy, so the wait loop fails fast instead of timing out.
        """
        metro_info = self.client.get_hypermetro_by_id(metro_id)
        if ((metro_info['HEALTHSTATUS'] != constants.METRO_HEALTH_NORMAL) or
                metro_info['RUNNINGSTATUS'] not in (
                    constants.METRO_RUNNING_NORMAL,
                    constants.METRO_RUNNING_SYNC,
                    constants.RUNNING_TO_BE_SYNC)):
            msg = _("HyperMetro pair %(id)s is not in a available status, "
                    "RunningStatus is: %(run)s, HealthStatus is: %(health)s"
                    ) % {"id": metro_id,
                         "run": metro_info.get('RUNNINGSTATUS'),
                         "health": metro_info.get("HEALTHSTATUS")}
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        if metro_info.get('RUNNINGSTATUS') == constants.METRO_RUNNING_NORMAL:
            return True
        return False

    def execute(self, domain_id, local_lun_id, remote_lun_id):
        """Create the pair; when is_sync, start sync and optionally wait."""
        hypermetro_param = {"DOMAINID": domain_id,
                            "HCRESOURCETYPE": '1',
                            "ISFIRSTSYNC": False,
                            "LOCALOBJID": local_lun_id,
                            "REMOTEOBJID": remote_lun_id,
                            "SPEED": self.hypermetro_configs['sync_speed']}
        if self.sync:
            hypermetro_param.update({"ISFIRSTSYNC": True})

        hypermetro_pair = self.client.create_hypermetro(
            hypermetro_param)
        if self.sync:
            self.client.sync_hypermetro(hypermetro_pair['ID'])
            # Only block on sync completion when configured to do so.
            if strutils.bool_from_string(
                    self.hypermetro_configs['metro_sync_completed']):
                huawei_utils.wait_for_condition(
                    lambda: self._is_sync_completed(hypermetro_pair['ID']),
                    constants.DEFAULT_WAIT_INTERVAL,
                    constants.DEFAULT_WAIT_INTERVAL * 10)

        return {'hypermetro_id': hypermetro_pair['ID']}


class HuaweiHyperMetro(object):
    """Hypermetro (active-active replication) operations across two arrays.

    local_cli/remote_cli are REST clients for the two arrays; configs holds
    the hypermetro settings (domain, pools, sync speed, ...).
    """

    def __init__(self, local_cli, remote_cli, configs):
        self.local_cli = local_cli
        self.remote_cli = remote_cli
        self.configs = configs

    def create_hypermetro(self, local_lun_id, lun_params, is_sync):
        """Create a hypermetro pair for a local LUN via a taskflow flow.

        The flow checks preconditions, creates the remote LUN (with
        automatic rollback), then creates the pair. Returns the pair ID.
        """
        LOG.info('To create hypermetro for local lun %s', local_lun_id)

        store_spec = {'local_lun_id': local_lun_id,
                      'lun_params': lun_params}
        work_flow = linear_flow.Flow('create_hypermetro')
        work_flow.add(_CheckCreateConditionTask(self.remote_cli, self.configs),
                      _CreateRemoteLunTask(self.remote_cli),
                      _CreateHyperMetroTask(self.local_cli, self.configs,
                                            is_sync))

        engine = taskflow.engines.load(work_flow, store=store_spec)
        engine.run()

        return engine.storage.fetch('hypermetro_id')

    def delete_hypermetro(self, volume):
        """Tear down a volume's hypermetro pair and its remote LUN.

        If no pair exists, still best-effort delete a remote LUN with the
        volume's encoded name (leftover from a partial create).
        """
        lun_name = huawei_utils.encode_name(volume.id)
        hypermetro = self.local_cli.get_hypermetro_by_lun_name(lun_name)

        if hypermetro:
            huawei_utils.remove_lun_from_lungroup(
                self.remote_cli, hypermetro['REMOTEOBJID'])
            # A running pair must be stopped before it can be deleted.
            if (hypermetro['RUNNINGSTATUS'] in (
                    constants.METRO_RUNNING_NORMAL,
                    constants.METRO_RUNNING_SYNC)):
                self.local_cli.stop_hypermetro(hypermetro['ID'])

            self.local_cli.delete_hypermetro(hypermetro['ID'])
            self.remote_cli.delete_lun(hypermetro['REMOTEOBJID'])
        else:
            remote_lun_info = self.remote_cli.get_lun_info_by_name(lun_name)
            if remote_lun_info:
                self.remote_cli.delete_lun(remote_lun_info['ID'])

    def extend_hypermetro(self, hypermetro_id, new_size):
        """Extend both LUNs of a pair, stopping/re-syncing around the resize.

        If the pair belongs to a consistency group, the whole group is
        stopped and re-synced instead of the single pair.
        """
        LOG.info('Extend hypermetro pair %s', hypermetro_id)
        metro_info = self.remote_cli.get_hypermetro_by_id(hypermetro_id)
        metrogroup = None
        if metro_info['ISINCG'] == 'true':
            cg_id = metro_info['CGID']
            metrogroup = huawei_utils.get_hypermetro_group(
                self.local_cli, cg_id)

        if metrogroup:
            self._stop_consistencygroup_if_need(metrogroup)
        elif ((metro_info['HEALTHSTATUS'] == constants.METRO_HEALTH_NORMAL)
                and metro_info['RUNNINGSTATUS'] in (
                    constants.METRO_RUNNING_NORMAL,
                    constants.METRO_RUNNING_SYNC)):
            self.local_cli.stop_hypermetro(hypermetro_id)

        try:
            # NOTE(review): metro_info came from the remote client, so its
            # LOCALOBJID is the remote array's LUN and REMOTEOBJID the
            # local one — hence the apparently swapped client calls.
            self.remote_cli.extend_lun(metro_info['LOCALOBJID'], new_size)
            self.local_cli.extend_lun(metro_info['REMOTEOBJID'], new_size)
        finally:
            # Always restart synchronization, even if the extend failed.
            if metrogroup:
                self.local_cli.sync_metrogroup(metrogroup['ID'])
            else:
                self.local_cli.sync_hypermetro(hypermetro_id)

    def sync_hypermetro(self, hypermetro_id):
        """Start synchronization of a hypermetro pair."""
        self.local_cli.sync_hypermetro(hypermetro_id)

    def create_consistencygroup(self, group_id):
        """Create a hypermetro consistency group in the configured domain."""
        LOG.info("Create hypermetro consistency group %s.", group_id)

        domain_name = self.configs['metro_domain']
        domain_id = self.local_cli.get_hypermetro_domain_id(domain_name)
        if not domain_id:
            msg = _("Hypermetro domain %s doesn't exist.") % domain_name
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        params = {"NAME": huawei_utils.encode_name(group_id),
                  "DESCRIPTION": group_id,
                  "RECOVERYPOLICY": "1",
                  "SPEED": self.configs['sync_speed'],
                  "DOMAINID": domain_id}
        self.local_cli.create_metrogroup(params)

    def delete_consistencygroup(self, group_id, volumes):
        """Delete a hypermetro CG after detaching all member pairs."""
        LOG.info("Delete hypermetro consistency group %s.", group_id)

        metrogroup = huawei_utils.get_hypermetro_group(self.local_cli,
                                                       group_id)
        if not metrogroup:
            LOG.warning('Hypermetro group %s to delete not exist.',
                        group_id)
            return

        self._stop_consistencygroup_if_need(metrogroup)
        self._remove_volume_from_metrogroup(volumes, metrogroup['ID'])
        self.local_cli.delete_metrogroup(metrogroup['ID'])

    def _check_metro_in_group(self, metrogroup_id, metro_id):
        """True if the pair reports membership in the given group."""
        metro_info = self.local_cli.get_hypermetro_by_id(metro_id)
        return (metro_info and metro_info.get('ISINCG') == 'true' and
                metro_info.get('CGID') == metrogroup_id)

    def _ensure_hypermetro_in_group(self, metrogroup_id, metro_ids):
        """Wait until every pair shows up as a member of the group."""
        for metro_id in metro_ids:
            huawei_utils.wait_for_condition(
                lambda: self._check_metro_in_group(metrogroup_id, metro_id),
                constants.DEFAULT_WAIT_INTERVAL,
                constants.DEFAULT_WAIT_INTERVAL * 10)

    def _ensure_hypermetro_not_in_group(self, metrogroup_id, metro_ids):
        """Wait until each pair leaves the group, then re-sync it alone."""
        for metro_id in metro_ids:
            huawei_utils.wait_for_condition(
                lambda: not self._check_metro_in_group(metrogroup_id,
                                                       metro_id),
                constants.DEFAULT_WAIT_INTERVAL,
                constants.DEFAULT_WAIT_INTERVAL * 10)
            self.local_cli.sync_hypermetro(metro_id)

    def _add_volume_to_metrogroup(self, volumes, metrogroup_id):
        """Move each volume's pair into the group (stopping pairs first)."""
        metro_ids = []
        for volume in volumes:
            metadata = huawei_utils.get_volume_private_data(volume)
            if not metadata.get('hypermetro'):
                LOG.warning("Volume %s doesn't have hypermetro.", volume.id)
                continue

            hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume)
            if not hypermetro:
                LOG.warning("Volume %s doesn't have hypermetro on the array.",
                            volume.id)
                continue

            metro_id = hypermetro['ID']
            self._stop_hypermetro_if_need(metro_id)
            self.local_cli.add_metro_to_metrogroup(metrogroup_id, metro_id)
            metro_ids.append(metro_id)

        self._ensure_hypermetro_in_group(metrogroup_id, metro_ids)

    def _remove_volume_from_metrogroup(self, volumes, metrogroup_id):
        """Remove each volume's pair from the group and wait for detachment."""
        metro_ids = []
        for volume in volumes:
            metadata = huawei_utils.get_volume_private_data(volume)
            if not metadata.get('hypermetro'):
                LOG.warning("Volume %s doesn't have hypermetro.", volume.id)
                continue

            hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume)
            if not hypermetro:
                LOG.warning("Volume %s doesn't have hypermetro on the array.",
                            volume.id)
                continue

            metro_id = hypermetro['ID']
            self.local_cli.remove_metro_from_metrogroup(
                metrogroup_id, metro_id)
            metro_ids.append(metro_id)

        self._ensure_hypermetro_not_in_group(metrogroup_id, metro_ids)

    def update_consistencygroup(self, group_id, add_volumes, remove_volumes):
        """Add/remove member volumes of a hypermetro CG, then re-sync it."""
        LOG.info("Update hypermetro consistency group %s.", group_id)

        metrogroup = huawei_utils.get_hypermetro_group(
            self.local_cli, group_id)
        if not metrogroup:
            msg = _('Hypermetro group %s to update not exist.') % group_id
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        self._stop_consistencygroup_if_need(metrogroup)
        self._add_volume_to_metrogroup(add_volumes, metrogroup['ID'])
        self._remove_volume_from_metrogroup(remove_volumes, metrogroup['ID'])
        self.local_cli.sync_metrogroup(metrogroup['ID'])

    def _stop_consistencygroup_if_need(self, metrogroup):
        """Stop the CG only when it is healthy and currently running/syncing."""
        if (metrogroup['HEALTHSTATUS'] == constants.METRO_HEALTH_NORMAL and
                metrogroup['RUNNINGSTATUS'] in
                (constants.METRO_RUNNING_NORMAL,
                 constants.METRO_RUNNING_SYNC)):
            self.local_cli.stop_metrogroup(metrogroup['ID'])

    def _stop_hypermetro_if_need(self, metro_id):
        """Stop a pair only when it is healthy and currently running/syncing."""
        metro_info = self.local_cli.get_hypermetro_by_id(metro_id)
        if metro_info and (
                (metro_info['HEALTHSTATUS'] == constants.METRO_HEALTH_NORMAL)
                and metro_info['RUNNINGSTATUS'] in (
                    constants.METRO_RUNNING_NORMAL,
                    constants.METRO_RUNNING_SYNC)):
            self.local_cli.stop_hypermetro(metro_id)

    def add_hypermetro_to_group(self, group_id, metro_id):
        """Add one pair to an existing CG and re-sync the group."""
        metrogroup = huawei_utils.get_hypermetro_group(
            self.local_cli, group_id)
        if not metrogroup:
            msg = _('Hypermetro group %s to not exist.') % group_id
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        self._stop_consistencygroup_if_need(metrogroup)
        self._stop_hypermetro_if_need(metro_id)
        self.local_cli.add_metro_to_metrogroup(metrogroup['ID'], metro_id)
        self.local_cli.sync_metrogroup(metrogroup['ID'])


# === patch boundary: new file Cinder/Train/replication.py ===
# Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import six

from oslo_log import log as logging
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow import task
from taskflow.types import failure

from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import huawei_utils

LOG = logging.getLogger(__name__)


class BaseReplicationOp(object):
    """Common replication operations shared by pair-level and group-level ops.

    Subclasses supply get_info() and the _create/_delete/_sync/_split/
    _switch/_unprotect_secondary/_protect_secondary primitives.
    """

    def __init__(self, loc_client, rmt_client):
        self.loc_client = loc_client
        self.rmt_client = rmt_client

    def _wait_until_status(self, rep_id, expect_statuses):
        """Block until RUNNINGSTATUS is one of expect_statuses.

        Raises immediately if health degrades while waiting.
        """
        def _status_check():
            info = self.get_info(rep_id)
            if info['HEALTHSTATUS'] != constants.REPLICA_HEALTH_STATUS_NORMAL:
                msg = _('Replication status %s is abnormal.'
                        ) % info['HEALTHSTATUS']
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            if info['RUNNINGSTATUS'] in expect_statuses:
                return True

            return False

        huawei_utils.wait_for_condition(_status_check,
                                        constants.DEFAULT_WAIT_INTERVAL,
                                        constants.DEFAULT_WAIT_TIMEOUT)

    def _wait_until_role(self, rep_id, is_primary):
        """Block until ISPRIMARY equals is_primary ('true'/'false' string)."""
        def _role_check():
            info = self.get_info(rep_id)
            if info['HEALTHSTATUS'] != constants.REPLICA_HEALTH_STATUS_NORMAL:
                msg = _('Replication status %s is abnormal.'
                        ) % info['HEALTHSTATUS']
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            if info['ISPRIMARY'] == is_primary:
                return True

            return False

        huawei_utils.wait_for_condition(_role_check,
                                        constants.DEFAULT_WAIT_INTERVAL,
                                        constants.DEFAULT_WAIT_TIMEOUT)

    def create(self, params):
        return self._create(params)

    def delete(self, rep_id):
        self._delete(rep_id)

    def sync(self, rep_id, client=None):
        # Sync is issued from the local client unless the caller passes one
        # (e.g. from the remote side after a role switch).
        if not client:
            client = self.loc_client
        self._sync(rep_id, client)

    def split(self, rep_id, rep_info=None):
        """Split the replication unless it is already split/interrupted/empty."""
        expect_status = (constants.REPLICA_RUNNING_STATUS_SPLIT,
                         constants.REPLICA_RUNNING_STATUS_INTERRUPTED)
        info = rep_info or self.get_info(rep_id)
        if (info.get('ISEMPTY') == 'true' or
                info['RUNNINGSTATUS'] in expect_status):
            return

        self._split(rep_id)
        self._wait_until_status(rep_id, expect_status)

    def switch(self, rep_id):
        self._switch(rep_id)

    def unprotect_secondary(self, rep_id):
        self._unprotect_secondary(rep_id)

    def protect_secondary(self, rep_id):
        self._protect_secondary(rep_id)

    def failover(self, rep_id):
        """Failover replication.

        Steps:
        1. Split replication.
        2. Set secondary access readable & writable.
        3. Try to switch replication roles.
        """
        self.split(rep_id)
        self.unprotect_secondary(rep_id)
        try:
            self.switch(rep_id)
            self._wait_until_role(rep_id, 'true')
            self.protect_secondary(rep_id)
            self.sync(rep_id, self.rmt_client)
        except Exception:
            # Best effort: even if the role switch fails, the secondary has
            # already been made writable, which is the essential outcome.
            LOG.warning('Switch replication %s roles failed, but secondary '
                        'is readable&writable now.', rep_id)

    def failback(self, rep_id):
        """Failback replication.

        Steps:
        1. Switch the role of replication if needed.
        2. Sync original secondary data back to original primary.
        3. Recover original primary&secondary replication relationship.
        """
        info = self.get_info(rep_id)
        self.split(rep_id, info)
        self.unprotect_secondary(rep_id)

        # If remote lun is primary, means the previous failover
        # didn't switch the replication roles, so we need to switch
        # again to make the original secondary lun primary.
        if info['ISPRIMARY'] == 'true':
            self.switch(rep_id)
            self._wait_until_role(rep_id, 'false')
            self.protect_secondary(rep_id)
            self.sync(rep_id)
            self._wait_until_status(
                rep_id, (constants.REPLICA_RUNNING_STATUS_NORMAL,))

        # Finish by failing over again so roles end up reversed back.
        self.failover(rep_id)


class ReplicationPairOp(BaseReplicationOp):
    """Replication primitives for a single LUN pair."""

    def get_info(self, rep_id):
        # Pair info is queried on the remote array.
        return self.rmt_client.get_replication_pair_by_id(rep_id)

    def _create(self, params):
        return self.loc_client.create_replication_pair(params)

    def _delete(self, rep_id):
        return self.loc_client.delete_replication_pair(rep_id)

    def _sync(self, rep_id, client):
        client.sync_replication_pair(rep_id)

    def _split(self, rep_id):
        self.loc_client.split_replication_pair(rep_id)

    def _switch(self, rep_id):
        self.loc_client.switch_replication_pair(rep_id)

    def _unprotect_secondary(self, rep_id):
        self.rmt_client.set_replication_pair_second_access(
            rep_id, constants.REPLICA_SECOND_RW)

    def _protect_secondary(self, rep_id):
        self.rmt_client.set_replication_pair_second_access(
            rep_id, constants.REPLICA_SECOND_RO)


class ReplicationGroupOp(BaseReplicationOp):
    """Replication primitives for a consistency group of pairs."""

    def get_info(self, rep_id):
        return self.rmt_client.get_replication_group_by_id(rep_id)

    def _create(self, params):
        return self.loc_client.create_replication_group(params)

    def _delete(self, rep_id):
        return self.loc_client.delete_replication_group(rep_id)

    def _sync(self, rep_id, client):
        client.sync_replication_group(rep_id)

    def _split(self, rep_id):
        self.loc_client.split_replication_group(rep_id)

    def _switch(self, rep_id):
        self.loc_client.switch_replication_group(rep_id)

    def _unprotect_secondary(self, rep_id):
        self.rmt_client.set_replication_group_second_access(
            rep_id, constants.REPLICA_SECOND_RW)

    def _protect_secondary(self, rep_id):
        self.rmt_client.set_replication_group_second_access(
            rep_id, constants.REPLICA_SECOND_RO)

    def add_pair_to_group(self, group_id, pair_id):
        return self.loc_client.add_pair_to_replication_group(
            group_id, pair_id)

    def remove_pair_from_group(self, group_id, pair_id):
        return self.loc_client.remove_pair_from_replication_group(
            group_id, pair_id)


class _CheckCreateConditionTask(task.Task):
    """Taskflow task: verify the remote array is a known remote device."""

    default_provides = set(('rmt_dev_id',))

    def __init__(self, loc_client, rmt_client, *args, **kwargs):
        super(_CheckCreateConditionTask, self).__init__(*args, **kwargs)
        self.loc_client = loc_client
        self.rmt_client = rmt_client

    def execute(self):
        rmt_array = self.rmt_client.get_array_info()
        rmt_dev = self.loc_client.get_remote_device_by_wwn(rmt_array['wwn'])
        if not rmt_dev:
            msg = _("Remote device %s doesn't exist.") % rmt_array['wwn']
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        return {'rmt_dev_id': rmt_dev['ID']}


class _CreateRemoteLunTask(task.Task):
    """Taskflow task: create the secondary LUN; delete it again on revert."""

    default_provides = set(('remote_lun_id',))

    def __init__(self, client, *args, **kwargs):
        super(_CreateRemoteLunTask, self).__init__(*args, **kwargs)
        self.client = client

    def execute(self, lun_params, rmt_pool):
        pool_id = self.client.get_pool_id(rmt_pool)
        if not pool_id:
            msg = _('Remote pool %s for replication not exist.') % rmt_pool
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        lun_params['PARENTID'] = pool_id
        remote_lun = self.client.create_lun(lun_params)
        huawei_utils.wait_lun_online(self.client, remote_lun['ID'])
        return {'remote_lun_id': remote_lun['ID']}

    def revert(self, result, **kwargs):
        # A Failure result means execute() itself failed: nothing to clean up.
        if isinstance(result, failure.Failure):
            return
        self.client.delete_lun(result['remote_lun_id'])


class _CreatePairTask(task.Task):
    """Taskflow task: create the replication pair and start the first sync."""

    default_provides = set(('pair_id',))

    def __init__(self, op, configs, *args, **kwargs):
        super(_CreatePairTask, self).__init__(*args, **kwargs)
        self.op = op
        self.configs = configs

    def execute(self, local_lun_id, remote_lun_id, rmt_dev_id, replica_model):
        params = {
            "LOCALRESID": local_lun_id,
            "REMOTEDEVICEID": rmt_dev_id,
            "REMOTERESID": remote_lun_id,
            "REPLICATIONMODEL": replica_model,
            "RECOVERYPOLICY": '1',
            "SPEED": self.configs['sync_speed'],
        }

        # Async replication additionally needs a timed-sync configuration.
        if replica_model == constants.REPLICA_ASYNC_MODEL:
            params['SYNCHRONIZETYPE'] = '2'
            params['TIMINGVAL'] = constants.REPLICA_PERIOD

        pair_info = self.op.create(params)
        self.op.sync(pair_info['ID'])
        return {'pair_id': pair_info['ID']}


class ReplicationManager(object):
    """High-level replication management across a local and a remote array."""

    def __init__(self, local_client, rmt_client, configs):
        self.loc_client = local_client
        self.rmt_client = rmt_client
        self.pair_op = ReplicationPairOp(self.loc_client, self.rmt_client)
        self.group_op = ReplicationGroupOp(self.loc_client, self.rmt_client)
        self.configs = configs

    def create_replica(self, local_lun_id, lun_params, replica_model):
        """Create remote LUN and replication pair.

        Purpose:
        1. create remote lun
        2. create replication pair
        3. sync replication pair
        """
        LOG.info(('Create replication, local lun: %(local_lun_id)s, '
                  'replication model: %(model)s.'),
                 {'local_lun_id': local_lun_id, 'model': replica_model})

        store_spec = {'local_lun_id': local_lun_id,
                      'lun_params': lun_params,
                      'replica_model': replica_model,
                      'rmt_pool': self.configs['storage_pools'][0],
                      }

        work_flow = linear_flow.Flow('create_replication')
        work_flow.add(
            _CheckCreateConditionTask(self.loc_client, self.rmt_client),
            _CreateRemoteLunTask(self.rmt_client),
            _CreatePairTask(self.pair_op, self.configs),
        )

        engine = taskflow.engines.load(work_flow, store=store_spec)
        engine.run()
        return engine.storage.fetch('pair_id')

    def delete_replica(self, pair_id):
        """Split and delete a pair, then delete the secondary LUN.

        Silently returns if the pair no longer exists on the array.
        """
        LOG.info('Delete replication pair %s.', pair_id)
        try:
            pair_info = self.pair_op.get_info(pair_id)
        except exception.VolumeBackendAPIException as exc:
            if huawei_utils.is_not_exist_exc(exc):
                return
            raise

        self.pair_op.split(pair_id)
        self.pair_op.delete(pair_id)
        # NOTE(review): pair_info comes from the remote array, so its
        # LOCALRESID is the remote (secondary) LUN.
        self.rmt_client.delete_lun(pair_info['LOCALRESID'])

    def extend_replica(self, pair_id, new_size):
        """Extend both LUNs of a pair, splitting/re-syncing around the resize.

        If the pair belongs to a consistency group, the whole group is
        split and re-synced instead of the single pair.
        """
        LOG.info('Extend replication pair %s', pair_id)
        pair_info = self.pair_op.get_info(pair_id)

        cg_info = None
        cg_id = None
        if pair_info['ISINCG'] == 'true':
            cg_id = pair_info['CGID']
            cg_info = self.group_op.get_info(cg_id)

        if cg_info:
            self.group_op.split(cg_id, cg_info)
        else:
            self.pair_op.split(pair_id, pair_info)

        try:
            self.rmt_client.extend_lun(pair_info['LOCALRESID'], new_size)
            self.loc_client.extend_lun(pair_info['REMOTERESID'], new_size)
        finally:
            # Always restart synchronization, even if the extend failed.
            if cg_info:
                self.group_op.sync(cg_id)
            else:
                self.pair_op.sync(pair_id)

    def _pre_fail_check(self, volumes, statuc_check_func):
        """Collect pairs/groups for failover/failback and validate status.

        NOTE: parameter name 'statuc_check_func' (sic) is kept as-is.
        Returns (volume ids without replication, group ids, standalone pair
        ids, and a map of volume id -> pair info).
        """
        normal_volumes = []
        pair_ids = []
        group_ids = set()
        volume_pair_infos = {}

        for v in volumes:
            drv_data = huawei_utils.to_dict(v.replication_driver_data)
            pair_id = drv_data.get('pair_id')
            if not pair_id:
                normal_volumes.append(v.id)
                continue

            pair_info = self.pair_op.get_info(pair_id)
            volume_pair_infos[v.id] = pair_info

            # Pairs in a CG are failed over as a group; others individually.
            cg_id = pair_info.get('CGID')
            if cg_id:
                if cg_id not in group_ids:
                    group_ids.add(cg_id)
            else:
                pair_ids.append(pair_id)

        for pair_info in six.itervalues(volume_pair_infos):
            if not statuc_check_func(pair_info):
                msg = _('Replication pair %(id)s is not at the status '
                        'failover/failback available, RUNNINGSTATUS: %(run)s, '
                        'SECRESDATASTATUS: %(sec)s.'
                        ) % {'id': pair_info['ID'],
                             'run': pair_info['RUNNINGSTATUS'],
                             'sec': pair_info['SECRESDATASTATUS']}
                LOG.error(msg)
                raise exception.InvalidReplicationTarget(reason=msg)

        return normal_volumes, list(group_ids), pair_ids, volume_pair_infos

    def _fail_op(self, volumes, status_check_func, fail_group_func,
                 fail_pair_func):
        """Shared failover/failback driver: run the op, then remap volumes.

        Returns Cinder volume updates pointing provider_location at the
        remote array's LUN for every replicated volume.
        """
        (normal_volumes, group_ids, pair_ids, volume_pair_infos
         ) = self._pre_fail_check(volumes, status_check_func)

        for group in group_ids:
            fail_group_func(group)

        for pair in pair_ids:
            fail_pair_func(pair)

        volumes_update = []
        for v in volumes:
            if v.id in normal_volumes:
                LOG.warning("Volume %s doesn't have replication.", v.id)
                continue

            rmt_lun_id = volume_pair_infos[v.id]['LOCALRESID']
            rmt_lun_info = self.rmt_client.get_lun_info_by_id(rmt_lun_id)
            location = huawei_utils.to_string(
                huawei_lun_id=rmt_lun_id,
                huawei_lun_wwn=rmt_lun_info['WWN'],
                huawei_sn=self.rmt_client.device_id,
            )

            volume_update = {'volume_id': v.id}
            volume_update['updates'] = {
                'provider_location': location,
            }

            volumes_update.append(volume_update)

        return volumes_update

    def failback(self, volumes):
        """Failback volumes to primary storage."""
        def _status_check_func(pair_info):
            return pair_info['RUNNINGSTATUS'] in (
                constants.REPLICA_RUNNING_STATUS_NORMAL,
                constants.REPLICA_RUNNING_STATUS_SPLIT,
                constants.REPLICA_RUNNING_STATUS_INTERRUPTED)

        return self._fail_op(volumes, _status_check_func,
                             self.group_op.failback, self.pair_op.failback)

    def failover(self, volumes):
        """Failover volumes to secondary storage."""
        def _status_check_func(pair_info):
            # Failover additionally requires the secondary data copy to be
            # synchronized or complete.
            return pair_info['RUNNINGSTATUS'] in (
                constants.REPLICA_RUNNING_STATUS_NORMAL,
                constants.REPLICA_RUNNING_STATUS_SPLIT,
                constants.REPLICA_RUNNING_STATUS_INTERRUPTED
            ) and pair_info['SECRESDATASTATUS'] in (
                constants.REPLICA_SECRES_DATA_SYNC,
                constants.REPLICA_SECRES_DATA_COMPLETE
            )

        return self._fail_op(volumes, _status_check_func,
                             self.group_op.failover, self.pair_op.failover)

    def create_group(self, group_id, replica_model):
        """Create a replication consistency group; returns the array group ID."""
        LOG.info("Create replication group %s.", group_id)
        group_name = huawei_utils.encode_name(group_id)
        params = {'NAME': group_name,
                  'DESCRIPTION': group_id,
                  'RECOVERYPOLICY': '1',
                  'REPLICATIONMODEL': replica_model,
                  'SPEED': self.configs['sync_speed']}

        if replica_model == constants.REPLICA_ASYNC_MODEL:
            params['SYNCHRONIZETYPE'] = '2'
            params['TIMINGVAL'] = constants.REPLICA_CG_PERIOD

        group = self.group_op.create(params)
        return group['ID']

    def _add_volumes_to_group(self, group_id, volumes):
        """Split each volume's pair, then attach it to the group."""
        for volume in volumes:
            drv_data = huawei_utils.to_dict(volume.replication_driver_data)
            pair_id = drv_data.get('pair_id')
            if not pair_id:
                LOG.warning("Volume %s doesn't have replication.", volume.id)
                continue

            self.pair_op.split(pair_id)
            self.group_op.add_pair_to_group(group_id, pair_id)

    def _remove_volumes_from_group(self, group_id, volumes):
        """Detach each volume's pair from the group, then re-sync it alone."""
        for volume in volumes:
            drv_data = huawei_utils.to_dict(volume.replication_driver_data)
            pair_id = drv_data.get('pair_id')
            if not pair_id:
                LOG.warning("Volume %s doesn't have replication.", volume.id)
                continue

            self.group_op.remove_pair_from_group(group_id, pair_id)
            self.pair_op.sync(pair_id)

    def delete_group(self, group_id, volumes):
        """Delete a replication group after detaching its member pairs."""
        LOG.info("Delete replication group %s.", group_id)
        group_info = huawei_utils.get_replication_group(
            self.loc_client, group_id)
        if not group_info:
            LOG.warning('Replication group %s to delete not exist.',
                        group_id)
            return

        self.group_op.split(group_info['ID'], group_info)
        self._remove_volumes_from_group(group_info['ID'], volumes)
        self.group_op.delete(group_info['ID'])

    def update_group(self, group_id, add_volumes, remove_volumes):
        """Add/remove member volumes of a replication group, then re-sync."""
        LOG.info("Update replication group %s.", group_id)
        group_info = huawei_utils.get_replication_group(
            self.loc_client, group_id)
        if not group_info:
            LOG.warning('Replication group %s to update not exist.',
                        group_id)
            return

        self.group_op.split(group_info['ID'], group_info)
        self._add_volumes_to_group(group_info['ID'], add_volumes)
        self._remove_volumes_from_group(group_info['ID'], remove_volumes)
        self.group_op.sync(group_info['ID'])

    def add_replication_to_group(self, group_id, pair_id):
        """Add one pair to an existing group and re-sync the group."""
        group_info = huawei_utils.get_replication_group(
            self.loc_client, group_id)
        if not group_info:
            msg = _('Replication group %s not exist.') % group_id
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        self.group_op.split(group_info['ID'], group_info)
        self.pair_op.split(pair_id)
        self.group_op.add_pair_to_group(group_info['ID'], pair_id)
        self.group_op.sync(group_info['ID'])


# === patch boundary: new file Cinder/Train/rest_client.py ===
# Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import inspect
import json
import requests
import six
import sys
import threading
import time

from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.huawei import constants

from oslo_concurrency import lockutils
from oslo_log import log as logging
from requests.adapters import HTTPAdapter


LOG = logging.getLogger(__name__)


def _error_code(result):
    """Extract the array error code from a REST response dict."""
    return result['error']['code']


# To limit the requests concurrently sent to array
_semaphore = threading.Semaphore(20)


def obj_operation_wrapper(func):
    """Decorator for CommonObject HTTP verbs.

    Builds the final URL from the object's _obj_url plus an optional
    %-format suffix filled from kwargs, throttles via the shared semaphore,
    and converts requests.HTTPError into an array-style error dict so
    callers can handle HTTP failures uniformly via _error_code().
    """
    @functools.wraps(func)
    def wrapped(self, url_format=None, **kwargs):
        url = self._obj_url
        if url_format:
            url += url_format % kwargs

        _semaphore.acquire()

        try:
            result = func(self, url, **kwargs)
        except requests.HTTPError as exc:
            return {"error": {"code": exc.response.status_code,
                              "description": six.text_type(exc)}}
        finally:
            _semaphore.release()

        return result

    return wrapped


class CommonObject(object):
    """Base for REST resource wrappers; subclasses define _obj_url."""

    def __init__(self, client):
        self.client = client

    @obj_operation_wrapper
    def post(self, url, **kwargs):
        return self.client.post(url, **kwargs)

    @obj_operation_wrapper
    def put(self, url, **kwargs):
        return self.client.put(url, **kwargs)

    @obj_operation_wrapper
    def delete(self, url, **kwargs):
        return self.client.delete(url, **kwargs)

    @obj_operation_wrapper
    def get(self, url, **kwargs):
        return self.client.get(url, **kwargs)


def _assert_result(result, msg_format, *args):
    """Raise VolumeBackendAPIException if the REST result carries an error.

    The raw result dict is appended to the formatted message for debugging.
    """
    if _error_code(result) != 0:
        args += (result,)
        msg = (msg_format + '\nresult: %s.') % args
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)


class Lun(CommonObject):
    """REST operations on /lun resources."""

    _obj_url = '/lun'

    def create_lun(self, lun_params):
        # Set the mirror switch always on
        lun_params['MIRRORPOLICY'] = '1'
        result = self.post(data=lun_params)
        _assert_result(result, 'Create lun %s error.', lun_params)
        return result['data']

    def create_lunclone(self, src_id, lun_name):
        """Create a clone LUN from a source LUN."""
        data = {
            "CLONESOURCEID": src_id,
            "ISCLONE": True,
            "NAME": lun_name,
        }
        result = self.post(data=data)
        _assert_result(result, 'Create clone lun for source ID %s error.',
                       src_id)
        return result['data']

    def delete_lun(self, lun_id):
        """Delete a LUN; a not-exist error is only logged, not raised."""
        result = self.delete('/%(lun)s', lun=lun_id)
        if _error_code(result) == constants.ERROR_LUN_NOT_EXIST:
            LOG.warning("LUN %s to delete does not exist.", lun_id)
            return
        _assert_result(result, 'Delete lun %s error.', lun_id)

    def get_lun_info_by_name(self, name):
        """Return the first LUN matching the exact name, or None."""
        result = self.get('?filter=NAME::%(name)s', name=name)
        _assert_result(result, 'Get lun info by name %s error.', name)
        if result.get('data'):
            return result['data'][0]

    def update_lun(self, lun_id, data):
        result = self.put('/%(id)s', id=lun_id, data=data)
        _assert_result(result, 'Update lun %s properties %s error.',
                       lun_id, data)

    def extend_lun(self, lun_id, new_size):
        data = {'ID': lun_id,
                'CAPACITY': new_size}
        result = self.put('/expand', data=data)
        _assert_result(result, 'Extend lun %s capacity error.', lun_id)

    def add_lun_to_partition(self, lun_id, partition_id):
        # ASSOCIATEOBJTYPE 11 identifies a LUN object on the array.
        data = {"ID": partition_id,
                "ASSOCIATEOBJTYPE": 11,
                "ASSOCIATEOBJID": lun_id}
        result = self.post('/associate/cachepartition', data=data)
        _assert_result(result, 'Add lun %s to partition %s error.',
                       lun_id, partition_id)

    def remove_lun_from_partition(self, lun_id, partition_id):
        data = {"ID": partition_id,
                "ASSOCIATEOBJTYPE": 11,
                "ASSOCIATEOBJID": lun_id}
        result = self.delete('/associate/cachepartition', data=data)
        _assert_result(result, 'Remove lun %s from partition %s error.',
                       lun_id, partition_id)

    def rename_lun(self, lun_id, new_name, description=None):
        """Rename a LUN, optionally updating its description."""
        data = {"NAME": new_name}
        if description:
            data.update({"DESCRIPTION": description})
        result = self.put('/%(id)s', id=lun_id, data=data)
        _assert_result(result, 'Rename lun %s to %s error.', lun_id, new_name)

    def get_lun_count_of_lungroup(self, lungroup_id):
        """Return the number of LUNs in a LUN group (type 256)."""
        result = self.get("/count?ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%(id)s",
                          id=lungroup_id)
        _assert_result(result, 'Get lun count of lungroup %s error.',
                       lungroup_id)
        return int(result['data']['COUNT'])

    def get_lun_info_by_id(self, lun_id):
        result = self.get("/%(id)s", id=lun_id)
        _assert_result(result, 'Get lun info by id %s error.', lun_id)
        return result['data']

    def get_lun_info_filter_id(self, lun_id):
        """Return the LUN matched by an ID filter query, or None."""
        result = self.get("?filter=ID::%(lun_id)s", lun_id=lun_id)
        _assert_result(result, 'Get lun info filter id %s error.', lun_id)
        if result.get('data'):
            return result['data'][0]

    def get_lun_host_lun_id(self, host_id, lun_id):
        """Return the host LUN ID of a LUN as mapped to a host, or None.

        The mapping metadata is embedded as JSON in ASSOCIATEMETADATA.
        """
        result = self.get(
            "/associate?ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%(id)s", id=host_id)
        _assert_result(result, 'Get lun info related to host %s error.',
                       host_id)

        for item in result.get('data', []):
            if lun_id == item['ID']:
                metadata = json.loads(item['ASSOCIATEMETADATA'])
                return metadata['HostLUNID']


class StoragePool(CommonObject):
    """REST operations on /storagepool resources."""

    _obj_url = '/storagepool'

    def get_all_pools(self):
        result = self.get()
        _assert_result(result, 'Query storage pools error.')
        return result.get('data', [])

    def get_pool_id(self, pool_name):
        """Return the ID of the pool with the given name, or None."""
        result = self.get('?filter=NAME::%(name)s', name=pool_name)
        _assert_result(result, 'Query storage pool by name %s error.',
                       pool_name)
        if result.get('data'):
            return result['data'][0]['ID']

    def get_pool_by_name(self, pool_name):
        """Return full info of the pool with the given name, or None."""
        result = self.get('?filter=NAME::%(name)s', name=pool_name,
                          log_filter=True)
        _assert_result(result, 'Query storage pool by name %s error.',
                       pool_name)
        if result.get('data'):
            return result['data'][0]


class Snapshot(CommonObject):
    """REST operations on /snapshot resources."""

    _obj_url = '/snapshot'

    def activate_snapshot(self, snapshot_ids):
        """Activate one snapshot or a list of snapshots in one call."""
        if isinstance(snapshot_ids, list):
            data = {"SNAPSHOTLIST": snapshot_ids}
        else:
            data = {"SNAPSHOTLIST": [snapshot_ids]}
        result = self.post('/activate', data=data)
        _assert_result(result, 'Activate snapshots %s error.', snapshot_ids)

    def create_snapshot(self, lun_id, snapshot_name, snapshot_description):
        data = {"NAME": snapshot_name,
                "DESCRIPTION": snapshot_description,
                "PARENTID": lun_id}
        result = self.post(data=data)
        _assert_result(result, 'Create snapshot %s for lun %s error.',
                       snapshot_name, lun_id)
        return result['data']

    def stop_snapshot(self, snapshot_id):
        data = {"ID": snapshot_id}
        result = self.put('/stop', data=data)
        _assert_result(result, 'Stop snapshot %s error.', snapshot_id)

    def delete_snapshot(self, snapshot_id):
        """Delete a snapshot; a not-exist error is only logged, not raised."""
        result = self.delete('/%(id)s', id=snapshot_id)
        if _error_code(result) == constants.SNAPSHOT_NOT_EXIST:
            LOG.warning('Snapshot %s to delete not exist.', snapshot_id)
            return
        _assert_result(result, 'Delete snapshot %s error.', snapshot_id)

    def get_snapshot_info_by_name(self, name):
        """Return the first snapshot matching the exact name, or None."""
        result = self.get('?filter=NAME::%(name)s', name=name)
        _assert_result(result, 'Get snapshot info by name %s error.', name)
        if 'data' in result and result['data']:
            return result['data'][0]

    def get_snapshot_info_by_id(self, snapshot_id):
        result = self.get('/%(id)s', id=snapshot_id)
        _assert_result(result, 'Get snapshot info by id %s error.',
                       snapshot_id)
        return result['data']

    def update_snapshot(self, snapshot_id, data):
        result = self.put('/%(id)s', id=snapshot_id, data=data)
        _assert_result(result, 'Update snapshot %s error.', snapshot_id)

    def get_snapshot_count_of_lungroup(self, lungroup_id):
        """Return the number of snapshots in a LUN group (type 256)."""
        result = self.get("/count?ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%(id)s",
                          id=lungroup_id)
        _assert_result(result, 'Get snapshot count of lungroup %s error.',
                       lungroup_id)
        return int(result['data']['COUNT'])

    def get_snapshot_host_lun_id(self, host_id, snap_id):
        # NOTE(review): method body continues beyond this excerpt.
        result = self.get(
"/associate?ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%(id)s", id=host_id) + _assert_result(result, 'Get snapshot info related to host %s error.', + host_id) + + for item in result.get('data', []): + if snap_id == item['ID']: + metadata = json.loads(item['ASSOCIATEMETADATA']) + return metadata['HostLUNID'] + + +class LunCopy(CommonObject): + _obj_url = '/LUNCOPY' + + def create_luncopy(self, luncopyname, srclunid, tgtlunid, copy_speed): + param_format = "INVALID;%s;INVALID;INVALID;INVALID" + data = {"NAME": luncopyname, + "COPYSPEED": copy_speed, + "SOURCELUN": param_format % srclunid, + "TARGETLUN": param_format % tgtlunid} + result = self.post(data=data) + _assert_result(result, 'Create luncopy %s error.', luncopyname) + + return result['data']['ID'] + + def start_luncopy(self, luncopy_id): + data = {"ID": luncopy_id} + result = self.put('/start', data=data) + _assert_result(result, 'Start LUNCOPY %s error.', luncopy_id) + + def stop_luncopy(self, luncopy_id): + data = {"ID": luncopy_id} + result = self.put('/stop', data=data) + if _error_code(result) in (constants.LUNCOPY_ALREADY_STOPPED, + constants.LUNCOPY_COMPLETED): + LOG.warning('Luncopy %s already stopped or completed.', luncopy_id) + return + _assert_result(result, 'Stop LUNCOPY %s error.', luncopy_id) + + def get_luncopy_info(self, luncopy_id): + result = self.get('/%(id)s', id=luncopy_id) + _assert_result(result, 'Get LUNCOPY %s error.', luncopy_id) + return result.get('data', {}) + + def delete_luncopy(self, luncopy_id): + result = self.delete('/%(id)s', id=luncopy_id) + if _error_code(result) == constants.LUNCOPY_NOT_EXIST: + LOG.warning('Luncopy %s to delete not exist.', luncopy_id) + return + _assert_result(result, 'Delete LUNCOPY %s error.', luncopy_id) + + +class Host(CommonObject): + _obj_url = '/host' + + def get_host_id_by_name(self, host_name): + result = self.get('?filter=NAME::%(name)s', name=host_name) + _assert_result(result, 'Get host by name %s error.', host_name) + if result.get('data'): + 
return result['data'][0]['ID'] + + def create_host(self, hostname, orig_host_name): + data = {"NAME": hostname, + "OPERATIONSYSTEM": "0", + "DESCRIPTION": orig_host_name} + result = self.post(data=data) + if _error_code(result) == constants.OBJECT_NAME_ALREADY_EXIST: + return self.get_host_id_by_name(hostname) + + _assert_result(result, 'Add host %s error.', hostname) + return result['data']['ID'] + + def delete_host(self, host_id): + result = self.delete('/%(id)s', id=host_id) + if _error_code(result) == constants.HOST_NOT_EXIST: + LOG.warning('Host %s to delete not exist.', host_id) + return + _assert_result(result, 'Delete host %s error.', host_id) + + def remove_host_from_hostgroup(self, hostgroup_id, host_id): + result = self.delete('/associate?ID=%(gid)s&ASSOCIATEOBJTYPE=21&' + 'ASSOCIATEOBJID=%(hid)s', + gid=hostgroup_id, hid=host_id) + if _error_code(result) == constants.HOST_NOT_IN_HOSTGROUP: + LOG.warning('Host %s not in hostgroup %s.', host_id, hostgroup_id) + return + _assert_result(result, 'Remove host %s from host group %s error.', + host_id, hostgroup_id) + + +class PortGroup(CommonObject): + _obj_url = '/portgroup' + + def get_portgroup_in_mappingview(self, view_id): + result = self.get('/associate?ASSOCIATEOBJTYPE=245&' + 'ASSOCIATEOBJID=%(id)s', id=view_id) + _assert_result(result, 'Get portgroup in mappingview %s error', + view_id) + if 'data' in result and result['data']: + return result['data'][0]['ID'] + + def create_portgroup(self, portg_name): + data = {"NAME": portg_name} + result = self.post(data=data) + if _error_code(result) == constants.OBJECT_NAME_ALREADY_EXIST: + LOG.info('Portgroup %s to create already exist.', portg_name) + portgroup = self.get_portgroup_by_name(portg_name) + if portgroup: + return portgroup['ID'] + + _assert_result(result, 'Create portgroup %s error.', portg_name) + return result['data']['ID'] + + def delete_portgroup(self, portgroup_id): + result = self.delete('/%(id)s', id=portgroup_id) + if _error_code(result) 
== constants.PORTGROUP_NOT_EXIST: + LOG.warning('Portgroup %s to delete not exist.', portgroup_id) + return + _assert_result(result, 'Delete portgroup %s error.', portgroup_id) + + def get_portgroup_by_name(self, portg_name): + result = self.get('?filter=NAME::%(name)s', name=portg_name) + _assert_result(result, 'Get portgroup by name %s error.', portg_name) + if 'data' in result and result['data']: + return result['data'][0] + + def get_portgroup_by_port_id(self, port_id, port_type): + result = self.get("/associate?ASSOCIATEOBJTYPE=%(type)s&" + "ASSOCIATEOBJID=%(id)s", id=port_id, type=port_type) + _assert_result(result, 'Get portgroup by port %s error.', port_id) + return [group['ID'] for group in result.get("data", [])] + + +class HostGroup(CommonObject): + _obj_url = '/hostgroup' + + def get_hostgroup_in_mappingview(self, view_id): + result = self.get('/associate?ASSOCIATEOBJTYPE=245&' + 'ASSOCIATEOBJID=%(id)s', id=view_id) + _assert_result(result, 'Get hostgroup in mappingview %s error.', + view_id) + if 'data' in result and result['data']: + return result['data'][0]['ID'] + + def associate_host_to_hostgroup(self, hostgroup_id, host_id): + data = {"ID": hostgroup_id, + "ASSOCIATEOBJTYPE": "21", + "ASSOCIATEOBJID": host_id} + result = self.post('/associate', data=data) + if _error_code(result) == constants.HOST_ALREADY_IN_HOSTGROUP: + LOG.info('Object %(id)s already in hostgroup %(group)s.', + {'id': host_id, 'group': hostgroup_id}) + return + _assert_result(result, 'Associate host %s to hostgroup %s error.', + host_id, hostgroup_id) + + def create_hostgroup(self, name): + data = {'NAME': name} + result = self.post(data=data) + if _error_code(result) == constants.OBJECT_NAME_ALREADY_EXIST: + LOG.info('Hostgroup %s to create already exists.', name) + hostgroup = self.get_hostgroup_by_name(name) + return hostgroup['ID'] if hostgroup else None + _assert_result(result, 'Create hostgroup %s error.', name) + return result['data']['ID'] + + def delete_hostgroup(self, 
hostgroup_id): + result = self.delete('/%(id)s', id=hostgroup_id) + if _error_code(result) == constants.HOSTGROUP_NOT_EXIST: + LOG.info('Hostgroup %s to delete not exist.', hostgroup_id) + return + _assert_result(result, 'Delete hostgroup %s error.', hostgroup_id) + + def get_hostgroup_by_name(self, name): + result = self.get('?filter=NAME::%(name)s', name=name) + _assert_result(result, 'Get hostgroup by %s error.', name) + if 'data' in result and result['data']: + return result['data'][0] + + +class LunGroup(CommonObject): + _obj_url = '/lungroup' + + def associate_lun_to_lungroup(self, lungroup_id, obj_id, obj_type): + data = {"ID": lungroup_id, + "ASSOCIATEOBJTYPE": obj_type, + "ASSOCIATEOBJID": obj_id} + result = self.post('/associate', data=data) + if _error_code(result) == constants.OBJECT_ID_NOT_UNIQUE: + LOG.info('Object %(id)s already in lungroup %(group)s.', + {'id': obj_id, 'group': lungroup_id}) + return + _assert_result(result, 'Associate obj %s to lungroup %s error.', + obj_id, lungroup_id) + + def remove_lun_from_lungroup(self, lungroup_id, obj_id, obj_type): + result = self.delete( + "/associate?ID=%(lungroup_id)s&ASSOCIATEOBJTYPE=%(obj_type)s&" + "ASSOCIATEOBJID=%(obj_id)s", lungroup_id=lungroup_id, + obj_id=obj_id, obj_type=obj_type) + if _error_code(result) == constants.OBJECT_NOT_EXIST: + LOG.warning('LUN %(lun)s not exist in lungroup %(gp)s.', + {'lun': obj_id, 'gp': lungroup_id}) + return + _assert_result(result, 'Remove lun %s from lungroup %s error.', + obj_id, lungroup_id) + + def get_lungroup_in_mappingview(self, view_id): + result = self.get('/associate?ASSOCIATEOBJTYPE=245&' + 'ASSOCIATEOBJID=%(id)s', id=view_id) + _assert_result(result, 'Get lungroup in mappingview %s error.', + view_id) + if 'data' in result and result['data']: + return result['data'][0]['ID'] + + def get_lungroup_by_name(self, lungroup_name): + """Get the given hostgroup id.""" + result = self.get('?filter=NAME::%(name)s', name=lungroup_name) + _assert_result(result, 
'Get lungroup info by name %s error.', + lungroup_name) + if 'data' in result and result['data']: + return result['data'][0] + + def create_lungroup(self, lungroup_name): + data = {"APPTYPE": '0', + "NAME": lungroup_name} + result = self.post(data=data) + if _error_code(result) == constants.OBJECT_NAME_ALREADY_EXIST: + LOG.info('Lungroup %s to create already exists.', lungroup_name) + lungroup = self.get_lungroup_by_name(lungroup_name) + return lungroup['ID'] if lungroup else None + + _assert_result(result, 'Create lungroup %s error.', lungroup_name) + return result['data']['ID'] + + def delete_lungroup(self, lungroup_id): + result = self.delete('/%(id)s', id=lungroup_id) + if _error_code(result) == constants.OBJECT_NOT_EXIST: + LOG.warning('Lungroup %s to delete not exist.', lungroup_id) + return + _assert_result(result, 'Delete lungroup %s error.', lungroup_id) + + def get_lungroup_ids_by_lun_id(self, lun_id, lun_type=constants.LUN_TYPE): + result = self.get('/associate?TYPE=256&ASSOCIATEOBJTYPE=%(type)s&' + 'ASSOCIATEOBJID=%(id)s', type=lun_type, id=lun_id) + _assert_result(result, 'Get lungroup id by lun id %s error.', lun_id) + + lungroup_ids = [] + if 'data' in result: + for item in result['data']: + lungroup_ids.append(item['ID']) + + return lungroup_ids + + +class IscsiInitiator(CommonObject): + _obj_url = '/iscsi_initiator' + + def add_iscsi_initiator(self, initiator): + data = {'ID': initiator} + result = self.post(data=data) + if _error_code(result) == constants.OBJECT_ID_NOT_UNIQUE: + LOG.info('iscsi initiator %s already exists.', initiator) + return + _assert_result(result, 'Add iscsi initiator %s error.', initiator) + + def associate_iscsi_initiator_to_host(self, initiator, host_id, alua_info): + data = { + "PARENTTYPE": "21", + "PARENTID": host_id, + } + data.update(alua_info) + + result = self.put('/%(ini)s', data=data, ini=initiator) + _assert_result(result, 'Add initiator %s to host %s error.', + initiator, host_id) + + def 
update_iscsi_initiator_chap(self, initiator, chap_info): + if chap_info: + data = {"USECHAP": "true", + "CHAPNAME": chap_info['CHAPNAME'], + "CHAPPASSWORD": chap_info['CHAPPASSWORD']} + else: + data = {"USECHAP": "false"} + + result = self.put('/%(ini)s', data=data, ini=initiator) + _assert_result(result, 'Update initiator %s chap error.', initiator) + + def remove_iscsi_initiator_from_host(self, initiator): + data = {"ID": initiator} + result = self.put('/remove_iscsi_from_host', data=data) + if _error_code(result) == constants.INITIATOR_NOT_IN_HOST: + LOG.warning('ISCSI initiator %s not in host.', initiator) + return + _assert_result(result, 'Remove iscsi initiator %s from host error.', + initiator) + + def get_host_iscsi_initiators(self, host_id): + result = self.get('?PARENTID=%(id)s', id=host_id) + _assert_result(result, 'Get iscsi initiators of host %s error.', + host_id) + initiators = [] + for item in result.get('data', []): + initiators.append(item['ID']) + return initiators + + def get_iscsi_initiator(self, initiator): + result = self.get('/%(id)s', id=initiator) + _assert_result(result, 'Get iscsi initiator %s error.', initiator) + return result['data'] + + +class MappingView(CommonObject): + _obj_url = '/mappingview' + + def get_mappingview_by_name(self, name): + result = self.get('?filter=NAME::%(name)s', name=name) + _assert_result(result, 'Find mapping view by name %s error', name) + if 'data' in result and result['data']: + return result['data'][0] + + def create_mappingview(self, name): + data = {"NAME": name} + result = self.post(data=data) + if _error_code(result) == constants.OBJECT_NAME_ALREADY_EXIST: + LOG.info('Mappingview %s to create already exists.', name) + mappingview = self.get_mappingview_by_name(name) + return mappingview['ID'] if mappingview else None + _assert_result(result, 'Create mappingview by name %s error.', name) + return result['data']['ID'] + + def _associate_group_to_mappingview(self, view_id, group_id, group_type): + data 
= {"ASSOCIATEOBJTYPE": group_type, + "ASSOCIATEOBJID": group_id, + "ID": view_id} + result = self.put('/create_associate', data=data) + if _error_code(result) in (constants.HOSTGROUP_ALREADY_IN_MAPPINGVIEW, + constants.PORTGROUP_ALREADY_IN_MAPPINGVIEW, + constants.LUNGROUP_ALREADY_IN_MAPPINGVIEW): + LOG.warning('Group %(group_id)s of type %(type)s already exist ' + 'in mappingview %(view_id)s.', + {'group_id': group_id, 'type': group_type, + 'view_id': view_id}) + return + _assert_result(result, 'Associate group %s to mappingview %s error.', + group_id, view_id) + + def associate_hostgroup_to_mappingview(self, view_id, hostgroup_id): + self._associate_group_to_mappingview(view_id, hostgroup_id, '14') + + def associate_lungroup_to_mappingview(self, view_id, lungroup_id): + self._associate_group_to_mappingview(view_id, lungroup_id, '256') + + def associate_portgroup_to_mappingview(self, view_id, portgroup_id): + self._associate_group_to_mappingview(view_id, portgroup_id, '257') + + def _remove_group_from_mappingview(self, view_id, group_id, group_type): + data = {"ASSOCIATEOBJTYPE": group_type, + "ASSOCIATEOBJID": group_id, + "ID": view_id} + result = self.put('/remove_associate', data=data) + if _error_code(result) in (constants.HOSTGROUP_NOT_IN_MAPPINGVIEW, + constants.PORTGROUP_NOT_IN_MAPPINGVIEW, + constants.LUNGROUP_NOT_IN_MAPPINGVIEW): + LOG.warning('Group %(group_id)s of type %(type)s not exist in ' + 'mappingview %(view_id)s.', + {'group_id': group_id, 'type': group_type, + 'view_id': view_id}) + return + _assert_result(result, 'Remove group %s from mappingview %s error.', + group_id, view_id) + + def remove_lungroup_from_mappingview(self, view_id, lungroup_id): + self._remove_group_from_mappingview(view_id, lungroup_id, '256') + + def remove_hostgroup_from_mappingview(self, view_id, hostgroup_id): + self._remove_group_from_mappingview(view_id, hostgroup_id, '14') + + def remove_portgroup_from_mappingview(self, view_id, portgroup_id): + 
self._remove_group_from_mappingview(view_id, portgroup_id, '257') + + def delete_mapping_view(self, view_id): + result = self.delete('/%(id)s', id=view_id) + if _error_code(result) == constants.MAPPINGVIEW_NOT_EXIST: + LOG.warning('Mappingview %s to delete not exist.', view_id) + return + _assert_result(result, 'Delete mappingview %s error.', view_id) + + def change_hostlun_id(self, view_id, lun_id, hostlun_id): + data = {"ASSOCIATEOBJTYPE": 11, + "ASSOCIATEOBJID": lun_id, + "ASSOCIATEMETADATA": [ + {"LUNID": lun_id, + "hostLUNId": six.text_type(hostlun_id)}] + } + result = self.put('/%(id)s', id=view_id, data=data) + _assert_result(result, 'Change hostlun id for lun %s in mappingview ' + '%s error.', lun_id, view_id) + + def get_mappingview_by_id(self, view_id): + result = self.get('/%(id)s', id=view_id) + _assert_result(result, 'Get mappingview info by id %s error.', + view_id) + return result["data"] + + def get_mappingview_by_portgroup_id(self, portgroup_id): + result = self.get('/associate?ASSOCIATEOBJTYPE=257&' + 'ASSOCIATEOBJID=%(id)s', id=portgroup_id) + _assert_result(result, 'Get mappingviews by portgroup %s error.', + portgroup_id) + return [view['ID'] for view in result.get("data", [])] + + +class FCInitiator(CommonObject): + _obj_url = '/fc_initiator' + + def get_fc_initiator_count(self): + result = self.get("/count") + _assert_result(result, 'Get FC initiator count error.') + return int(result['data']['COUNT']) + + def _get_fc_initiator(self, start, end): + result = self.get("?range=[%(start)s-%(end)s]", start=start, end=end) + _assert_result(result, 'Get online free FC wwn error.') + + totals = [] + frees = [] + for item in result.get('data', []): + totals.append(item['ID']) + if item['RUNNINGSTATUS'] == '27' and item['ISFREE'] == 'true': + frees.append(item['ID']) + return totals, frees + + def get_fc_initiators(self): + fc_initiator_count = self.get_fc_initiator_count() + totals = [] + frees = [] + range_start = 0 + + while fc_initiator_count > 0: 
+ range_end = range_start + constants.GET_PATCH_NUM + _totals, _frees = self._get_fc_initiator(range_start, range_end) + totals += _totals + frees += _frees + fc_initiator_count -= constants.GET_PATCH_NUM + range_start += constants.GET_PATCH_NUM + return totals, frees + + def add_fc_initiator(self, initiator): + data = {'ID': initiator} + result = self.post(data=data) + if _error_code(result) == constants.OBJECT_ID_NOT_UNIQUE: + LOG.info('FC initiator %s already exists.', initiator) + return + _assert_result(result, 'Add FC initiator %s error.', initiator) + + def associate_fc_initiator_to_host(self, host_id, wwn, alua_info): + data = { + "PARENTTYPE": 21, + "PARENTID": host_id, + } + data.update(alua_info) + + result = self.put('/%(id)s', data=data, id=wwn) + _assert_result(result, 'Add FC initiator %s to host %s error.', + wwn, host_id) + + def get_host_fc_initiators(self, host_id): + result = self.get('?PARENTID=%(id)s', id=host_id) + _assert_result(result, 'Get FC initiators of host %s error.', + host_id) + return [item['ID'] for item in result.get('data', [])] + + def remove_fc_initiator_from_host(self, initiator): + data = {"ID": initiator} + result = self.put('/remove_fc_from_host', data=data) + if _error_code(result) == constants.INITIATOR_NOT_IN_HOST: + LOG.warning('FC initiator %s not in host.', initiator) + return + _assert_result(result, 'Remove fc initiator %s from host error.', + initiator) + + +class HostLink(CommonObject): + _obj_url = '/host_link' + + def get_fc_target_wwpns(self, ini): + result = self.get('?INITIATOR_TYPE=223&INITIATOR_PORT_WWN=%(wwn)s', + wwn=ini) + _assert_result(result, 'Get FC target wwn for initiator %s error.', + ini) + return [fc['TARGET_PORT_WWN'] for fc in result.get('data', [])] + + def get_host_link(self, host_id): + result = self.get('?INITIATOR_TYPE=223&PARENTID=%(id)s', id=host_id) + _assert_result(result, 'Get host link for host %s error.', host_id) + return result.get('data', []) + + +class IOClass(CommonObject): + 
_obj_url = '/ioclass' + + def create_qos(self, qos, lun_id): + localtime = time.strftime('%Y%m%d%H%M%S', time.localtime()) + qos_name = constants.QOS_NAME_PREFIX + lun_id + '_' + localtime + + data = {"NAME": qos_name, + "LUNLIST": [lun_id], + "CLASSTYPE": "1", + "SCHEDULEPOLICY": "2", + "SCHEDULESTARTTIME": "1410969600", + "STARTTIME": "08:00", + "DURATION": "86400", + "CYCLESET": "[1,2,3,4,5,6,0]", + } + data.update(qos) + + result = self.post(data=data) + _assert_result(result, 'Create QoS policy %s error.', qos) + return result['data']['ID'] + + def delete_qos(self, qos_id): + result = self.delete('/%(id)s', id=qos_id) + _assert_result(result, 'Delete QoS policy %s error.', qos_id) + + def activate_deactivate_qos(self, qos_id, enablestatus): + """Activate or deactivate QoS. + + enablestatus: true (activate) + enbalestatus: false (deactivate) + """ + data = {"ID": qos_id, + "ENABLESTATUS": enablestatus} + result = self.put('/active', data=data) + _assert_result(result, 'Change QoS %s to status %s error.', + qos_id, enablestatus) + + def get_qos_info(self, qos_id): + result = self.get('/%(id)s', id=qos_id) + _assert_result(result, 'Get QoS %s info error.', qos_id) + return result['data'] + + def get_all_qos(self): + result = self.get() + _assert_result(result, 'Get all QoS information error.') + return result.get('data', []) + + def update_qos_luns(self, qos_id, lun_list): + """Add lun to QoS.""" + data = {"LUNLIST": lun_list} + result = self.put('/%(qos_id)s', data=data, qos_id=qos_id) + _assert_result(result, 'Update lun list %s to QoS %s error.', + lun_list, qos_id) + + +class EthPort(CommonObject): + _obj_url = '/eth_port' + + def get_eth_ports_in_portgroup(self, portgroup_id): + result = self.get("/associate?ASSOCIATEOBJTYPE=257&" + "ASSOCIATEOBJID=%(id)s", id=portgroup_id) + _assert_result(result, 'Get eth ports in portgroup %s error.', + portgroup_id) + return result.get("data", []) + + +class IscsiTgtPort(CommonObject): + _obj_url = '/iscsi_tgt_port' + + 
def get_iscsi_tgt_ports(self): + result = self.get() + _assert_result(result, "Get iscsi target ports info error.") + return result.get('data', []) + + +class LunMigration(CommonObject): + _obj_url = '/lun_migration' + + def create_lun_migration(self, src_id, dst_id): + data = {"PARENTID": src_id, + "TARGETLUNID": dst_id, + "SPEED": '2', + "WORKMODE": 0} + + result = self.post(data=data) + _assert_result(result, 'Create migration from %s to %s error.', + src_id, dst_id) + return result['data'] + + def get_lun_migration(self, migration_id): + result = self.get('/%(id)s', id=migration_id) + _assert_result(result, 'Get migration info %s error.', migration_id) + return result['data'] + + def delete_lun_migration(self, migration_id): + result = self.delete('/%(id)s', id=migration_id) + if _error_code(result) == constants.MIGRATION_NOT_EXIST: + LOG.warning('Migration %s to delete not exist.', migration_id) + return + _assert_result(result, 'Delete migration %s error.', migration_id) + + +class CachePartition(CommonObject): + _obj_url = '/cachepartition' + + def get_partition_id_by_name(self, name): + result = self.get('?filter=NAME::%(name)s', name=name) + _assert_result(result, 'Get partition by name %s error.', name) + if 'data' in result and len(result['data']) > 0: + return result['data'][0]['ID'] + + def get_partition_info_by_id(self, partition_id): + result = self.get('/%(id)s', id=partition_id) + _assert_result(result, 'Get partition info by id %s error.', + partition_id) + return result['data'] + + +class SmartCachePartition(CommonObject): + _obj_url = '/smartcachepartition' + + def get_cache_id_by_name(self, name): + result = self.get('?filter=NAME::%(name)s', name=name) + _assert_result(result, 'Get smartcachepartition by name %s error.', + name) + if 'data' in result and len(result['data']) > 0: + return result['data'][0]['ID'] + + def get_cache_info_by_id(self, cacheid): + result = self.get('/%(id)s', id=cacheid) + _assert_result(result, 'Get 
smartcachepartition by id %s error.', + cacheid) + return result['data'] + + def remove_lun_from_cache(self, lun_id, cache_id): + data = {"ID": cache_id, + "ASSOCIATEOBJTYPE": 11, + "ASSOCIATEOBJID": lun_id} + + result = self.put('/remove_associate', data=data) + _assert_result(result, 'Remove lun %s from smartcachepartition ' + '%s error.', lun_id, cache_id) + + def add_lun_to_cache(self, lun_id, cache_id): + data = {"ID": cache_id, + "ASSOCIATEOBJTYPE": 11, + "ASSOCIATEOBJID": lun_id} + result = self.put('/create_associate', data=data) + _assert_result(result, 'Add lun %s to smartcachepartition ' + '%s error.', lun_id, cache_id) + + +class FCPort(CommonObject): + _obj_url = '/fc_port' + + def get_fc_ports(self): + result = self.get() + _assert_result(result, 'Get FC ports from array error.') + return result.get('data', []) + + def get_fc_ports_in_portgroup(self, portgroup_id): + result = self.get('/associate?ASSOCIATEOBJTYPE=257' + '&ASSOCIATEOBJID=%(id)s', id=portgroup_id) + _assert_result(result, 'Get FC ports in portgroup %s error.', + portgroup_id) + return result.get("data", []) + + +class HyperMetroDomain(CommonObject): + _obj_url = '/HyperMetroDomain' + + def get_hypermetro_domain_id(self, domain_name): + result = self.get('?range=[0-32]') + _assert_result(result, 'Get hyper metro domains info error.') + for item in result.get('data', []): + if domain_name == item['NAME']: + return item['ID'] + + +class HyperMetroPair(CommonObject): + _obj_url = '/HyperMetroPair' + + def create_hypermetro(self, hcp_param): + result = self.post(data=hcp_param) + _assert_result(result, 'Create hypermetro pair %s error.', hcp_param) + return result['data'] + + def delete_hypermetro(self, metro_id): + result = self.delete('/%(id)s', id=metro_id) + if _error_code(result) == constants.HYPERMETRO_NOT_EXIST: + LOG.warning('Hypermetro %s to delete not exist.', metro_id) + return + _assert_result(result, 'Delete hypermetro %s error.', metro_id) + + def sync_hypermetro(self, 
metro_id): + data = {"ID": metro_id} + result = self.put('/synchronize_hcpair', data=data) + _assert_result(result, 'Sync hypermetro %s error.', metro_id) + + def stop_hypermetro(self, hypermetro_id): + data = {"ID": hypermetro_id} + result = self.put('/disable_hcpair', data=data) + _assert_result(result, 'Stop hypermetro %s error.', hypermetro_id) + + def get_hypermetro_by_id(self, metro_id): + result = self.get('?filter=ID::%(id)s', id=metro_id) + _assert_result(result, 'Get hypermetro by id %s error.', metro_id) + if result.get('data'): + return result['data'][0] + + def get_hypermetro_by_lun_name(self, lun_name): + result = self.get('?filter=LOCALOBJNAME::%(name)s', name=lun_name) + _assert_result(result, 'Get hypermetro by local lun name' + ' %s error.', lun_name) + if result.get('data'): + return result['data'][0] + + +class HyperMetroConsistentGroup(CommonObject): + _obj_url = '/HyperMetro_ConsistentGroup' + + def get_metrogroup_by_name(self, name): + result = self.get('?filter=NAME::%(name)s', name=name) + _assert_result(result, 'Get hypermetro group by name %s error.', name) + if 'data' in result and len(result['data']) > 0: + return result['data'][0] + + def create_metrogroup(self, group_params): + result = self.post(data=group_params) + _assert_result(result, 'Create hypermetro group %s error.', + group_params) + + def delete_metrogroup(self, metrogroup_id): + result = self.delete('/%(id)s', id=metrogroup_id) + if _error_code(result) == constants.HYPERMETROGROUP_NOT_EXIST: + LOG.warning('Hypermetro group %s to delete not exist.', + metrogroup_id) + return + _assert_result(result, 'Delete hypermetro group %s error.', + metrogroup_id) + + def stop_metrogroup(self, metrogroup_id): + data = {"ID": metrogroup_id} + result = self.put('/stop', data=data) + _assert_result(result, 'Stop hypermetro group %s error.', + metrogroup_id) + + def sync_metrogroup(self, metrogroup_id): + data = {"ID": metrogroup_id} + result = self.put('/sync', data=data) + if 
_error_code(result) == constants.NO_HYPERMETRO_EXIST_IN_GROUP: + LOG.info('Hypermetro group %s to sync is empty.', metrogroup_id) + return + _assert_result(result, 'Sync hypermetro group %s error.', + metrogroup_id) + + +class HyperMetro(CommonObject): + _obj_url = '/hyperMetro' + + def add_metro_to_metrogroup(self, metrogroup_id, metro_id): + data = {"ID": metrogroup_id, + "ASSOCIATEOBJID": metro_id} + result = self.post('/associate/pair', data=data) + if _error_code(result) == constants.HYPERMETRO_ALREADY_IN_GROUP: + LOG.warning('Hypermetro %(m_id)s to add already in group %(g_id)s', + {'m_id': metro_id, 'g_id': metrogroup_id}) + return + _assert_result(result, 'Add hypermetro %s to group %s error.', + metro_id, metrogroup_id) + + def remove_metro_from_metrogroup(self, metrogroup_id, metro_id): + data = {"ID": metrogroup_id, + "ASSOCIATEOBJID": metro_id} + result = self.delete('/associate/pair', data=data) + if _error_code(result) == constants.HYPERMETRO_NOT_IN_GROUP: + LOG.warning('Hypermetro %(mid)s to remove not in group %(gid)s', + {'mid': metro_id, 'gid': metrogroup_id}) + return + _assert_result(result, 'Delete hypermetro %s from group %s error.', + metro_id, metrogroup_id) + + +class Port(CommonObject): + _obj_url = '/port' + + def add_port_to_portgroup(self, portgroup_id, port_id): + data = {"ASSOCIATEOBJID": port_id, + "ASSOCIATEOBJTYPE": 212, + "ID": portgroup_id} + result = self.post('/associate/portgroup', data=data) + if _error_code(result) == constants.PORT_ALREADY_IN_PORTGROUP: + LOG.warning('Port %(pid)s already in portgroup %(gid)s.', + {'pid': port_id, 'gid': portgroup_id}) + return + _assert_result(result, 'Add port %s to portgroup %s error.', + port_id, portgroup_id) + + def remove_port_from_portgroup(self, portgroup_id, port_id): + result = self.delete('/associate/portgroup?ID=%(gid)s&' + 'ASSOCIATEOBJTYPE=212&ASSOCIATEOBJID=%(pid)s', + gid=portgroup_id, pid=port_id) + if _error_code(result) == constants.PORT_NOT_IN_PORTGROUP: + LOG.warning('Port 
%(pid)s not in portgroup %(gid)s.', + {'pid': port_id, 'gid': portgroup_id}) + return + _assert_result(result, 'Remove port %s from portgroup %s error.', + port_id, portgroup_id) + + +class RemoteDevice(CommonObject): + _obj_url = '/remote_device' + + def get_remote_device_by_wwn(self, wwn): + result = self.get() + _assert_result(result, 'Get remote devices error.') + for device in result.get('data', []): + if device.get('WWN') == wwn: + return device + + +class ReplicationPair(CommonObject): + _obj_url = '/REPLICATIONPAIR' + + def create_replication_pair(self, pair_params): + result = self.post(data=pair_params) + _assert_result(result, 'Create replication %s error.', pair_params) + return result['data'] + + def get_replication_pair_by_id(self, pair_id): + result = self.get('/%(id)s', id=pair_id) + if _error_code(result) == constants.REPLICATION_PAIR_NOT_EXIST: + _assert_result(result, 'Replication pair %s not exist.', pair_id) + else: + _assert_result(result, 'Get replication pair %s error.', pair_id) + return result['data'] + + def switch_replication_pair(self, pair_id): + data = {"ID": pair_id} + result = self.put('/switch', data=data) + _assert_result(result, 'Switch over replication pair %s error.', + pair_id) + + def split_replication_pair(self, pair_id): + data = {"ID": pair_id} + result = self.put('/split', data=data) + _assert_result(result, 'Split replication pair %s error.', pair_id) + + def delete_replication_pair(self, pair_id, force=False): + if force: + data = {"ISLOCALDELETE": force} + result = self.delete('/%(id)s', id=pair_id, data=data) + else: + result = self.delete('/%(id)s', id=pair_id) + + if _error_code(result) == constants.REPLICATION_PAIR_NOT_EXIST: + LOG.warning('Replication pair to delete %s not exist.', + pair_id) + return + _assert_result(result, 'Delete replication pair %s error.', pair_id) + + def sync_replication_pair(self, pair_id): + data = {"ID": pair_id} + result = self.put('/sync', data=data) + _assert_result(result, 'Sync 
replication pair %s error.', pair_id) + + def set_replication_pair_second_access(self, pair_id, access): + data = {"SECRESACCESS": access} + result = self.put('/%(id)s', id=pair_id, data=data) + _assert_result(result, 'Set replication pair %s secondary access ' + 'to %s error.', pair_id, access) + + +class ReplicationConsistencyGroup(CommonObject): + _obj_url = '/CONSISTENTGROUP' + + def create_replication_group(self, group_params): + result = self.post(data=group_params) + _assert_result(result, 'Create replication group %s error.', + group_params) + return result['data'] + + def get_replication_group_by_name(self, group_name): + result = self.get('?filter=NAME::%(name)s', name=group_name) + _assert_result(result, 'Get replication group by name %s error.', + group_name) + if 'data' in result and len(result['data']) > 0: + return result['data'][0] + + def get_replication_group_by_id(self, group_id): + result = self.get('/%(id)s', id=group_id) + _assert_result(result, 'Get replication group by id %s error.', + group_id) + return result['data'] + + def delete_replication_group(self, group_id): + result = self.delete('/%(id)s', id=group_id) + if _error_code(result) == constants.REPLICATION_GROUP_NOT_EXIST: + LOG.warning('Replication group %s to delete not exist.', group_id) + return + _assert_result(result, 'Delete replication group %s error.', group_id) + + def set_replication_group_second_access(self, group_id, access): + data = {"SECRESACCESS": access} + result = self.put("/%(id)s", id=group_id, data=data) + _assert_result(result, 'Set replication group %s second access to ' + '%s error.', group_id, access) + + +class LicenseFeature(CommonObject): + _obj_url = '/license/feature' + + def get_feature_status(self): + result = self.get(log_filter=True) + if result['error']['code'] != 0: + LOG.warning('Query feature information failed.') + return {} + + status = {} + for feature in result.get('data', []): + status.update(feature) + + return status + + +class 
ClonePair(CommonObject): + _obj_url = '/clonepair' + + def create_clone_pair(self, source_id, target_id, clone_speed): + data = {"copyRate": clone_speed, + "sourceID": source_id, + "targetID": target_id, + "isNeedSynchronize": "0"} + result = self.post("/relation", data=data) + _assert_result(result, 'Create ClonePair error, source_id is %s.', + source_id) + return result['data']['ID'] + + def sync_clone_pair(self, pair_id): + data = {"ID": pair_id, "copyAction": 0} + result = self.put("/synchronize", data=data) + _assert_result(result, 'Sync ClonePair error, pair id is %s.', pair_id) + + def get_clone_pair_info(self, pair_id): + result = self.get('/%(id)s', id=pair_id) + _assert_result(result, 'Get ClonePair %s error.', pair_id) + return result.get('data', {}) + + def delete_clone_pair(self, pair_id, delete_dst_lun=False): + data = {"ID": pair_id, + "isDeleteDstLun": delete_dst_lun} + result = self.delete("/%(id)s", id=pair_id, data=data) + if _error_code(result) == constants.CLONE_PAIR_NOT_EXIST: + LOG.warning('ClonePair %s to delete not exist.', pair_id) + return + _assert_result(result, 'Delete ClonePair %s error.', pair_id) + + +class HostNameIgnoringAdapter(HTTPAdapter): + def cert_verify(self, conn, url, verify, cert): + conn.assert_hostname = False + return super(HostNameIgnoringAdapter, self).cert_verify( + conn, url, verify, cert) + + +def rest_operation_wrapper(func): + @functools.wraps(func) + def wrapped(self, url, **kwargs): + need_relogin = False + + if not kwargs.get('log_filter'): + LOG.info('\nURL: %(url)s\n' + 'Method: %(method)s\n' + 'Data: %(data)s\n', + {'url': (self._login_url or '') + url, + 'method': func.__name__, + 'data': kwargs.get('data')}) + + with self._session_lock.read_lock(): + if self._login_url: + full_url = self._login_url + url + old_token = self._session.headers.get('iBaseToken') + try: + r = func(self, full_url, **kwargs) + except requests.RequestException: + LOG.exception('Request URL: %(url)s, method: %(method)s ' + 
'failed at first time. Will switch login ' + 'url and retry this request.', + {'url': full_url, + 'method': func.__name__}) + need_relogin = True + else: + r.raise_for_status() + result = r.json() + if (_error_code(result) in + (constants.ERROR_CONNECT_TO_SERVER, + constants.ERROR_UNAUTHORIZED_TO_SERVER)): + need_relogin = True + else: + need_relogin = True + old_token = None + + if need_relogin: + self._relogin(old_token) + try: + with self._session_lock.read_lock(): + full_url = self._login_url + url + r = func(self, full_url, **kwargs) + except requests.RequestException: + LOG.exception('Request URL: %(url)s, method: %(method)s ' + 'failed again.', + {'url': full_url, + 'method': func.__name__}) + raise + + r.raise_for_status() + result = r.json() + if not kwargs.get('log_filter'): + LOG.info('Response: %s', result) + return result + + return wrapped + + +class RestClient(object): + def __init__(self, address, user, password, vstore=None, ssl_verify=None, + cert_path=None): + self.san_address = address + self.san_user = user + self.san_password = password + self.vstore_name = vstore + self.ssl_verify = ssl_verify + self.cert_path = cert_path + + self._login_url = None + self._login_device_id = None + self._session_lock = lockutils.ReaderWriterLock() + self._session = None + self._init_object_methods() + + def _extract_obj_method(self, obj): + filter_method_names = ('login', 'get', 'post', 'delete', 'put') + + def prefilter(m): + return (inspect.ismethod(m) and not inspect.isbuiltin(m) and + m.__name__ not in filter_method_names and + not m.__name__.startswith('_')) + + members = inspect.getmembers(obj, prefilter) + for method in members: + if method[0] in self.__dict__: + msg = _('Method %s already exists in rest client.' 
+ ) % method[0] + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.__dict__[method[0]] = method[1] + + def _init_object_methods(self): + def prefilter(m): + return inspect.isclass(m) and issubclass(m, CommonObject) + + obj_classes = inspect.getmembers(sys.modules[__name__], prefilter) + for cls in obj_classes: + self._extract_obj_method(cls[1](self)) + + def _try_login(self, manage_url): + url = manage_url + "xx/sessions" + data = {"username": self.san_user, + "password": self.san_password, + "scope": "0"} + if self.vstore_name: + data['vstorename'] = self.vstore_name + + r = self._session.post(url, data=json.dumps(data), + timeout=constants.LOGIN_SOCKET_TIMEOUT) + r.raise_for_status() + + result = r.json() + if _error_code(result) != 0: + msg = _("Failed to login URL %(url)s because of %(reason)s." + ) % {"url": url, "reason": result} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self._session.headers['iBaseToken'] = result['data']['iBaseToken'] + self._login_device_id = result['data']['deviceid'] + self._login_url = manage_url + self._login_device_id + + if result['data']['accountstate'] in constants.PWD_EXPIRED_OR_INITIAL: + self._session.delete(self._login_url + "/sessions") + self._login_device_id = None + self._login_url = None + msg = ("Storage password has been expired or initial, " + "please change the password.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _loop_login(self): + self._session = requests.Session() + self._session.headers.update({ + "Connection": "keep-alive", + "Content-Type": "application/json; charset=utf-8"}) + self._session.verify = False + if self.ssl_verify: + self._session.verify = self.cert_path + + for url in self.san_address: + try: + self._session.mount(url.lower(), HostNameIgnoringAdapter()) + self._try_login(url) + except Exception: + LOG.exception('Failed to login server %s.', url) + else: + # Sort the login url to the last slot of san 
addresses, so that + # if this connection error, next time will try other url first. + self.san_address.remove(url) + self.san_address.append(url) + LOG.info('Login %s success.', url) + return + + self._session.close() + self._session = None + + msg = _("Failed to login storage with all rest URLs.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def login(self): + with self._session_lock.write_lock(): + self._loop_login() + + def _relogin(self, old_token): + with self._session_lock.write_lock(): + if (self._session and + self._session.headers.get('iBaseToken') != old_token): + LOG.info('Relogin has been done by other thread, ' + 'no need relogin again.') + return + + # Try to logout the original session first + self._logout() + self._loop_login() + + def _logout(self): + if not self._login_url: + return + + try: + r = self._session.delete(self._login_url + "/sessions") + r.raise_for_status() + except Exception: + LOG.exception("Failed to logout session from URL %s.", + self._login_url) + else: + result = r.json() + if _error_code(result) == 0: + LOG.info("Succeed to logout session from URL %(url)s.", + {"url": self._login_url}) + else: + LOG.warning("Failed to logout session from URL %(url)s " + "because of %(reason)s.", + {"url": self._login_url, "reason": result}) + finally: + self._session.close() + self._session = None + self._login_url = None + self._login_device_id = None + + @property + def device_id(self): + return self._login_device_id + + @rest_operation_wrapper + def get(self, url, timeout=constants.SOCKET_TIMEOUT, **kwargs): + return self._session.get(url, timeout=timeout) + + @rest_operation_wrapper + def post(self, url, data, timeout=constants.SOCKET_TIMEOUT, **kwargs): + return self._session.post(url, data=json.dumps(data), timeout=timeout) + + @rest_operation_wrapper + def put(self, url, data, timeout=constants.SOCKET_TIMEOUT, **kwargs): + return self._session.put(url, data=json.dumps(data), timeout=timeout) + + 
@rest_operation_wrapper + def delete(self, url, timeout=constants.SOCKET_TIMEOUT, **kwargs): + if 'data' in kwargs: + return self._session.delete( + url, data=json.dumps(kwargs['data']), timeout=timeout) + else: + return self._session.delete(url, timeout=timeout) + + def add_pair_to_replication_group(self, group_id, pair_id): + data = {'ID': group_id, + 'RMLIST': [pair_id]} + result = self.put('/ADD_MIRROR', data=data) + _assert_result(result, 'Add pair %s to replication group %s error.', + pair_id, group_id) + + def remove_pair_from_replication_group(self, group_id, pair_id): + data = {'ID': group_id, + 'RMLIST': [pair_id]} + result = self.put('/DEL_MIRROR', data=data) + if _error_code(result) in (constants.REPLICATION_PAIR_NOT_EXIST, + constants.REPLICATION_GROUP_NOT_EXIST, + constants.REPLICATION_PAIR_NOT_GROUP_MEMBER, + constants.REPLICATION_GROUP_IS_EMPTY): + LOG.warning('Ignore error %s while remove replication pair ' + 'from group.', _error_code(result)) + return + _assert_result(result, 'Remove pair %s from replication group %s ' + 'error.', pair_id, group_id) + + def split_replication_group(self, group_id): + data = {'ID': group_id} + result = self.put('/SPLIT_CONSISTENCY_GROUP', data=data) + _assert_result(result, 'Split replication group %s error.', group_id) + + def sync_replication_group(self, group_id): + data = {'ID': group_id} + result = self.put('/SYNCHRONIZE_CONSISTENCY_GROUP', data=data) + if _error_code(result) == constants.REPLICATION_GROUP_IS_EMPTY: + LOG.info("Replication group %s to sync is empty.", group_id) + return + _assert_result(result, 'Sync replication group %s error.', group_id) + + def switch_replication_group(self, group_id): + data = {'ID': group_id} + result = self.put('/SWITCH_GROUP_ROLE', data=data) + _assert_result(result, 'Switch replication group %s error.', group_id) + + def get_array_info(self): + result = self.get('/system/') + _assert_result(result, 'Get array info error.') + return result['data'] + + def 
check_feature(self, obj): + try: + result = self.get('/%s/count' % obj, log_filter=True) + except requests.HTTPError as exc: + if exc.response.status_code == 404: + return False + raise + + return _error_code(result) == 0 + + def get_controller_id(self, controller_name): + result = self.get('/controller') + _assert_result(result, 'Get controllers error.') + + for con in result.get('data', []): + if con.get('LOCATION') == controller_name: + return con['ID'] + + def split_lunclone(self, clone_id): + data = { + "ID": clone_id, + "SPLITACTION": 1, + "ISCLONE": True, + "SPLITSPEED": 4, + } + result = self.put('/lunclone_split_switch', data=data) + _assert_result(result, 'split clone lun %s error.', clone_id) + + def get_workload_type_id(self, workload_type_name): + url = "/workload_type?filter=NAME::%s" % workload_type_name + result = self.get(url) + _assert_result(result, 'Get workload type error') + + for item in result.get("data", []): + if item.get("NAME") == workload_type_name: + return item.get("ID") + + def get_workload_type_name(self, workload_type_id): + url = "/workload_type/%s" % workload_type_id + result = self.get(url) + _assert_result(result, 'Get workload type by id error') + return result.get("data", {}).get("NAME") diff --git a/Cinder/Train/smartx.py b/Cinder/Train/smartx.py new file mode 100644 index 0000000..935d4b7 --- /dev/null +++ b/Cinder/Train/smartx.py @@ -0,0 +1,144 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from oslo_log import log as logging + +from cinder import exception +from cinder.i18n import _ +from cinder import utils +from cinder.volume.drivers.huawei import constants + +LOG = logging.getLogger(__name__) + + +class SmartQos(object): + def __init__(self, client): + self.client = client + + def _check_qos_consistency(self, policy, qos): + for key in [k.upper() for k in constants.QOS_SPEC_KEYS]: + if qos.get(key, '0') != policy.get(key, '0'): + return False + return True + + def _change_lun_priority(self, qos, lun_id): + for key in qos: + if key.startswith('MIN') or key.startswith('LATENCY'): + data = {"IOPRIORITY": "3"} + self.client.update_lun(lun_id, data) + break + + @utils.synchronized('huawei_qos', external=True) + def add(self, qos, lun_id): + self._change_lun_priority(qos, lun_id) + qos_id = self.client.create_qos(qos, lun_id) + try: + self.client.activate_deactivate_qos(qos_id, True) + except exception.VolumeBackendAPIException: + self.remove(qos_id, lun_id) + raise + + return qos_id + + @utils.synchronized('huawei_qos', external=True) + def remove(self, qos_id, lun_id, qos_info=None): + if not qos_info: + qos_info = self.client.get_qos_info(qos_id) + lun_list = json.loads(qos_info['LUNLIST']) + if lun_id in lun_list: + lun_list.remove(lun_id) + + if len(lun_list) <= 0: + if qos_info['RUNNINGSTATUS'] != constants.QOS_INACTIVATED: + self.client.activate_deactivate_qos(qos_id, False) + self.client.delete_qos(qos_id) + else: + self.client.update_qos_luns(qos_id, lun_list) + + def update(self, qos_id, new_qos, lun_id): + qos_info = self.client.get_qos_info(qos_id) + if self._check_qos_consistency(qos_info, new_qos): + return + + self.remove(qos_id, lun_id, qos_info) + self.add(new_qos, lun_id) + + +class SmartPartition(object): + def __init__(self, client): + self.client = client + + def add(self, partitionname, lun_id): + partition_id 
= self.client.get_partition_id_by_name(partitionname) + if not partition_id: + msg = _('Cannot find partition by name %s.') % partitionname + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + self.client.add_lun_to_partition(lun_id, partition_id) + return partition_id + + def remove(self, partition_id, lun_id): + self.client.remove_lun_from_partition(lun_id, partition_id) + + def update(self, partition_id, partitionname, lun_id): + partition_info = self.client.get_partition_info_by_id(partition_id) + if partition_info['NAME'] == partitionname: + return + + self.remove(partition_id, lun_id) + self.add(partitionname, lun_id) + + def check_partition_valid(self, partitionname): + partition_id = self.client.get_partition_id_by_name(partitionname) + if not partition_id: + msg = _("Partition %s doesn't exist.") % partitionname + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +class SmartCache(object): + def __init__(self, client): + self.client = client + + def add(self, cachename, lun_id): + cache_id = self.client.get_cache_id_by_name(cachename) + if not cache_id: + msg = _('Cannot find cache by name %s.') % cachename + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + self.client.add_lun_to_cache(lun_id, cache_id) + return cache_id + + def remove(self, cache_id, lun_id): + self.client.remove_lun_from_cache(lun_id, cache_id) + + def update(self, cache_id, cachename, lun_id): + cache_info = self.client.get_cache_info_by_id(cache_id) + if cache_info['NAME'] == cachename: + return + + self.remove(cache_id, lun_id) + self.add(cachename, lun_id) + + def check_cache_valid(self, cachename): + cache_id = self.client.get_cache_id_by_name(cachename) + if not cache_id: + msg = _("Cache %s doesn't exist.") % cachename + LOG.error(msg) + raise exception.InvalidInput(reason=msg) diff --git "a/ReleaseDoc/zh/OpenStack Cinder Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" "b/ReleaseDoc/zh/OpenStack Cinder Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" index 611572f..40de543 100644 
Binary files a/ReleaseDoc/en/OpenStack Cinder Driver Configuration Guide.pdf and b/ReleaseDoc/en/OpenStack Cinder Driver Configuration Guide.pdf differ diff --git a/ReleaseDoc/en/OpenStack Manila Driver Configuration Guide.pdf b/ReleaseDoc/en/OpenStack Manila Driver Configuration Guide.pdf index 415ca54..9b86726 100644 Binary files a/ReleaseDoc/en/OpenStack Manila Driver Configuration Guide.pdf and b/ReleaseDoc/en/OpenStack Manila Driver Configuration Guide.pdf differ diff --git "a/ReleaseDoc/zh/OpenStack Cinder Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" "b/ReleaseDoc/zh/OpenStack Cinder Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" index 611572f..40de543 100644 Binary files "a/ReleaseDoc/zh/OpenStack Cinder Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" and "b/ReleaseDoc/zh/OpenStack Cinder Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" differ diff --git "a/ReleaseDoc/zh/OpenStack Manila Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" "b/ReleaseDoc/zh/OpenStack Manila Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" index 9c5c192..1ca9078 100644 Binary files "a/ReleaseDoc/zh/OpenStack Manila Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" and "b/ReleaseDoc/zh/OpenStack Manila Driver\351\205\215\347\275\256\346\214\207\345\215\227.pdf" differ