From af844bf319d15317ace8d6518bfc1fda2336f27d Mon Sep 17 00:00:00 2001 From: doubletao318 Date: Sat, 14 Nov 2020 19:44:20 -0800 Subject: [PATCH] 2.2.RC2 --- Manila/{Ocata => Ocata-eol}/__init__.py | 0 Manila/{Ocata => Ocata-eol}/base.py | 0 Manila/{Ocata => Ocata-eol}/constants.py | 0 Manila/{Ocata => Ocata-eol}/huawei_nas.py | 0 Manila/{Ocata => Ocata-eol}/huawei_utils.py | 0 Manila/{Ocata => Ocata-eol}/v3/__init__.py | 0 Manila/{Ocata => Ocata-eol}/v3/connection.py | 0 Manila/{Ocata => Ocata-eol}/v3/helper.py | 0 Manila/{Ocata => Ocata-eol}/v3/manager.py | 0 Manila/{Ocata => Ocata-eol}/v3/replication.py | 0 Manila/{Ocata => Ocata-eol}/v3/rpcapi.py | 0 Manila/{Ocata => Ocata-eol}/v3/smartx.py | 0 Manila/Pike/constants.py | 1 + Manila/Pike/huawei_utils.py | 9 + Manila/Pike/v3/connection.py | 24 +- Manila/Pike/v3/helper.py | 31 +- Manila/Queens/constants.py | 1 + Manila/Queens/huawei_utils.py | 9 + Manila/Queens/v3/connection.py | 24 +- Manila/Queens/v3/helper.py | 31 +- Manila/Rocky/constants.py | 1 + Manila/Rocky/helper.py | 14 +- Manila/Rocky/huawei_config.py | 15 +- Manila/Rocky/huawei_nas.py | 22 +- Manila/Stein/constants.py | 1 + Manila/Stein/helper.py | 14 +- Manila/Stein/huawei_config.py | 15 +- Manila/Stein/huawei_nas.py | 22 +- Manila/Train/constants.py | 1 + Manila/Train/helper.py | 14 +- Manila/Train/huawei_config.py | 15 +- Manila/Train/huawei_nas.py | 22 +- Manila/Ussuri/__init__.py | 1 + Manila/Ussuri/constants.py | 131 ++ Manila/Ussuri/helper.py | 889 +++++++++ Manila/Ussuri/huawei_config.py | 283 +++ Manila/Ussuri/huawei_nas.py | 1602 +++++++++++++++++ Manila/Ussuri/huawei_utils.py | 279 +++ Manila/Ussuri/hypermetro.py | 131 ++ Manila/Ussuri/manager.py | 67 + Manila/Ussuri/replication.py | 256 +++ Manila/Ussuri/rpcapi.py | 188 ++ Manila/Ussuri/smartx.py | 133 ++ 43 files changed, 4178 insertions(+), 68 deletions(-) rename Manila/{Ocata => Ocata-eol}/__init__.py (100%) rename Manila/{Ocata => Ocata-eol}/base.py (100%) rename Manila/{Ocata => 
Ocata-eol}/constants.py (100%) rename Manila/{Ocata => Ocata-eol}/huawei_nas.py (100%) rename Manila/{Ocata => Ocata-eol}/huawei_utils.py (100%) rename Manila/{Ocata => Ocata-eol}/v3/__init__.py (100%) rename Manila/{Ocata => Ocata-eol}/v3/connection.py (100%) rename Manila/{Ocata => Ocata-eol}/v3/helper.py (100%) rename Manila/{Ocata => Ocata-eol}/v3/manager.py (100%) rename Manila/{Ocata => Ocata-eol}/v3/replication.py (100%) rename Manila/{Ocata => Ocata-eol}/v3/rpcapi.py (100%) rename Manila/{Ocata => Ocata-eol}/v3/smartx.py (100%) create mode 100644 Manila/Ussuri/__init__.py create mode 100644 Manila/Ussuri/constants.py create mode 100644 Manila/Ussuri/helper.py create mode 100644 Manila/Ussuri/huawei_config.py create mode 100644 Manila/Ussuri/huawei_nas.py create mode 100644 Manila/Ussuri/huawei_utils.py create mode 100644 Manila/Ussuri/hypermetro.py create mode 100644 Manila/Ussuri/manager.py create mode 100644 Manila/Ussuri/replication.py create mode 100644 Manila/Ussuri/rpcapi.py create mode 100644 Manila/Ussuri/smartx.py diff --git a/Manila/Ocata/__init__.py b/Manila/Ocata-eol/__init__.py similarity index 100% rename from Manila/Ocata/__init__.py rename to Manila/Ocata-eol/__init__.py diff --git a/Manila/Ocata/base.py b/Manila/Ocata-eol/base.py similarity index 100% rename from Manila/Ocata/base.py rename to Manila/Ocata-eol/base.py diff --git a/Manila/Ocata/constants.py b/Manila/Ocata-eol/constants.py similarity index 100% rename from Manila/Ocata/constants.py rename to Manila/Ocata-eol/constants.py diff --git a/Manila/Ocata/huawei_nas.py b/Manila/Ocata-eol/huawei_nas.py similarity index 100% rename from Manila/Ocata/huawei_nas.py rename to Manila/Ocata-eol/huawei_nas.py diff --git a/Manila/Ocata/huawei_utils.py b/Manila/Ocata-eol/huawei_utils.py similarity index 100% rename from Manila/Ocata/huawei_utils.py rename to Manila/Ocata-eol/huawei_utils.py diff --git a/Manila/Ocata/v3/__init__.py b/Manila/Ocata-eol/v3/__init__.py similarity index 100% rename 
from Manila/Ocata/v3/__init__.py rename to Manila/Ocata-eol/v3/__init__.py diff --git a/Manila/Ocata/v3/connection.py b/Manila/Ocata-eol/v3/connection.py similarity index 100% rename from Manila/Ocata/v3/connection.py rename to Manila/Ocata-eol/v3/connection.py diff --git a/Manila/Ocata/v3/helper.py b/Manila/Ocata-eol/v3/helper.py similarity index 100% rename from Manila/Ocata/v3/helper.py rename to Manila/Ocata-eol/v3/helper.py diff --git a/Manila/Ocata/v3/manager.py b/Manila/Ocata-eol/v3/manager.py similarity index 100% rename from Manila/Ocata/v3/manager.py rename to Manila/Ocata-eol/v3/manager.py diff --git a/Manila/Ocata/v3/replication.py b/Manila/Ocata-eol/v3/replication.py similarity index 100% rename from Manila/Ocata/v3/replication.py rename to Manila/Ocata-eol/v3/replication.py diff --git a/Manila/Ocata/v3/rpcapi.py b/Manila/Ocata-eol/v3/rpcapi.py similarity index 100% rename from Manila/Ocata/v3/rpcapi.py rename to Manila/Ocata-eol/v3/rpcapi.py diff --git a/Manila/Ocata/v3/smartx.py b/Manila/Ocata-eol/v3/smartx.py similarity index 100% rename from Manila/Ocata/v3/smartx.py rename to Manila/Ocata-eol/v3/smartx.py diff --git a/Manila/Pike/constants.py b/Manila/Pike/constants.py index 808892e..6758917 100644 --- a/Manila/Pike/constants.py +++ b/Manila/Pike/constants.py @@ -50,6 +50,7 @@ ERROR_USER_OR_GROUP_NOT_EXIST = 1077939723 ERROR_REPLICATION_PAIR_NOT_EXIST = 1077937923 ERROR_HYPERMETRO_NOT_EXIST = 1077674242 +LIF_ALREADY_EXISTS = 1077948993 PORT_TYPE_ETH = '1' PORT_TYPE_BOND = '7' diff --git a/Manila/Pike/huawei_utils.py b/Manila/Pike/huawei_utils.py index 8bceaa4..5173ee4 100644 --- a/Manila/Pike/huawei_utils.py +++ b/Manila/Pike/huawei_utils.py @@ -104,6 +104,15 @@ def get_logical_ips(helper): return [i.strip() for i in config.split(';') if i.strip()] +def get_dns(helper): + root = helper._read_xml() + config = root.findtext('Storage/DNS') + if not config: + return [] + + return [i.strip() for i in config.split(';') if i.strip()] + + def 
wait_fs_online(helper, fs_id, wait_interval, timeout): def _wait_fs_online(): fs = helper._get_fs_info_by_id(fs_id) diff --git a/Manila/Pike/v3/connection.py b/Manila/Pike/v3/connection.py index b38a06c..9d05cc9 100644 --- a/Manila/Pike/v3/connection.py +++ b/Manila/Pike/v3/connection.py @@ -66,7 +66,7 @@ def __init__(self, configuration, **kwargs): self.rpc_client = v3_rpcapi.HuaweiV3API() self.private_storage = kwargs.get('private_storage') self.qos_support = False - self.snapshot_support = False + self.snapshot_support = True self.replication_support = False self.metro_domain = None self.remote_backend = None @@ -95,7 +95,10 @@ def check_storage_pools(self): all_pool_info = self.helper._find_all_pool_info() for pool in all_pool_info['data']: if pool.get('USAGETYPE') in (constants.FILE_SYSTEM_POOL_TYPE, - constants.DORADO_V6_POOL_TYPE): + constants.DORADO_V6_POOL_TYPE) or \ + pool.get('NEWUSAGETYPE') in \ + (constants.FILE_SYSTEM_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE): s_pools.append(pool['NAME']) pool_name_list = root.findtext('Filesystem/StoragePool') @@ -228,6 +231,10 @@ def _get_share_ip(self, share_server, fs_info, vstore_id=None): else: ips = huawei_utils.get_logical_ips(self.helper) + dnses = huawei_utils.get_dns(self.helper) + if dnses: + ips = dnses + return ips def _update_filesystem(self, fs_info, size): @@ -1314,7 +1321,10 @@ def manage_existing(self, share, driver_options): share_size = int(fs['CAPACITY']) / units.Mi / 2 self.helper._change_fs_name(fs_id, share_name) - locations = self._get_location_path(share_name, share_proto) + if share_proto == 'CIFS': + locations = self._get_location_path(old_share_name, share_proto) + else: + locations = self._get_location_path(share_name, share_proto) return share_size, locations def unmanage(self, share): @@ -1611,6 +1621,7 @@ def check_conf_file(self): pwd = root.findtext('Storage/UserPassword') pool_node = root.findtext('Filesystem/StoragePool') logical_port_ip = root.findtext('Storage/LogicalPortIP') 
+ dns = root.findtext('Storage/DNS') if not (resturl and username and pwd): err_msg = (_( @@ -1630,10 +1641,13 @@ def check_conf_file(self): if logical_port_ip: logical_ips = [i.strip() for i in logical_port_ip.split(';') if i.strip()] + dnses = [] + if dns: + dnses = [i.strip() for i in dns.split(';') if i.strip()] if not (self.configuration.driver_handles_share_servers - or logical_ips): + or logical_ips or dnses): err_msg = (_( - 'check_conf_file: Config file invalid. LogicalPortIP ' + 'check_conf_file: Config file invalid. LogicalPortIP or DNS ' 'must be set when driver_handles_share_servers is False.')) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) diff --git a/Manila/Pike/v3/helper.py b/Manila/Pike/v3/helper.py index 822d209..e915c5b 100644 --- a/Manila/Pike/v3/helper.py +++ b/Manila/Pike/v3/helper.py @@ -115,9 +115,9 @@ def login(self): result = self.do_call(url, data, calltimeout=constants.LOGIN_SOCKET_TIMEOUT) - if((result['error']['code'] != 0) - or ("data" not in result) - or (result['data']['deviceid'] is None)): + if ((result['error']['code'] != 0) + or ("data" not in result) + or (result['data']['deviceid'] is None)): LOG.error("Login to %s failed, try another.", item_url) continue @@ -159,8 +159,8 @@ def call(self, url, data=None, method=None): old_url = self.url result = self.do_call(url, data, method) error_code = result['error']['code'] - if(error_code == constants.ERROR_CONNECT_TO_SERVER - or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): + if (error_code == constants.ERROR_CONNECT_TO_SERVER + or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): LOG.error("Can't open the recent url, re-login.") deviceid = self.login() @@ -297,8 +297,12 @@ def _find_pool_info(self, pool_name, result): poolinfo = {} pool_name = pool_name.strip() for item in result.get('data', []): - if pool_name == item['NAME'] and item['USAGETYPE'] in (constants.FILE_SYSTEM_POOL_TYPE, - constants.DORADO_V6_POOL_TYPE): + if pool_name == item['NAME'] 
and (item['USAGETYPE'] in + (constants.FILE_SYSTEM_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE) or + item.get('NEWUSAGETYPE') in + (constants.FILE_SYSTEM_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE)): poolinfo['name'] = pool_name poolinfo['ID'] = item['ID'] poolinfo['CAPACITY'] = item['USERFREECAPACITY'] @@ -790,7 +794,8 @@ def _get_share_name_by_export_location(self, export_location, share_proto): % export_location)) target_ips = huawei_utils.get_logical_ips(self) - if share_ip not in target_ips: + dnses = huawei_utils.get_dns(self) + if share_ip not in target_ips and share_ip not in dnses: raise exception.InvalidInput( reason=_('The share IP %s is not configured.') % share_ip) @@ -1250,10 +1255,14 @@ def get_logical_port_by_id(self, logical_port_id): def modify_logical_port(self, logical_port_id, vstore_id): logical_port_info = self.get_logical_port_by_id(logical_port_id) - logical_port_info.update({'vstoreId': vstore_id, - 'dnsZoneName': ""}) + data = {'vstoreId': vstore_id, + 'dnsZoneName': "", + 'NAME': logical_port_info.get('NAME'), + 'ID': logical_port_info.get('ID')} url = "/LIF/%s" % logical_port_id - result = self.call(url, jsonutils.dumps(logical_port_info), 'PUT') + result = self.call(url, jsonutils.dumps(data), 'PUT') + if result['error']['code'] == constants.LIF_ALREADY_EXISTS: + return self._assert_rest_result(result, _('Modify logical port error.')) def delete_logical_port(self, logical_port_id): diff --git a/Manila/Queens/constants.py b/Manila/Queens/constants.py index 808892e..6758917 100644 --- a/Manila/Queens/constants.py +++ b/Manila/Queens/constants.py @@ -50,6 +50,7 @@ ERROR_USER_OR_GROUP_NOT_EXIST = 1077939723 ERROR_REPLICATION_PAIR_NOT_EXIST = 1077937923 ERROR_HYPERMETRO_NOT_EXIST = 1077674242 +LIF_ALREADY_EXISTS = 1077948993 PORT_TYPE_ETH = '1' PORT_TYPE_BOND = '7' diff --git a/Manila/Queens/huawei_utils.py b/Manila/Queens/huawei_utils.py index 8bceaa4..5173ee4 100644 --- a/Manila/Queens/huawei_utils.py +++ b/Manila/Queens/huawei_utils.py @@ 
-104,6 +104,15 @@ def get_logical_ips(helper): return [i.strip() for i in config.split(';') if i.strip()] +def get_dns(helper): + root = helper._read_xml() + config = root.findtext('Storage/DNS') + if not config: + return [] + + return [i.strip() for i in config.split(';') if i.strip()] + + def wait_fs_online(helper, fs_id, wait_interval, timeout): def _wait_fs_online(): fs = helper._get_fs_info_by_id(fs_id) diff --git a/Manila/Queens/v3/connection.py b/Manila/Queens/v3/connection.py index b38a06c..9d05cc9 100644 --- a/Manila/Queens/v3/connection.py +++ b/Manila/Queens/v3/connection.py @@ -66,7 +66,7 @@ def __init__(self, configuration, **kwargs): self.rpc_client = v3_rpcapi.HuaweiV3API() self.private_storage = kwargs.get('private_storage') self.qos_support = False - self.snapshot_support = False + self.snapshot_support = True self.replication_support = False self.metro_domain = None self.remote_backend = None @@ -95,7 +95,10 @@ def check_storage_pools(self): all_pool_info = self.helper._find_all_pool_info() for pool in all_pool_info['data']: if pool.get('USAGETYPE') in (constants.FILE_SYSTEM_POOL_TYPE, - constants.DORADO_V6_POOL_TYPE): + constants.DORADO_V6_POOL_TYPE) or \ + pool.get('NEWUSAGETYPE') in \ + (constants.FILE_SYSTEM_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE): s_pools.append(pool['NAME']) pool_name_list = root.findtext('Filesystem/StoragePool') @@ -228,6 +231,10 @@ def _get_share_ip(self, share_server, fs_info, vstore_id=None): else: ips = huawei_utils.get_logical_ips(self.helper) + dnses = huawei_utils.get_dns(self.helper) + if dnses: + ips = dnses + return ips def _update_filesystem(self, fs_info, size): @@ -1314,7 +1321,10 @@ def manage_existing(self, share, driver_options): share_size = int(fs['CAPACITY']) / units.Mi / 2 self.helper._change_fs_name(fs_id, share_name) - locations = self._get_location_path(share_name, share_proto) + if share_proto == 'CIFS': + locations = self._get_location_path(old_share_name, share_proto) + else: + locations = 
self._get_location_path(share_name, share_proto) return share_size, locations def unmanage(self, share): @@ -1611,6 +1621,7 @@ def check_conf_file(self): pwd = root.findtext('Storage/UserPassword') pool_node = root.findtext('Filesystem/StoragePool') logical_port_ip = root.findtext('Storage/LogicalPortIP') + dns = root.findtext('Storage/DNS') if not (resturl and username and pwd): err_msg = (_( @@ -1630,10 +1641,13 @@ def check_conf_file(self): if logical_port_ip: logical_ips = [i.strip() for i in logical_port_ip.split(';') if i.strip()] + dnses = [] + if dns: + dnses = [i.strip() for i in dns.split(';') if i.strip()] if not (self.configuration.driver_handles_share_servers - or logical_ips): + or logical_ips or dnses): err_msg = (_( - 'check_conf_file: Config file invalid. LogicalPortIP ' + 'check_conf_file: Config file invalid. LogicalPortIP or DNS ' 'must be set when driver_handles_share_servers is False.')) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) diff --git a/Manila/Queens/v3/helper.py b/Manila/Queens/v3/helper.py index 822d209..e915c5b 100644 --- a/Manila/Queens/v3/helper.py +++ b/Manila/Queens/v3/helper.py @@ -115,9 +115,9 @@ def login(self): result = self.do_call(url, data, calltimeout=constants.LOGIN_SOCKET_TIMEOUT) - if((result['error']['code'] != 0) - or ("data" not in result) - or (result['data']['deviceid'] is None)): + if ((result['error']['code'] != 0) + or ("data" not in result) + or (result['data']['deviceid'] is None)): LOG.error("Login to %s failed, try another.", item_url) continue @@ -159,8 +159,8 @@ def call(self, url, data=None, method=None): old_url = self.url result = self.do_call(url, data, method) error_code = result['error']['code'] - if(error_code == constants.ERROR_CONNECT_TO_SERVER - or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): + if (error_code == constants.ERROR_CONNECT_TO_SERVER + or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): LOG.error("Can't open the recent url, re-login.") deviceid = 
self.login() @@ -297,8 +297,12 @@ def _find_pool_info(self, pool_name, result): poolinfo = {} pool_name = pool_name.strip() for item in result.get('data', []): - if pool_name == item['NAME'] and item['USAGETYPE'] in (constants.FILE_SYSTEM_POOL_TYPE, - constants.DORADO_V6_POOL_TYPE): + if pool_name == item['NAME'] and (item['USAGETYPE'] in + (constants.FILE_SYSTEM_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE) or + item.get('NEWUSAGETYPE') in + (constants.FILE_SYSTEM_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE)): poolinfo['name'] = pool_name poolinfo['ID'] = item['ID'] poolinfo['CAPACITY'] = item['USERFREECAPACITY'] @@ -790,7 +794,8 @@ def _get_share_name_by_export_location(self, export_location, share_proto): % export_location)) target_ips = huawei_utils.get_logical_ips(self) - if share_ip not in target_ips: + dnses = huawei_utils.get_dns(self) + if share_ip not in target_ips and share_ip not in dnses: raise exception.InvalidInput( reason=_('The share IP %s is not configured.') % share_ip) @@ -1250,10 +1255,14 @@ def get_logical_port_by_id(self, logical_port_id): def modify_logical_port(self, logical_port_id, vstore_id): logical_port_info = self.get_logical_port_by_id(logical_port_id) - logical_port_info.update({'vstoreId': vstore_id, - 'dnsZoneName': ""}) + data = {'vstoreId': vstore_id, + 'dnsZoneName': "", + 'NAME': logical_port_info.get('NAME'), + 'ID': logical_port_info.get('ID')} url = "/LIF/%s" % logical_port_id - result = self.call(url, jsonutils.dumps(logical_port_info), 'PUT') + result = self.call(url, jsonutils.dumps(data), 'PUT') + if result['error']['code'] == constants.LIF_ALREADY_EXISTS: + return self._assert_rest_result(result, _('Modify logical port error.')) def delete_logical_port(self, logical_port_id): diff --git a/Manila/Rocky/constants.py b/Manila/Rocky/constants.py index e0f31b5..fe8200b 100644 --- a/Manila/Rocky/constants.py +++ b/Manila/Rocky/constants.py @@ -54,6 +54,7 @@ ERROR_HYPERMETRO_NOT_EXIST = 1077674242 SNAPSHOT_NOT_EXIST = 1073754118 
SHARE_PATH_INVALID = 1077939729 +LIF_ALREADY_EXISTS = 1077948993 PORT_TYPE_ETH = '1' PORT_TYPE_BOND = '7' diff --git a/Manila/Rocky/helper.py b/Manila/Rocky/helper.py index 09c9887..5464f61 100644 --- a/Manila/Rocky/helper.py +++ b/Manila/Rocky/helper.py @@ -107,12 +107,12 @@ def do_call(self, postfix_url, method, data=None, def _get_user_info(self): if self.nas_username.startswith('!$$$'): - username = base64.b64decode(self.nas_username[4:]) + username = base64.b64decode(self.nas_username[4:]).decode() else: username = self.nas_username if self.nas_password.startswith('!$$$'): - password = base64.b64decode(self.nas_password[4:]) + password = base64.b64decode(self.nas_password[4:]).decode() else: password = self.nas_password @@ -647,10 +647,14 @@ def get_logical_port_by_id(self, logical_port_id): def modify_logical_port(self, logical_port_id, vstore_id): logical_port_info = self.get_logical_port_by_id(logical_port_id) - logical_port_info.update({'vstoreId': vstore_id, - 'dnsZoneName': ""}) + data = {'vstoreId': vstore_id, + 'dnsZoneName': "", + 'NAME': logical_port_info.get('NAME'), + 'ID': logical_port_info.get('ID')} url = "/LIF/%s" % logical_port_id - result = self.call(url, 'PUT', logical_port_info) + result = self.call(url, 'PUT', data) + if result['error']['code'] == constants.LIF_ALREADY_EXISTS: + return _assert_result(result, 'Modify logical port error.') def delete_logical_port(self, logical_port_id): diff --git a/Manila/Rocky/huawei_config.py b/Manila/Rocky/huawei_config.py index c889ef6..dba4caf 100644 --- a/Manila/Rocky/huawei_config.py +++ b/Manila/Rocky/huawei_config.py @@ -60,6 +60,7 @@ def update_configs(self): self._cifs_client, self._snapshot_reserve, self._logical_ip, + self._dns, ) for f in attr_funcs: @@ -71,11 +72,13 @@ def _encode_authentication(self, tree, xml_root): need_encode = False if name_node is not None and not name_node.text.startswith('!$$$'): - name_node.text = '!$$$' + base64.b64encode(name_node.text) + name_node.text = '!$$$' + 
base64.b64encode( + name_node.text.encode()).decode() need_encode = True if pwd_node is not None and not pwd_node.text.startswith('!$$$'): - pwd_node.text = '!$$$' + base64.b64encode(pwd_node.text) + pwd_node.text = '!$$$' + base64.b64encode( + pwd_node.text.encode()).decode() need_encode = True if need_encode: @@ -179,6 +182,14 @@ def _logical_ip(self, xml_root): setattr(self.config, 'logical_ip', logical_ip) + def _dns(self, xml_root): + dns = [] + text = xml_root.findtext('Storage/DNS') + if text: + dns = [i.strip() for i in text.split(";") if i.strip()] + + setattr(self.config, 'dns', dns) + def _ports(self, xml_root): ports = [] text = xml_root.findtext('Storage/Port') diff --git a/Manila/Rocky/huawei_nas.py b/Manila/Rocky/huawei_nas.py index fdbc3eb..57c6917 100644 --- a/Manila/Rocky/huawei_nas.py +++ b/Manila/Rocky/huawei_nas.py @@ -116,7 +116,10 @@ def _check_storage_pools(self): pools = self.helper.get_all_pools() for pool in pools: if pool.get('USAGETYPE') in (constants.FILE_SYSTEM_POOL_TYPE, - constants.DORADO_V6_POOL_TYPE): + constants.DORADO_V6_POOL_TYPE) or \ + pool.get('NEWUSAGETYPE') in \ + (constants.FILE_SYSTEM_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE): s_pools.append(pool['NAME']) for pool_name in self.configuration.storage_pools: @@ -127,9 +130,10 @@ def _check_storage_pools(self): def _check_config(self): if (not self.configuration.driver_handles_share_servers and - not self.configuration.logical_ip): - msg = _('Either driver_handles_share_servers or LogicalPortIP ' - 'must be set.') + not self.configuration.logical_ip and + not self.configuration.dns): + msg = _('driver_handles_share_servers or LogicalPortIP ' + 'or DNS must be set at least one.') LOG.error(msg) raise exception.BadConfigurationException(reason=msg) @@ -385,10 +389,17 @@ def _get_export_location(self, share_name, share_proto, else: ips = [self.metro_logic_ip] + dnses = self.configuration.dns + if dnses: + ips = dnses + path_name = huawei_utils.share_name(share_name) if 
share_proto == 'NFS': locations = ['%s:/%s' % (ip, path_name) for ip in ips] elif share_proto == 'CIFS': + share_info = self.helper.get_share_by_name( + share_name, share_proto, fs_info.get('vstoreId')) + path_name = huawei_utils.share_name(share_info.get('NAME')) locations = [r'\\%s\%s' % (ip, path_name) for ip in ips] else: msg = _('Invalid NAS protocol %s.') % share_proto @@ -1035,7 +1046,8 @@ def manage_existing(self, share, driver_options): LOG.error(msg) raise exception.InvalidInput(reason=msg) - if old_share_ip not in self.configuration.logical_ip: + if old_share_ip not in self.configuration.logical_ip \ + and old_share_ip not in self.configuration.dns: msg = _('IP %s inconsistent with logical IP.') % old_share_ip LOG.error(msg) raise exception.InvalidInput(reason=msg) diff --git a/Manila/Stein/constants.py b/Manila/Stein/constants.py index e0f31b5..fe8200b 100644 --- a/Manila/Stein/constants.py +++ b/Manila/Stein/constants.py @@ -54,6 +54,7 @@ ERROR_HYPERMETRO_NOT_EXIST = 1077674242 SNAPSHOT_NOT_EXIST = 1073754118 SHARE_PATH_INVALID = 1077939729 +LIF_ALREADY_EXISTS = 1077948993 PORT_TYPE_ETH = '1' PORT_TYPE_BOND = '7' diff --git a/Manila/Stein/helper.py b/Manila/Stein/helper.py index 09c9887..5464f61 100644 --- a/Manila/Stein/helper.py +++ b/Manila/Stein/helper.py @@ -107,12 +107,12 @@ def do_call(self, postfix_url, method, data=None, def _get_user_info(self): if self.nas_username.startswith('!$$$'): - username = base64.b64decode(self.nas_username[4:]) + username = base64.b64decode(self.nas_username[4:]).decode() else: username = self.nas_username if self.nas_password.startswith('!$$$'): - password = base64.b64decode(self.nas_password[4:]) + password = base64.b64decode(self.nas_password[4:]).decode() else: password = self.nas_password @@ -647,10 +647,14 @@ def get_logical_port_by_id(self, logical_port_id): def modify_logical_port(self, logical_port_id, vstore_id): logical_port_info = self.get_logical_port_by_id(logical_port_id) - 
logical_port_info.update({'vstoreId': vstore_id, - 'dnsZoneName': ""}) + data = {'vstoreId': vstore_id, + 'dnsZoneName': "", + 'NAME': logical_port_info.get('NAME'), + 'ID': logical_port_info.get('ID')} url = "/LIF/%s" % logical_port_id - result = self.call(url, 'PUT', logical_port_info) + result = self.call(url, 'PUT', data) + if result['error']['code'] == constants.LIF_ALREADY_EXISTS: + return _assert_result(result, 'Modify logical port error.') def delete_logical_port(self, logical_port_id): diff --git a/Manila/Stein/huawei_config.py b/Manila/Stein/huawei_config.py index c889ef6..dba4caf 100644 --- a/Manila/Stein/huawei_config.py +++ b/Manila/Stein/huawei_config.py @@ -60,6 +60,7 @@ def update_configs(self): self._cifs_client, self._snapshot_reserve, self._logical_ip, + self._dns, ) for f in attr_funcs: @@ -71,11 +72,13 @@ def _encode_authentication(self, tree, xml_root): need_encode = False if name_node is not None and not name_node.text.startswith('!$$$'): - name_node.text = '!$$$' + base64.b64encode(name_node.text) + name_node.text = '!$$$' + base64.b64encode( + name_node.text.encode()).decode() need_encode = True if pwd_node is not None and not pwd_node.text.startswith('!$$$'): - pwd_node.text = '!$$$' + base64.b64encode(pwd_node.text) + pwd_node.text = '!$$$' + base64.b64encode( + pwd_node.text.encode()).decode() need_encode = True if need_encode: @@ -179,6 +182,14 @@ def _logical_ip(self, xml_root): setattr(self.config, 'logical_ip', logical_ip) + def _dns(self, xml_root): + dns = [] + text = xml_root.findtext('Storage/DNS') + if text: + dns = [i.strip() for i in text.split(";") if i.strip()] + + setattr(self.config, 'dns', dns) + def _ports(self, xml_root): ports = [] text = xml_root.findtext('Storage/Port') diff --git a/Manila/Stein/huawei_nas.py b/Manila/Stein/huawei_nas.py index fdbc3eb..57c6917 100644 --- a/Manila/Stein/huawei_nas.py +++ b/Manila/Stein/huawei_nas.py @@ -116,7 +116,10 @@ def _check_storage_pools(self): pools = 
self.helper.get_all_pools() for pool in pools: if pool.get('USAGETYPE') in (constants.FILE_SYSTEM_POOL_TYPE, - constants.DORADO_V6_POOL_TYPE): + constants.DORADO_V6_POOL_TYPE) or \ + pool.get('NEWUSAGETYPE') in \ + (constants.FILE_SYSTEM_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE): s_pools.append(pool['NAME']) for pool_name in self.configuration.storage_pools: @@ -127,9 +130,10 @@ def _check_storage_pools(self): def _check_config(self): if (not self.configuration.driver_handles_share_servers and - not self.configuration.logical_ip): - msg = _('Either driver_handles_share_servers or LogicalPortIP ' - 'must be set.') + not self.configuration.logical_ip and + not self.configuration.dns): + msg = _('driver_handles_share_servers or LogicalPortIP ' + 'or DNS must be set at least one.') LOG.error(msg) raise exception.BadConfigurationException(reason=msg) @@ -385,10 +389,17 @@ def _get_export_location(self, share_name, share_proto, else: ips = [self.metro_logic_ip] + dnses = self.configuration.dns + if dnses: + ips = dnses + path_name = huawei_utils.share_name(share_name) if share_proto == 'NFS': locations = ['%s:/%s' % (ip, path_name) for ip in ips] elif share_proto == 'CIFS': + share_info = self.helper.get_share_by_name( + share_name, share_proto, fs_info.get('vstoreId')) + path_name = huawei_utils.share_name(share_info.get('NAME')) locations = [r'\\%s\%s' % (ip, path_name) for ip in ips] else: msg = _('Invalid NAS protocol %s.') % share_proto @@ -1035,7 +1046,8 @@ def manage_existing(self, share, driver_options): LOG.error(msg) raise exception.InvalidInput(reason=msg) - if old_share_ip not in self.configuration.logical_ip: + if old_share_ip not in self.configuration.logical_ip \ + and old_share_ip not in self.configuration.dns: msg = _('IP %s inconsistent with logical IP.') % old_share_ip LOG.error(msg) raise exception.InvalidInput(reason=msg) diff --git a/Manila/Train/constants.py b/Manila/Train/constants.py index e0f31b5..fe8200b 100644 --- a/Manila/Train/constants.py 
+++ b/Manila/Train/constants.py @@ -54,6 +54,7 @@ ERROR_HYPERMETRO_NOT_EXIST = 1077674242 SNAPSHOT_NOT_EXIST = 1073754118 SHARE_PATH_INVALID = 1077939729 +LIF_ALREADY_EXISTS = 1077948993 PORT_TYPE_ETH = '1' PORT_TYPE_BOND = '7' diff --git a/Manila/Train/helper.py b/Manila/Train/helper.py index 09c9887..5464f61 100644 --- a/Manila/Train/helper.py +++ b/Manila/Train/helper.py @@ -107,12 +107,12 @@ def do_call(self, postfix_url, method, data=None, def _get_user_info(self): if self.nas_username.startswith('!$$$'): - username = base64.b64decode(self.nas_username[4:]) + username = base64.b64decode(self.nas_username[4:]).decode() else: username = self.nas_username if self.nas_password.startswith('!$$$'): - password = base64.b64decode(self.nas_password[4:]) + password = base64.b64decode(self.nas_password[4:]).decode() else: password = self.nas_password @@ -647,10 +647,14 @@ def get_logical_port_by_id(self, logical_port_id): def modify_logical_port(self, logical_port_id, vstore_id): logical_port_info = self.get_logical_port_by_id(logical_port_id) - logical_port_info.update({'vstoreId': vstore_id, - 'dnsZoneName': ""}) + data = {'vstoreId': vstore_id, + 'dnsZoneName': "", + 'NAME': logical_port_info.get('NAME'), + 'ID': logical_port_info.get('ID')} url = "/LIF/%s" % logical_port_id - result = self.call(url, 'PUT', logical_port_info) + result = self.call(url, 'PUT', data) + if result['error']['code'] == constants.LIF_ALREADY_EXISTS: + return _assert_result(result, 'Modify logical port error.') def delete_logical_port(self, logical_port_id): diff --git a/Manila/Train/huawei_config.py b/Manila/Train/huawei_config.py index c889ef6..dba4caf 100644 --- a/Manila/Train/huawei_config.py +++ b/Manila/Train/huawei_config.py @@ -60,6 +60,7 @@ def update_configs(self): self._cifs_client, self._snapshot_reserve, self._logical_ip, + self._dns, ) for f in attr_funcs: @@ -71,11 +72,13 @@ def _encode_authentication(self, tree, xml_root): need_encode = False if name_node is not None and not 
name_node.text.startswith('!$$$'): - name_node.text = '!$$$' + base64.b64encode(name_node.text) + name_node.text = '!$$$' + base64.b64encode( + name_node.text.encode()).decode() need_encode = True if pwd_node is not None and not pwd_node.text.startswith('!$$$'): - pwd_node.text = '!$$$' + base64.b64encode(pwd_node.text) + pwd_node.text = '!$$$' + base64.b64encode( + pwd_node.text.encode()).decode() need_encode = True if need_encode: @@ -179,6 +182,14 @@ def _logical_ip(self, xml_root): setattr(self.config, 'logical_ip', logical_ip) + def _dns(self, xml_root): + dns = [] + text = xml_root.findtext('Storage/DNS') + if text: + dns = [i.strip() for i in text.split(";") if i.strip()] + + setattr(self.config, 'dns', dns) + def _ports(self, xml_root): ports = [] text = xml_root.findtext('Storage/Port') diff --git a/Manila/Train/huawei_nas.py b/Manila/Train/huawei_nas.py index fdbc3eb..57c6917 100644 --- a/Manila/Train/huawei_nas.py +++ b/Manila/Train/huawei_nas.py @@ -116,7 +116,10 @@ def _check_storage_pools(self): pools = self.helper.get_all_pools() for pool in pools: if pool.get('USAGETYPE') in (constants.FILE_SYSTEM_POOL_TYPE, - constants.DORADO_V6_POOL_TYPE): + constants.DORADO_V6_POOL_TYPE) or \ + pool.get('NEWUSAGETYPE') in \ + (constants.FILE_SYSTEM_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE): s_pools.append(pool['NAME']) for pool_name in self.configuration.storage_pools: @@ -127,9 +130,10 @@ def _check_storage_pools(self): def _check_config(self): if (not self.configuration.driver_handles_share_servers and - not self.configuration.logical_ip): - msg = _('Either driver_handles_share_servers or LogicalPortIP ' - 'must be set.') + not self.configuration.logical_ip and + not self.configuration.dns): + msg = _('driver_handles_share_servers or LogicalPortIP ' + 'or DNS must be set at least one.') LOG.error(msg) raise exception.BadConfigurationException(reason=msg) @@ -385,10 +389,17 @@ def _get_export_location(self, share_name, share_proto, else: ips = 
[self.metro_logic_ip] + dnses = self.configuration.dns + if dnses: + ips = dnses + path_name = huawei_utils.share_name(share_name) if share_proto == 'NFS': locations = ['%s:/%s' % (ip, path_name) for ip in ips] elif share_proto == 'CIFS': + share_info = self.helper.get_share_by_name( + share_name, share_proto, fs_info.get('vstoreId')) + path_name = huawei_utils.share_name(share_info.get('NAME')) locations = [r'\\%s\%s' % (ip, path_name) for ip in ips] else: msg = _('Invalid NAS protocol %s.') % share_proto @@ -1035,7 +1046,8 @@ def manage_existing(self, share, driver_options): LOG.error(msg) raise exception.InvalidInput(reason=msg) - if old_share_ip not in self.configuration.logical_ip: + if old_share_ip not in self.configuration.logical_ip \ + and old_share_ip not in self.configuration.dns: msg = _('IP %s inconsistent with logical IP.') % old_share_ip LOG.error(msg) raise exception.InvalidInput(reason=msg) diff --git a/Manila/Ussuri/__init__.py b/Manila/Ussuri/__init__.py new file mode 100644 index 0000000..51eff13 --- /dev/null +++ b/Manila/Ussuri/__init__.py @@ -0,0 +1 @@ +"""Version: 2.2.4""" diff --git a/Manila/Ussuri/constants.py b/Manila/Ussuri/constants.py new file mode 100644 index 0000000..fe8200b --- /dev/null +++ b/Manila/Ussuri/constants.py @@ -0,0 +1,131 @@ +# Copyright (c) 2014 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +PORT_LINKUP = "10" +STATUS_FS_HEALTH = "1" +STATUS_FS_FAULT = "2" +STATUS_FS_RUNNING = "27" +STATUS_SNAPSHOT_HEALTH = '1' +AD_JOIN_DOMAIN = '1' +AD_EXIT_DOMAIN = '0' +AD_JOIN_FAILED = '4' +STATUS_SERVICE_RUNNING = "2" +QOS_ACTIVE = '2' +QOS_INACTIVATED = '45' +MAX_FS_NUM_IN_QOS = 64 + +CAPACITY_UNIT = 1024 * 1024 * 2 +DEFAULT_WAIT_INTERVAL = 3 +DEFAULT_TIMEOUT = 60 +PWD_EXPIRED_OR_INITIAL = (3, 4) + +SOCKET_TIMEOUT = 52 +LOGIN_SOCKET_TIMEOUT = 4 +QOS_NAME_PREFIX = 'OpenStack_' +TMP_PATH_SRC_PREFIX = "huawei_manila_tmp_path_src_" +TMP_PATH_DST_PREFIX = "huawei_manila_tmp_path_dst_" + +ACCESS_NFS_RW = "1" +ACCESS_NFS_RO = "0" +ACCESS_CIFS_FULLCONTROL = "1" +ACCESS_CIFS_RO = "0" + +ERROR_CONNECT_TO_SERVER = -403 +ERROR_UNAUTHORIZED_TO_SERVER = -401 +ERROR_LOGICAL_PORT_EXIST = 1073813505 +ERROR_USER_OR_GROUP_NOT_EXIST = 1077939723 +REPLICATION_PAIR_NOT_EXIST = 1077937923 +OBJECT_NOT_EXIST = 1077948996 +AD_DOMAIN_NOT_EXIST = 1077939763 +FILESYSTEM_NOT_EXIST = 1073752065 +SHARE_NOT_EXIST = 1077939717 +ERROR_HYPERMETRO_NOT_EXIST = 1077674242 +SNAPSHOT_NOT_EXIST = 1073754118 +SHARE_PATH_INVALID = 1077939729 +LIF_ALREADY_EXISTS = 1077948993 + +PORT_TYPE_ETH = '1' +PORT_TYPE_BOND = '7' +PORT_TYPE_VLAN = '8' + +ALLOC_TYPE_THIN_FLAG = "1" +ALLOC_TYPE_THICK_FLAG = "0" + +ALLOC_TYPE_THIN = "Thin" +ALLOC_TYPE_THICK = "Thick" +THIN_PROVISIONING = "true" +THICK_PROVISIONING = "false" + +FILE_SYSTEM_POOL_TYPE = '2' +DORADO_V6_POOL_TYPE = '0' + +QOS_LOWER_LIMIT = ('MINIOPS', 'LATENCY', 'MINBANDWIDTH') +QOS_UPPER_LIMIT = ('MAXIOPS', 'MAXBANDWIDTH') +QOS_KEYS = QOS_LOWER_LIMIT + QOS_UPPER_LIMIT + ('IOTYPE',) +QOS_IO_TYPES = ('0', '1', '2') + +LOCAL_RES_TYPES = (FILE_SYSTEM_TYPE,) = ('40',) + +REPLICA_MODELS = (REPLICA_SYNC_MODEL, + REPLICA_ASYNC_MODEL) = ('1', '2') + +REPLICA_SPEED_MODELS = (REPLICA_SPEED_LOW, + REPLICA_SPEED_MEDIUM, + REPLICA_SPEED_HIGH, + REPLICA_SPEED_HIGHEST) = ('1', '2', '3', '4') + +REPLICA_HEALTH_STATUSES = (REPLICA_HEALTH_STATUS_NORMAL, + 
REPLICA_HEALTH_STATUS_FAULT, + REPLICA_HEALTH_STATUS_INVALID) = ('1', '2', '14') + +REPLICATION_TYPES = (REMOTE_REPLICATION, LOCAL_REPLICATION) = ('0', '1') + +REPLICA_DATA_STATUSES = ( + REPLICA_DATA_STATUS_SYNCHRONIZED, + REPLICA_DATA_STATUS_COMPLETE, + REPLICA_DATA_STATUS_INCOMPLETE) = ('1', '2', '5') + +REPLICA_DATA_STATUS_IN_SYNC = ( + REPLICA_DATA_STATUS_SYNCHRONIZED, + REPLICA_DATA_STATUS_COMPLETE) + +REPLICA_RUNNING_STATUSES = ( + REPLICA_RUNNING_STATUS_NORMAL, + REPLICA_RUNNING_STATUS_SYNCING, + REPLICA_RUNNING_STATUS_SPLITTED, + REPLICA_RUNNING_STATUS_TO_RECOVER, + REPLICA_RUNNING_STATUS_INTERRUPTED, + REPLICA_RUNNING_STATUS_INVALID) = ( + '1', '23', '26', '33', '34', '35') + +REPLICA_SECONDARY_ACCESS_RIGHTS = ( + REPLICA_SECONDARY_ACCESS_DENIED, + REPLICA_SECONDARY_RO, + REPLICA_SECONDARY_RW) = ('1', '2', '3') + +METRO_RUNNING_STATUSES = ( + METRO_RUNNING_STATUS_NORMAL, + METRO_RUNNING_STATUS_SYNCING, + METRO_RUNNING_STATUS_INVALID, + METRO_RUNNING_STATUS_PAUSE, + METRO_RUNNING_STATUS_FORCED_START, + METRO_RUNNING_STATUS_ERROR, + METRO_RUNNING_STATUS_TO_BE_SYNC) = ( + '1', '23', '35', '41', '93', '94', '100') + +VALID_PRODUCTS = ('V3', 'V5', 'Dorado') + +AVAILABLE_FEATURE_STATUS = (1, 2) +VALID_NETWORK_TYPE = ('flat', 'vlan', 'vxlan', None) diff --git a/Manila/Ussuri/helper.py b/Manila/Ussuri/helper.py new file mode 100644 index 0000000..5464f61 --- /dev/null +++ b/Manila/Ussuri/helper.py @@ -0,0 +1,889 @@ +# Copyright (c) 2014 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import base64 +import json +import requests +import six +import time + +from oslo_log import log + +from manila import exception +from manila.i18n import _ +from manila.share.drivers.huawei import constants +from manila.share.drivers.huawei import huawei_utils +from manila import utils + +LOG = log.getLogger(__name__) + + +def _error_code(result): + return result['error']['code'] + + +def _assert_result(result, format_str, *args): + if _error_code(result) != 0: + args += (result,) + msg = (format_str + '\nresult: %s.') % args + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + +class RestHelper(object): + """Helper class for Huawei OceanStor V3 storage system.""" + + def __init__(self, nas_address, nas_username, nas_password): + self.nas_address = nas_address + self.nas_username = nas_username + self.nas_password = nas_password + self.url = None + self.session = None + + LOG.warning("Suppressing requests library SSL Warnings") + requests.packages.urllib3.disable_warnings( + requests.packages.urllib3.exceptions.InsecureRequestWarning) + requests.packages.urllib3.disable_warnings( + requests.packages.urllib3.exceptions.InsecurePlatformWarning) + + def init_http_head(self): + self.url = None + self.session = requests.Session() + self.session.headers.update({ + "Connection": "keep-alive", + "Content-Type": "application/json"}) + self.session.verify = False + + def do_call(self, postfix_url, method, data=None, + timeout=constants.SOCKET_TIMEOUT, **kwargs): + url = self.url + postfix_url + kwargs['timeout'] = timeout + if data: + kwargs['data'] = json.dumps(data) + + log_filter = kwargs.pop('log_filter', False) + if not log_filter: + LOG.info('Request URL: %(url)s\n' + 'Call Method: %(method)s\n' + 'Request Data: %(data)s', + {'url': url, + 'method': method, + 'data': data}) + + func = getattr(self.session, method.lower()) + + try: + res = 
func(url, **kwargs) + except Exception as exc: + LOG.error('Bad response from server: %(url)s. ' + 'Error: %(err)s.', + {'url': url, 'err': six.text_type(exc)}) + return {"error": {"code": constants.ERROR_CONNECT_TO_SERVER, + "description": "Connect server error"}} + + try: + res.raise_for_status() + except requests.HTTPError as exc: + return {"error": {"code": exc.response.status_code, + "description": six.text_type(exc)}} + + res_json = res.json() + + if not log_filter: + LOG.info('Response Data: %s', res_json) + return res_json + + def _get_user_info(self): + if self.nas_username.startswith('!$$$'): + username = base64.b64decode(self.nas_username[4:]).decode() + else: + username = self.nas_username + + if self.nas_password.startswith('!$$$'): + password = base64.b64decode(self.nas_password[4:]).decode() + else: + password = self.nas_password + + return username, password + + def login(self): + username, password = self._get_user_info() + for item_url in self.nas_address: + data = {"username": username, + "password": password, + "scope": "0"} + self.init_http_head() + self.url = item_url + + LOG.info('Try to login %s.', item_url) + result = self.do_call( + "xx/sessions", 'POST', data, constants.LOGIN_SOCKET_TIMEOUT, + log_filter=True) + if _error_code(result) != 0: + LOG.error("Login %s failed, try another.", item_url) + continue + + LOG.info('Login %s success.', item_url) + self.url = item_url + result['data']['deviceid'] + self.session.headers['iBaseToken'] = result['data']['iBaseToken'] + if (result['data']['accountstate'] + in constants.PWD_EXPIRED_OR_INITIAL): + self.logout() + msg = _("Password has expired or initial, " + "please change the password.") + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + break + else: + msg = _("All url login fail.") + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + def logout(self): + url = "/sessions" + if self.url: + result = self.do_call(url, "DELETE") + _assert_result(result, 
'Logout session error.') + + @utils.synchronized('huawei_manila') + def call(self, url, method, data=None, **kwargs): + result = self.do_call(url, method, data, **kwargs) + if _error_code(result) in (constants.ERROR_CONNECT_TO_SERVER, + constants.ERROR_UNAUTHORIZED_TO_SERVER): + LOG.error("Can't open the recent url, relogin.") + self.login() + result = self.do_call(url, method, data, **kwargs) + return result + + def create_filesystem(self, fs_param): + url = "/filesystem" + result = self.call(url, 'POST', fs_param) + _assert_result(result, 'Create filesystem %s error.', fs_param) + return result['data']['ID'] + + def create_share(self, share_name, fs_id, share_proto, vstore_id=None): + share_path = huawei_utils.share_path(share_name) + data = { + "DESCRIPTION": share_name, + "FSID": fs_id, + "SHAREPATH": share_path, + } + + if share_proto == 'NFS': + url = "/NFSHARE" + elif share_proto == 'CIFS': + url = "/CIFSHARE" + data["NAME"] = huawei_utils.share_name(share_name) + else: + msg = _('Invalid NAS protocol %s.') % share_proto + raise exception.InvalidInput(reason=msg) + + if vstore_id: + data['vstoreId'] = vstore_id + + result = self.call(url, "POST", data) + _assert_result(result, 'Create share for %s error.', share_name) + return result['data']['ID'] + + def update_share(self, share_id, share_proto, params, vstore_id=None): + if share_proto == 'NFS': + url = "/NFSHARE/%s" % share_id + elif share_proto == 'CIFS': + url = "/CIFSHARE/%s" % share_id + else: + msg = _('Invalid NAS protocol %s.') % share_proto + raise exception.InvalidInput(reason=msg) + + data = params + if vstore_id: + data['vstoreId'] = vstore_id + result = self.call(url, "PUT", data) + _assert_result(result, 'Update share %s error.', share_id) + + def delete_share(self, share_id, share_proto, vstore_id=None): + if share_proto == 'NFS': + url = "/NFSHARE/%s" % share_id + elif share_proto == 'CIFS': + url = "/CIFSHARE/%s" % share_id + else: + msg = _('Invalid NAS protocol %s.') % share_proto + 
raise exception.InvalidInput(reason=msg) + + data = {'vstoreId': vstore_id} if vstore_id else None + result = self.call(url, "DELETE", data) + if _error_code(result) == constants.SHARE_NOT_EXIST: + LOG.warning('Share %s to delete not exist.', share_id) + return + _assert_result(result, 'Delete share %s error.', share_id) + + def delete_filesystem(self, params): + url = "/filesystem" + result = self.call(url, "DELETE", data=params) + if _error_code(result) == constants.FILESYSTEM_NOT_EXIST: + LOG.warning('FS %s to delete not exist.', params) + return + _assert_result(result, 'Delete filesystem %s error.', params) + + def get_all_pools(self): + url = "/storagepool" + result = self.call(url, "GET") + _assert_result(result, "Query resource pool error.") + return result['data'] + + def get_pool_by_name(self, name, log_filter=False): + url = "/storagepool?filter=NAME::%s" % name + result = self.call(url, "GET", log_filter=log_filter) + _assert_result(result, "Get pool %s error.", name) + if 'data' in result and result['data']: + return result['data'][0] + + def remove_access(self, access_id, share_proto, vstore_id=None): + if share_proto == 'NFS': + url = "/NFS_SHARE_AUTH_CLIENT/%s" % access_id + elif share_proto == 'CIFS': + url = "/CIFS_SHARE_AUTH_CLIENT/%s" % access_id + else: + msg = _('Invalid NAS protocol %s.') % share_proto + raise exception.InvalidInput(reason=msg) + + data = {'vstoreId': vstore_id} if vstore_id else None + result = self.call(url, "DELETE", data) + _assert_result(result, 'Delete access %s error.', access_id) + + def get_share_access(self, share_id, access_to, share_proto, + vstore_id=None): + # Huawei array uses * to represent IP addresses of all clients + if access_to == '0.0.0.0/0': + access_to = '*' + + accesses = self.get_all_share_access(share_id, share_proto, vstore_id) + for access in accesses: + if access['NAME'] in (access_to, '@' + access_to): + return access + + def _get_share_access_count(self, share_id, share_proto, vstore_id=None): 
+ if share_proto == 'NFS': + url = "/NFS_SHARE_AUTH_CLIENT" + elif share_proto == 'CIFS': + url = "/CIFS_SHARE_AUTH_CLIENT" + else: + msg = _('Invalid NAS protocol %s.') % share_proto + raise exception.InvalidInput(reason=msg) + + url += "/count?filter=PARENTID::%s" % share_id + data = {'vstoreId': vstore_id} if vstore_id else None + result = self.call(url, "GET", data) + + _assert_result(result, 'Get access count of share %s error.', share_id) + return int(result['data']['COUNT']) + + def _get_share_access_by_range(self, share_id, share_proto, + range, vstore_id=None): + if share_proto == 'NFS': + url = "/NFS_SHARE_AUTH_CLIENT" + elif share_proto == 'CIFS': + url = "/CIFS_SHARE_AUTH_CLIENT" + else: + msg = _('Invalid NAS protocol %s.') % share_proto + raise exception.InvalidInput(reason=msg) + + url += "?filter=PARENTID::%s" % share_id + url += "&range=[%s-%s]" % range + data = {'vstoreId': vstore_id} if vstore_id else None + result = self.call(url, "GET", data) + _assert_result(result, 'Get accesses of share %s error.', share_id) + return result.get('data', []) + + def get_all_share_access(self, share_id, share_proto, vstore_id=None): + count = self._get_share_access_count(share_id, share_proto, vstore_id) + if count <= 0: + return [] + + all_accesses = [] + for i in range(count // 100 + 1): + query_range = i * 100, (i + 1) * 100 + accesses = self._get_share_access_by_range( + share_id, share_proto, query_range, vstore_id) + all_accesses.extend(accesses) + + return all_accesses + + def change_access(self, access_id, share_proto, access_level, + vstore_id=None): + if share_proto == 'NFS': + url = "/NFS_SHARE_AUTH_CLIENT/%s" % access_id + access = {"ACCESSVAL": access_level} + elif share_proto == 'CIFS': + url = "/CIFS_SHARE_AUTH_CLIENT/" + access_id + access = {"PERMISSION": access_level} + else: + msg = _('Invalid NAS protocol %s.') % share_proto + raise exception.InvalidInput(reason=msg) + + if vstore_id: + access['vstoreId'] = vstore_id + result = 
self.call(url, "PUT", access) + _assert_result(result, 'Change access %s level to %s error.', + access_id, access_level) + + def allow_access(self, share_id, access_to, share_proto, access_level, + share_type_id=None, vstore_id=None): + if share_proto == 'NFS': + self._allow_nfs_access( + share_id, access_to, access_level, share_type_id, vstore_id) + elif share_proto == 'CIFS': + self._allow_cifs_access( + share_id, access_to, access_level, vstore_id) + else: + msg = _('Invalid NAS protocol %s.') % share_proto + raise exception.InvalidInput(reason=msg) + + def _allow_nfs_access(self, share_id, access_to, access_level, + share_type_id, vstore_id): + # Huawei array uses * to represent IP addresses of all clients + if access_to == '0.0.0.0/0': + access_to = '*' + access = { + "NAME": access_to, + "PARENTID": share_id, + "ACCESSVAL": access_level, + "SYNC": "0", + "ALLSQUASH": "1", + "ROOTSQUASH": "0", + } + + if share_type_id: + opts = huawei_utils.get_share_privilege(share_type_id) + access.update(opts) + + if vstore_id: + access['vstoreId'] = vstore_id + + result = self.call("/NFS_SHARE_AUTH_CLIENT", "POST", access) + _assert_result(result, 'Allow NFS access %s error.', access) + + def _allow_cifs_access(self, share_id, access_to, access_level, vstore_id): + access = { + "NAME": access_to, + "PARENTID": share_id, + "PERMISSION": access_level, + "DOMAINTYPE": '2' if '\\' not in access_to else '0', + } + if vstore_id: + access['vstoreId'] = vstore_id + + result = self.call("/CIFS_SHARE_AUTH_CLIENT", "POST", access) + if _error_code(result) == constants.ERROR_USER_OR_GROUP_NOT_EXIST: + # If add user access failed, try to add group access. 
+ access['NAME'] = '@' + access_to + result = self.call("/CIFS_SHARE_AUTH_CLIENT", "POST", access) + _assert_result(result, 'Allow CIFS access %s error.', access) + + def get_snapshot_by_id(self, snap_id): + url = "/FSSNAPSHOT/" + snap_id + result = self.call(url, "GET") + _assert_result(result, 'Get snapshot by id %s error.', snap_id) + return result['data'] + + def delete_snapshot(self, snap_id): + url = "/FSSNAPSHOT/%s" % snap_id + result = self.call(url, "DELETE") + if _error_code(result) == constants.SNAPSHOT_NOT_EXIST: + LOG.warning('Snapshot %s to delete not exist.', snap_id) + return + _assert_result(result, 'Delete snapshot %s error.', snap_id) + + def create_snapshot(self, fs_id, snapshot_name): + data = { + "PARENTTYPE": "40", + "PARENTID": fs_id, + "NAME": huawei_utils.snapshot_name(snapshot_name), + } + result = self.call("/FSSNAPSHOT", "POST", data) + _assert_result(result, 'Create snapshot %s error.', data) + return result['data']['ID'] + + def get_share_by_name(self, share_name, share_proto, vstore_id=None): + if share_proto == 'NFS': + share_path = huawei_utils.share_path(share_name) + url = "/NFSHARE?filter=SHAREPATH::%s&range=[0-100]" % share_path + elif share_proto == 'CIFS': + cifs_share = huawei_utils.share_name(share_name) + url = "/CIFSHARE?filter=NAME:%s&range=[0-100]" % cifs_share + else: + msg = _('Invalid NAS protocol %s.') % share_proto + raise exception.InvalidInput(reason=msg) + + data = {'vstoreId': vstore_id} if vstore_id else None + result = self.call(url, "GET", data) + if _error_code(result) == constants.SHARE_PATH_INVALID: + LOG.warning('Share %s not exist.', share_name) + return + + _assert_result(result, 'Get share by name %s error.', share_name) + + # for CIFS, if didn't get share by NAME, try DESCRIPTION + if share_proto == 'CIFS' and not result.get('data'): + url = "/CIFSHARE?filter=DESCRIPTION:%s&range=[0-100]" % share_name + result = self.call(url, "GET", data) + + if result.get('data'): + return result['data'][0] + + def 
get_fs_info_by_name(self, name): + url = "/filesystem?filter=NAME::%s" % huawei_utils.share_name(name) + result = self.call(url, "GET") + _assert_result(result, 'Get filesystem by name %s error.', name) + if 'data' in result and result['data']: + return result['data'][0] + + def get_fs_info_by_id(self, fs_id): + url = "/filesystem/%s" % fs_id + result = self.call(url, "GET") + _assert_result(result, "Get filesystem by id %s error.", fs_id) + return result['data'] + + def update_filesystem(self, fs_id, params): + url = "/filesystem/%s" % fs_id + result = self.call(url, "PUT", params) + _assert_result(result, "Update filesystem %s by %s error.", + fs_id, params) + + def get_partition_id_by_name(self, name): + url = "/cachepartition?filter=NAME::%s" % name + result = self.call(url, "GET") + _assert_result(result, 'Get partition by name %s error.', name) + if 'data' in result and result['data']: + return result['data'][0]['ID'] + + def get_partition_info_by_id(self, partitionid): + url = '/cachepartition/' + partitionid + result = self.call(url, "GET") + _assert_result(result, 'Get partition by id %s error.', partitionid) + return result['data'] + + def add_fs_to_partition(self, fs_id, partition_id): + url = "/smartPartition/addFs" + data = {"ID": partition_id, + "ASSOCIATEOBJTYPE": 40, + "ASSOCIATEOBJID": fs_id, + } + result = self.call(url, "PUT", data) + _assert_result(result, 'Add FS %s to partition %s error.', + fs_id, partition_id) + + def remove_fs_from_partition(self, fs_id, partition_id): + url = "/smartPartition/removeFs" + data = {"ID": partition_id, + "ASSOCIATEOBJTYPE": 40, + "ASSOCIATEOBJID": fs_id, + } + result = self.call(url, "PUT", data) + _assert_result(result, 'Remove FS %s from partition %s error.', + fs_id, partition_id) + + def rename_snapshot(self, snapshot_id, new_name): + url = "/FSSNAPSHOT/%s" % snapshot_id + data = {"NAME": huawei_utils.snapshot_name(new_name)} + result = self.call(url, "PUT", data) + _assert_result(result, 'Rename snapshot 
%s error.', snapshot_id) + + def get_cache_id_by_name(self, name): + url = "/SMARTCACHEPARTITION?filter=NAME::%s" % name + result = self.call(url, "GET") + _assert_result(result, 'Get cache by name %s error.', name) + if 'data' in result and result['data']: + return result['data'][0]['ID'] + + def get_cache_info_by_id(self, cacheid): + url = "/SMARTCACHEPARTITION/" + cacheid + result = self.call(url, "GET") + _assert_result(result, 'Get smartcache by id %s error.', cacheid) + return result['data'] + + def add_fs_to_cache(self, fs_id, cache_id): + url = "/SMARTCACHEPARTITION/CREATE_ASSOCIATE" + data = {"ID": cache_id, + "ASSOCIATEOBJTYPE": 40, + "ASSOCIATEOBJID": fs_id, + } + result = self.call(url, "PUT", data) + _assert_result(result, 'Add FS %s to cache %s error.', + fs_id, cache_id) + + def remove_fs_from_cache(self, fs_id, cache_id): + url = "/SMARTCACHEPARTITION/REMOVE_ASSOCIATE" + data = {"ID": cache_id, + "ASSOCIATEOBJTYPE": 40, + "ASSOCIATEOBJID": fs_id, + } + result = self.call(url, "PUT", data) + _assert_result(result, 'Remove FS %s from cache %s error.', + fs_id, cache_id) + + def get_all_qos(self): + url = "/ioclass" + result = self.call(url, "GET") + _assert_result(result, 'Get all QoS error.') + return result.get('data', []) + + def update_qos_fs(self, qos_id, new_fs_list): + url = "/ioclass/%s" % qos_id + data = {"FSLIST": new_fs_list} + result = self.call(url, "PUT", data) + _assert_result(result, 'Associate FS %s to Qos %s error.', + new_fs_list, qos_id) + + def create_qos(self, qos, fs_id): + localtime = time.strftime('%Y%m%d%H%M%S', time.localtime()) + qos_name = constants.QOS_NAME_PREFIX + fs_id + '_' + localtime + data = {"NAME": qos_name, + "FSLIST": [fs_id], + "CLASSTYPE": "1", + "SCHEDULEPOLICY": "1", + "SCHEDULESTARTTIME": "1410969600", + "STARTTIME": "00:00", + "DURATION": "86400", + } + data.update(qos) + result = self.call("/ioclass", 'POST', data) + _assert_result(result, 'Create QoS %s error.', data) + return result['data']['ID'] + + 
def activate_deactivate_qos(self, qos_id, enable_status): + url = "/ioclass/active" + data = {"ID": qos_id, + "ENABLESTATUS": enable_status} + result = self.call(url, "PUT", data) + _assert_result(result, 'Activate or deactivate QoS %s error.', qos_id) + + def delete_qos(self, qos_id): + url = "/ioclass/%s" % qos_id + result = self.call(url, 'DELETE') + _assert_result(result, 'Delete QoS %s error.', qos_id) + + def get_qos_info(self, qos_id): + url = "/ioclass/%s" % qos_id + result = self.call(url, "GET") + _assert_result(result, 'Get QoS info by id %s error.', qos_id) + return result['data'] + + def get_all_eth_port(self): + result = self.call("/eth_port", 'GET') + _assert_result(result, 'Get all eth port error.') + return result.get('data', []) + + def get_all_bond_port(self): + result = self.call("/bond_port", 'GET') + _assert_result(result, 'Get all bond port error.') + return result.get('data', []) + + def get_all_vlan(self): + result = self.call("/vlan", 'GET') + _assert_result(result, 'Get all vlan error.') + return result.get('data', []) + + def get_vlan_by_tag(self, vlan_tag): + url = "/vlan?filter=TAG::%s" % vlan_tag + result = self.call(url, 'GET') + _assert_result(result, 'Get vlan by tag %s error.', vlan_tag) + return result.get('data', []) + + def create_vlan(self, port_id, port_type, vlan_tag): + data = {"PORTID": port_id, + "PORTTYPE": port_type, + "TAG": vlan_tag, + } + result = self.call("/vlan", "POST", data) + _assert_result(result, 'Create vlan %s error.', data) + return result['data']['ID'] + + def delete_vlan(self, vlan_id): + url = "/vlan/%s" % vlan_id + result = self.call(url, 'DELETE') + if _error_code(result) == constants.OBJECT_NOT_EXIST: + LOG.warning('vlan %s to delete not exist.', vlan_id) + return + _assert_result(result, 'Delete vlan %s error.', vlan_id) + + def get_logical_port_by_ip(self, ip, ip_type): + if ip_type == 4: + url = "/LIF?filter=IPV4ADDR::%s" % ip + else: + url = "/LIF?filter=IPV6ADDR::%s" % ip + result = 
self.call(url, 'GET') + _assert_result(result, 'Get logical port by IP %s error.', ip) + if 'data' in result and result['data']: + return result['data'][0] + + def create_logical_port(self, params): + result = self.call("/LIF", 'POST', params) + _assert_result(result, 'Create logical port %s error.', params) + return result['data']['ID'] + + def get_all_logical_port(self): + result = self.call("/LIF", 'GET') + _assert_result(result, 'Get all logical port error.') + return result.get('data', []) + + def get_logical_port_by_id(self, logical_port_id): + url = "/LIF/%s" % logical_port_id + result = self.call(url, 'GET') + _assert_result(result, 'Get logical port error.') + return result.get('data', {}) + + def modify_logical_port(self, logical_port_id, vstore_id): + logical_port_info = self.get_logical_port_by_id(logical_port_id) + data = {'vstoreId': vstore_id, + 'dnsZoneName': "", + 'NAME': logical_port_info.get('NAME'), + 'ID': logical_port_info.get('ID')} + url = "/LIF/%s" % logical_port_id + result = self.call(url, 'PUT', data) + if result['error']['code'] == constants.LIF_ALREADY_EXISTS: + return + _assert_result(result, 'Modify logical port error.') + + def delete_logical_port(self, logical_port_id): + url = "/LIF/%s" % logical_port_id + result = self.call(url, 'DELETE') + if _error_code(result) == constants.OBJECT_NOT_EXIST: + LOG.warning('Logical port %s to delete not exist.', + logical_port_id) + return + _assert_result(result, 'Delete logical port %s error.', + logical_port_id) + + def set_dns_ip_address(self, dns_ip_list): + if len(dns_ip_list) > 3: + msg = _('3 IPs can be set to DNS most.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + dns_info = {"ADDRESS": json.dumps(dns_ip_list)} + result = self.call("/DNS_Server", 'PUT', dns_info) + _assert_result(result, 'Set DNS ip address %s error.', dns_ip_list) + + def get_dns_ip_address(self): + result = self.call("/DNS_Server", 'GET') + _assert_result(result, 'Get DNS ip address error.') + if 
'data' in result: + return json.loads(result['data']['ADDRESS']) + + def add_ad_config(self, user, password, domain): + info = {"DOMAINSTATUS": 1, + "ADMINNAME": user, + "ADMINPWD": password, + "FULLDOMAINNAME": domain, + } + result = self.call("/AD_CONFIG", 'PUT', info) + _assert_result(result, 'Add AD config %s error.', info) + + def delete_ad_config(self, user, password): + info = {"DOMAINSTATUS": 0, + "ADMINNAME": user, + "ADMINPWD": password, + } + result = self.call("/AD_CONFIG", 'PUT', info) + _assert_result(result, 'Delete AD config %s error.', info) + + def get_ad_config(self): + result = self.call("/AD_CONFIG", 'GET') + _assert_result(result, 'Get AD config error.') + return result.get('data') + + def add_ldap_config(self, server, domain): + info = {"BASEDN": domain, + "LDAPSERVER": server, + "PORTNUM": 389, + "TRANSFERTYPE": "1", + } + result = self.call("/LDAP_CONFIG", 'PUT', info) + _assert_result(result, 'Add LDAP config %s error.', info) + + def delete_ldap_config(self): + result = self.call("/LDAP_CONFIG", 'DELETE') + if _error_code(result) == constants.AD_DOMAIN_NOT_EXIST: + LOG.warning('LDAP config not exist while deleting.') + return + _assert_result(result, 'Delete LDAP config error.') + + def get_ldap_config(self): + result = self.call("/LDAP_CONFIG", 'GET') + _assert_result(result, 'Get LDAP config error.') + return result.get('data') + + def get_array_wwn(self): + result = self.call("/system/", "GET") + _assert_result(result, 'Get array info error.') + return result['data']['wwn'] + + def get_remote_device_by_wwn(self, wwn): + result = self.call("/remote_device", "GET") + _assert_result(result, 'Get all remote devices error.') + for device in result.get('data', []): + if device.get('WWN') == wwn: + return device + + def create_replication_pair(self, params): + result = self.call("/REPLICATIONPAIR", "POST", params) + _assert_result(result, 'Create replication pair %s error.', params) + return result['data'] + + def split_replication_pair(self, 
pair_id): + data = {"ID": pair_id} + result = self.call('/REPLICATIONPAIR/split', "PUT", data) + _assert_result(result, 'Split replication pair %s error.', pair_id) + + def switch_replication_pair(self, pair_id): + data = {"ID": pair_id} + result = self.call('/REPLICATIONPAIR/switch', "PUT", data) + _assert_result(result, 'Switch replication pair %s error.', pair_id) + + def delete_replication_pair(self, pair_id): + url = "/REPLICATIONPAIR/%s" % pair_id + result = self.call(url, "DELETE") + if _error_code(result) == constants.REPLICATION_PAIR_NOT_EXIST: + LOG.warning('Replication pair %s to delete not exist.', pair_id) + return + _assert_result(result, 'Delete replication pair %s error.', pair_id) + + def sync_replication_pair(self, pair_id): + data = {"ID": pair_id} + result = self.call("/REPLICATIONPAIR/sync", "PUT", data) + _assert_result(result, 'Sync replication pair %s error.', pair_id) + + def cancel_pair_secondary_write_lock(self, pair_id): + url = "/REPLICATIONPAIR/CANCEL_SECODARY_WRITE_LOCK" + data = {"ID": pair_id} + result = self.call(url, "PUT", data) + _assert_result(result, 'Cancel replication pair %s secondary ' + 'write lock error.', pair_id) + + def set_pair_secondary_write_lock(self, pair_id): + url = "/REPLICATIONPAIR/SET_SECODARY_WRITE_LOCK" + data = {"ID": pair_id} + result = self.call(url, "PUT", data) + _assert_result(result, 'Set replication pair %s secondary ' + 'write lock error.', pair_id) + + def get_replication_pair_by_id(self, pair_id): + url = "/REPLICATIONPAIR/%s" % pair_id + result = self.call(url, "GET") + _assert_result(result, 'Get replication pair by id %s error.', pair_id) + return result.get('data', {}) + + def get_replication_pair_by_localres_name(self, local_res): + url = "/REPLICATIONPAIR?filter=LOCALRESNAME::" + local_res + result = self.call(url, "GET") + _assert_result(result, 'Get replication pair by local resource ' + 'name %s error.', local_res) + return result.get('data') + + def get_feature_status(self): + result = 
self.call('/license/feature', 'GET', log_filter=True) + if result['error']['code'] != 0: + LOG.warning('Query feature information failed.') + return {} + + status = {} + for feature in result.get('data', []): + status.update(feature) + + return status + + def get_vstore_pair(self, pair_id): + url = '/vstore_pair/%s' % pair_id + result = self.call(url, 'GET') + _assert_result(result, 'Get vstore pair info %s error.', pair_id) + return result['data'] + + def rollback_snapshot(self, snap_id): + data = {"ID": snap_id} + result = self.call("/FSSNAPSHOT/ROLLBACK_FSSNAPSHOT", "PUT", data) + _assert_result(result, 'Failed to rollback snapshot %s.', snap_id) + + def get_controller_id(self, controller_name): + result = self.call('/controller', 'GET') + _assert_result(result, 'Get controllers error.') + + for con in result.get('data', []): + if con.get('LOCATION') == controller_name: + return con['ID'] + + def split_clone_fs(self, fs_id): + data = {"ID": fs_id, + "SPLITENABLE": True, + "SPLITSPEED": 4, + } + result = self.call("/filesystem_split_switch", "PUT", data) + _assert_result(result, 'Split clone fs %s error.', fs_id) + + def create_hypermetro_pair(self, params): + result = self.call("/HyperMetroPair", "POST", params) + _assert_result(result, 'Create HyperMetro pair %s error.', params) + return result['data'] + + def get_hypermetro_pair_by_id(self, pair_id): + url = "/HyperMetroPair/%s" % pair_id + result = self.call(url, "GET") + _assert_result(result, 'Get HyperMetro pair %s error.', pair_id) + return result['data'] + + def suspend_hypermetro_pair(self, pair_id): + params = {"ID": pair_id, + "ISPRIMARY": False} + url = "/HyperMetroPair/disable_hcpair" + result = self.call(url, "PUT", params) + _assert_result(result, 'Suspend HyperMetro pair %s error.', pair_id) + + def sync_hypermetro_pair(self, pair_id): + data = {"ID": pair_id} + result = self.call("/HyperMetroPair/synchronize_hcpair", "PUT", data) + _assert_result(result, 'Sync HyperMetro pair %s error.', 
pair_id) + + def delete_hypermetro_pair(self, pair_id): + url = "/HyperMetroPair/%s" % pair_id + result = self.call(url, "DELETE") + if _error_code(result) == constants.ERROR_HYPERMETRO_NOT_EXIST: + LOG.warning('Hypermetro pair %s to delete not exist.', pair_id) + return + _assert_result(result, 'Delete HyperMetro pair %s error.', pair_id) + + def get_hypermetro_domain_id(self, domain_name): + result = self.call("/HyperMetroDomain?range=[0-100]", "GET") + _assert_result(result, "Get HyperMetro domains info error.") + for item in result.get("data", []): + if item.get("NAME") == domain_name: + return item.get("ID") + + def get_hypermetro_vstore_id(self, domain_name, local_vstore_name, + remote_vstore_name): + result = self.call("/vstore_pair?range=[0-100]", "GET", + data=None, log_filter=True) + _assert_result(result, "Get HyperMetro vstore_pair id error.") + for item in result.get("data", []): + if item.get("DOMAINNAME") == domain_name and item.get( + "LOCALVSTORENAME") == local_vstore_name and item.get( + "REMOTEVSTORENAME") == remote_vstore_name: + return item.get("ID") + return None + + def get_hypermetro_vstore_by_pair_id(self, vstore_pair_id): + url = "/vstore_pair/%s" % vstore_pair_id + result = self.call(url, 'GET', data=None, log_filter=True) + _assert_result(result, "Get HyperMetro vstore_pair info by id error.") + return result["data"] diff --git a/Manila/Ussuri/huawei_config.py b/Manila/Ussuri/huawei_config.py new file mode 100644 index 0000000..dba4caf --- /dev/null +++ b/Manila/Ussuri/huawei_config.py @@ -0,0 +1,283 @@ +# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import base64 +import os + +from oslo_log import log as logging +from oslo_utils import strutils +from xml.etree import ElementTree as ET + +from manila import exception +from manila.i18n import _ +from manila.share.drivers.huawei import constants + +LOG = logging.getLogger(__name__) + + +class HuaweiConfig(object): + def __init__(self, config): + self.config = config + self.last_modify_time = None + self.update_configs() + + def update_configs(self): + file_time = os.stat(self.config.manila_huawei_conf_file).st_mtime + if self.last_modify_time == file_time: + return + + self.last_modify_time = file_time + + tree = ET.parse(self.config.manila_huawei_conf_file) + xml_root = tree.getroot() + self._encode_authentication(tree, xml_root) + + attr_funcs = ( + self._nas_address, + self._nas_user, + self._nas_password, + self._nas_product, + self._ports, + self._snapshot_support, + self._replication_support, + self._wait_interval, + self._timeout, + self._storage_pools, + self._sector_size, + self._nfs_client, + self._cifs_client, + self._snapshot_reserve, + self._logical_ip, + self._dns, + ) + + for f in attr_funcs: + f(xml_root) + + def _encode_authentication(self, tree, xml_root): + name_node = xml_root.find('Storage/UserName') + pwd_node = xml_root.find('Storage/UserPassword') + + need_encode = False + if name_node is not None and not name_node.text.startswith('!$$$'): + name_node.text = '!$$$' + base64.b64encode( + name_node.text.encode()).decode() + need_encode = True + + if pwd_node is not None and not pwd_node.text.startswith('!$$$'): + pwd_node.text = 
'!$$$' + base64.b64encode( + pwd_node.text.encode()).decode() + need_encode = True + + if need_encode: + tree.write(self.config.manila_huawei_conf_file, 'UTF-8') + + def _nas_address(self, xml_root): + text = xml_root.findtext('Storage/RestURL') + if not text: + msg = _("RestURL is not configured.") + LOG.error(msg) + raise exception.BadConfigurationException(reason=msg) + + addrs = set([x.strip() for x in text.split(';') if x.strip()]) + setattr(self.config, 'nas_address', list(addrs)) + + def _nas_user(self, xml_root): + text = xml_root.findtext('Storage/UserName') + if not text: + msg = _("UserName is not configured.") + LOG.error(msg) + raise exception.BadConfigurationException(reason=msg) + + setattr(self.config, 'nas_user', text.strip()) + + def _nas_password(self, xml_root): + text = xml_root.findtext('Storage/UserPassword') + if not text: + msg = _("UserPassword is not configured.") + LOG.error(msg) + raise exception.BadConfigurationException(reason=msg) + + setattr(self.config, 'nas_password', text.strip()) + + def _nas_product(self, xml_root): + text = xml_root.findtext('Storage/Product') + if not text: + msg = _("Storage product is not configured.") + LOG.error(msg) + raise exception.BadConfigurationException(reason=msg) + + if text not in constants.VALID_PRODUCTS: + msg = _("Invalid storage product %(text)s, must be " + "in %(valid)s." 
+ ) % {'text': text, + 'valid': constants.VALID_PRODUCTS} + LOG.error(msg) + raise exception.BadConfigurationException(reason=msg) + + setattr(self.config, 'nas_product', text) + + def _wait_interval(self, xml_root): + interval = constants.DEFAULT_WAIT_INTERVAL + text = xml_root.findtext('Filesystem/WaitInterval') + if text: + interval = int(text.strip()) + if interval <= 0: + msg = _("Invalid WaitInterval config %s, " + "must be a positive digit.") % text + LOG.error(msg) + raise exception.BadConfigurationException(reason=msg) + + setattr(self.config, 'wait_interval', interval) + + def _timeout(self, xml_root): + timeout = constants.DEFAULT_TIMEOUT + text = xml_root.findtext('Filesystem/Timeout') + if text: + timeout = int(text.strip()) + if timeout <= 0: + msg = _("Invalid Timeout config %s, must be " + "a positive digit.") % text + LOG.error(msg) + raise exception.BadConfigurationException(reason=msg) + + setattr(self.config, 'timeout', timeout) + + def _storage_pools(self, xml_root): + text = xml_root.findtext('Filesystem/StoragePool') + if not text: + msg = _('StoragePool must be configured.') + LOG.error(msg) + raise exception.BadConfigurationException(reason=msg) + + pools = set() + for pool in text.split(';'): + if pool.strip(): + pools.add(pool.strip()) + + if not pools: + msg = _('No valid storage pool configured.') + LOG.error(msg) + raise exception.BadConfigurationException(reason=msg) + + setattr(self.config, 'storage_pools', list(pools)) + + def _logical_ip(self, xml_root): + logical_ip = [] + text = xml_root.findtext('Storage/LogicalPortIP') + if text: + logical_ip = [i.strip() for i in text.split(";") if i.strip()] + + setattr(self.config, 'logical_ip', logical_ip) + + def _dns(self, xml_root): + dns = [] + text = xml_root.findtext('Storage/DNS') + if text: + dns = [i.strip() for i in text.split(";") if i.strip()] + + setattr(self.config, 'dns', dns) + + def _ports(self, xml_root): + ports = [] + text = xml_root.findtext('Storage/Port') + if text: + 
for port in text.split(";"): + if port.strip(): + ports.append(port.strip()) + + setattr(self.config, 'ports', ports) + + def _sector_size(self, xml_root): + text = xml_root.findtext('Filesystem/SectorSize') + if text and text.strip(): + setattr(self.config, 'sector_size', text.strip()) + + def _snapshot_support(self, xml_root): + snapshot_support = True + text = xml_root.findtext('Storage/SnapshotSupport') + if text: + snapshot_support = strutils.bool_from_string( + text.strip(), strict=True) + setattr(self.config, 'snapshot_support', snapshot_support) + + def _replication_support(self, xml_root): + replication_support = False + text = xml_root.findtext('Storage/ReplicationSupport') + if text: + replication_support = strutils.bool_from_string( + text.strip(), strict=True) + setattr(self.config, 'replication_support', replication_support) + + def _nfs_client(self, xml_root): + text = xml_root.findtext('Filesystem/NFSClient/IP') + if text and text.strip(): + nfs_client_ip = text.strip() + else: + nfs_client_ip = None + setattr(self.config, 'nfs_client_ip', nfs_client_ip) + + def _cifs_client(self, xml_root): + text = xml_root.findtext('Filesystem/CIFSClient/UserName') + if text and text.strip(): + cifs_client_name = text.strip() + else: + cifs_client_name = None + setattr(self.config, 'cifs_client_name', cifs_client_name) + + text = xml_root.findtext('Filesystem/CIFSClient/UserPassword') + if text and text.strip(): + cifs_client_password = text.strip() + else: + cifs_client_password = None + setattr(self.config, 'cifs_client_password', cifs_client_password) + + def _snapshot_reserve(self, xml_root): + snapshot_reserve = xml_root.findtext('Filesystem/SnapshotReserve') + if snapshot_reserve: + try: + snapshot_reserve = int(snapshot_reserve.strip()) + except Exception as err: + err_msg = _('Config snapshot reserve error. 
The reason is: ' + '%s') % err + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + + if 0 <= snapshot_reserve <= 50: + setattr(self.config, 'snapshot_reserve', snapshot_reserve) + else: + err_msg = _("The snapshot reservation percentage can only be " + "between 0 and 50%") + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + else: + setattr(self.config, 'snapshot_reserve', 20) + + def get_metro_info(self): + metro_infos = self.config.safe_get('metro_info') + if not metro_infos: + return [] + metro_configs = [] + for metro_info in metro_infos: + metro_config = {} + metro_config['metro_domain'] = metro_info['metro_domain'] + metro_config['local_vStore_name'] = metro_info['local_vStore_name'] + metro_config['remote_vStore_name'] = \ + metro_info['remote_vStore_name'] + metro_config['remote_backend'] = metro_info['remote_backend'] + metro_config['metro_logic_ip'] = metro_info['metro_logic_ip'] + metro_configs.append(metro_config) + + return metro_configs diff --git a/Manila/Ussuri/huawei_nas.py b/Manila/Ussuri/huawei_nas.py new file mode 100644 index 0000000..68a8bb3 --- /dev/null +++ b/Manila/Ussuri/huawei_nas.py @@ -0,0 +1,1602 @@ +# Copyright (c) 2014 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import ipaddress +import json +import math +import os +import re +import tempfile + +from oslo_config import cfg +from oslo_config import types +from oslo_log import log +import oslo_messaging as messaging +from oslo_utils import strutils +from oslo_utils import units + +from manila.common import constants as common_constants +from manila import context as manila_context +from manila.data import utils as data_utils +from manila import exception +from manila.i18n import _ +from manila import rpc +from manila.share import driver +from manila.share import utils as share_utils +from manila import utils + +from manila.share.drivers.huawei import constants +from manila.share.drivers.huawei import helper +from manila.share.drivers.huawei import huawei_config +from manila.share.drivers.huawei import huawei_utils +from manila.share.drivers.huawei import hypermetro +from manila.share.drivers.huawei import manager +from manila.share.drivers.huawei import replication +from manila.share.drivers.huawei import rpcapi +from manila.share.drivers.huawei import smartx + +huawei_opts = [ + cfg.StrOpt('manila_huawei_conf_file', + default='/etc/manila/manila_huawei_conf.xml', + help='The configuration file for the Manila Huawei driver.'), + cfg.BoolOpt('local_replication', + default=False, + help='The replication type of backend Huawei storage.'), + cfg.MultiOpt('metro_info', + item_type=types.Dict(), + secret=True, + help='Multi opt of dictionaries to represent a hypermetro ' + 'target device. This option may be specified multiple ' + 'times in a single config section to specify multiple ' + 'hypermetro target devices. 
Each entry takes the ' + 'standard dict config form: hypermetro_device = ' + 'key1:value1,key2:value2...'), +] + +CONF = cfg.CONF +CONF.register_opts(huawei_opts) +LOG = log.getLogger(__name__) + + +class HuaweiNasDriver(driver.ShareDriver): + def __init__(self, *args, **kwargs): + super(HuaweiNasDriver, self).__init__((True, False), *args, **kwargs) + self.configuration.append_config_values(huawei_opts) + self.huawei_config = huawei_config.HuaweiConfig(self.configuration) + + self.helper = helper.RestHelper( + self.configuration.nas_address, self.configuration.nas_user, + self.configuration.nas_password) + self.metro_domain = None + self.remote_backend = None + self.vstore_pair_id = None + + self.replica_mgr = replication.ReplicaPairManager(self.helper) + self.metro_mgr = hypermetro.HyperPairManager(self.helper, + self.configuration) + self.smart_qos = smartx.SmartQos(self.helper) + self.smart_partition = smartx.SmartPartition(self.helper) + self.smart_cache = smartx.SmartCache(self.helper) + self.rpc_client = rpcapi.HuaweiAPI() + self.feature_supports = {} + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + self._check_config() + self._check_storage_pools() + self._get_metro_info() + + def do_setup(self, context): + self.helper.login() + rpc_manager = manager.HuaweiManager(self, self.replica_mgr, + self.metro_mgr) + self._setup_rpc_server(rpc_manager.RPC_API_VERSION, [rpc_manager]) + + def _setup_rpc_server(self, server_version, endpoints): + host = "%s@%s" % (CONF.host, self.configuration.config_group) + target = messaging.Target(topic=self.rpc_client.topic, server=host, + version=server_version) + self.rpc_server = rpc.get_server(target, endpoints) + self.rpc_server.start() + + def _check_storage_pools(self): + s_pools = [] + pools = self.helper.get_all_pools() + for pool in pools: + if pool.get('USAGETYPE') in (constants.FILE_SYSTEM_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE) or \ + pool.get('NEWUSAGETYPE') in \ + 
(constants.FILE_SYSTEM_POOL_TYPE, + constants.DORADO_V6_POOL_TYPE): + s_pools.append(pool['NAME']) + + for pool_name in self.configuration.storage_pools: + if pool_name not in s_pools: + msg = _("Storage pool %s not exist.") % pool_name + LOG.error(msg) + raise exception.BadConfigurationException(reason=msg) + + def _check_config(self): + if (not self.configuration.driver_handles_share_servers and + not self.configuration.logical_ip and + not self.configuration.dns): + msg = _('driver_handles_share_servers or LogicalPortIP ' + 'or DNS must be set at least one.') + LOG.error(msg) + raise exception.BadConfigurationException(reason=msg) + + if self.configuration.snapshot_support \ + and self.configuration.replication_support: + msg = _('SnapshotSupport and ReplicationSupport cannot both ' + 'be set to True.') + LOG.error(msg) + raise exception.BadConfigurationException(reason=msg) + + def _get_metro_info(self): + metro_infos = self.huawei_config.get_metro_info() + metro_info = metro_infos[0] if metro_infos else {} + if metro_info: + self.metro_domain = metro_info.get("metro_domain") + self.remote_backend = metro_info.get("remote_backend") + local_vstore_name = metro_info.get("local_vStore_name") + remote_vstore_name = metro_info.get("remote_vStore_name") + self.metro_logic_ip = metro_info.get('metro_logic_ip') + + self.vstore_pair_id = huawei_utils.get_hypermetro_vstore_id( + self.helper, self.metro_domain, local_vstore_name, + remote_vstore_name) + # check_remote_metro_info + context = manila_context.get_admin_context() + self.rpc_client.check_remote_metro_info( + context, self.remote_backend, self.metro_domain, + local_vstore_name, remote_vstore_name, + self.vstore_pair_id) + + def _create_filesystem(self, share, pool_name, + share_fs_id=None, snapshot_id=None, context=None): + opts = huawei_utils.get_share_extra_specs_params( + share['share_type_id']) + + if ('LUNType' in opts and + opts['LUNType'] == constants.ALLOC_TYPE_THICK_FLAG): + if opts['dedupe'] or 
opts['compression']: + msg = _('Thick filesystem cannot use dedupe or compression.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if not (share_fs_id and snapshot_id): + pool_info = self.helper.get_pool_by_name(pool_name) + if not pool_info: + msg = _("Pool %s to create FS not exist.") % pool_name + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + params = { + "NAME": huawei_utils.share_name(share['name']), + "ALLOCTYPE": opts.get('LUNType', + constants.ALLOC_TYPE_THIN_FLAG), + "CAPACITY": huawei_utils.share_size(share['size']), + "PARENTID": pool_info['ID'], + "ENABLEDEDUP": opts['dedupe'], + "ENABLECOMPRESSION": opts['compression'], + "SNAPSHOTRESERVEPER": self.configuration.snapshot_reserve + } + else: + params = { + "NAME": huawei_utils.share_name(share['name']), + "ALLOCTYPE": opts.get('LUNType', + constants.ALLOC_TYPE_THIN_FLAG), + "PARENTFILESYSTEMID": share_fs_id, + "PARENTSNAPSHOTID": snapshot_id, + "SNAPSHOTRESERVEPER": self.configuration.snapshot_reserve + } + if opts.get('sectorsize'): + params["SECTORSIZE"] = int(opts['sectorsize']) * units.Ki + elif hasattr(self.configuration, 'sector_size'): + params["SECTORSIZE"] = int(self.configuration.sector_size + ) * units.Ki + + if opts.get('controllername'): + controller = self.helper.get_controller_id(opts['controllername']) + if controller: + params['OWNINGCONTROLLER'] = controller + + remote_vstore_id = None + if opts.get('hypermetro'): + vstore_info = self.helper.get_hypermetro_vstore_by_pair_id( + self.vstore_pair_id) + local_vstore_id = vstore_info.get('LOCALVSTOREID') + remote_vstore_id = vstore_info.get('REMOTEVSTOREID') + if local_vstore_id and remote_vstore_id: + params['vstoreId'] = local_vstore_id + + fs_id = self.helper.create_filesystem(params) + huawei_utils.wait_fs_online( + self.helper, fs_id, self.configuration.wait_interval, + self.configuration.timeout) + + try: + if opts['qos']: + self.smart_qos.add(opts['qos'], fs_id) + if opts['huawei_smartpartition']: + 
self.smart_partition.add(opts['partitionname'], fs_id) + if opts['huawei_smartcache']: + self.smart_cache.add(opts['cachename'], fs_id) + except Exception: + self._delete_filesystem(fs_id) + LOG.exception('Failed to add smartx to filesystem %s.', fs_id) + raise + + if opts.get('hypermetro'): + try: + params.update({'vstoreId': remote_vstore_id}) + remote_fs_id = self.rpc_client.create_remote_filesystem( + context, self.remote_backend, params) + except Exception as err: + self._delete_filesystem(fs_id) + LOG.exception('Failed to create remote filesystem.' + ' reason: %s', err) + raise + + try: + self.metro_mgr.create_metro_pair( + self.metro_domain, fs_id, remote_fs_id, + self.vstore_pair_id) + except Exception as err: + self._delete_filesystem(fs_id, metro=False) + params = {"ID": remote_fs_id} + self.rpc_client.delete_remote_filesystem( + context, self.remote_backend, params) + LOG.exception('Failed to create HyperMetro filesystem pair' + '%(fs_id)s. reason: %(err)s' + % {"fs_id": fs_id, "err": err}) + raise + + return fs_id + + def _get_remote_fs_id(self, fs_id, metro_info): + if fs_id == metro_info['LOCALOBJID']: + remote_fs_id = metro_info['REMOTEOBJID'] + elif fs_id == metro_info['REMOTEOBJID']: + remote_fs_id = metro_info['LOCALOBJID'] + else: + msg = (_("Filesystem %s is not belong to a HyperMetro " + "filesystem.") % fs_id) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + return remote_fs_id + + def _get_metro_id_from_fs_info(self, fs_info): + if json.loads(fs_info.get('HYPERMETROPAIRIDS')): + metro_id = json.loads(fs_info.get('HYPERMETROPAIRIDS')) + if not metro_id: + msg = _("Filesystem is a HyperMetro, but failed to get the " + "metro id") + LOG.error(msg) + raise exception.ShareResourceNotFound(reason=msg) + metro_id = metro_id[0] + return metro_id + + def _delete_metro_filesystem(self, context, fs_id, fs_info): + metro_id = self._get_metro_id_from_fs_info(fs_info) + if not metro_id: + return + metro_info = 
self.helper.get_hypermetro_pair_by_id(metro_id) + remote_fs_id = self._get_remote_fs_id(fs_id, metro_info) + + try: + self.metro_mgr.delete_metro_pair(metro_id=metro_id) + except Exception as err: + msg = (_("Failed to delete HyperMetro filesystem pair " + "%(metro_id)s. Reason: %(err)s") + % {"metro_id": metro_id, "err": err}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + vstore_info = self.helper.get_hypermetro_vstore_by_pair_id( + self.vstore_pair_id) + try: + remote_vstore_id = vstore_info.get('REMOTEVSTOREID') + if remote_vstore_id: + params = {"ID": remote_fs_id, 'vstoreId': remote_vstore_id} + else: + params = {"ID": remote_fs_id} + self.rpc_client.delete_remote_filesystem( + context, self.remote_backend, params) + except Exception as err: + msg = (_("Failed to delete remote filesystem %(fs_id)s. " + "Reason: %(err)s") % {"fs_id": remote_fs_id, "err": err}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + try: + local_vstore_id = vstore_info.get('LOCALVSTOREID') + if local_vstore_id: + params = {"ID": fs_id, 'vstoreId': local_vstore_id} + else: + params = {"ID": fs_id} + self.helper.delete_filesystem(params) + except Exception as err: + msg = (_("Failed to delete local filesystem %(fs_id)s. 
" + "Reason: %(err)s") % {"fs_id": fs_id, "err": err}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + def _delete_filesystem(self, fs_id, context=None, metro=True): + fs_info = self.helper.get_fs_info_by_id(fs_id) + if fs_info['IOCLASSID']: + self.smart_qos.remove(fs_id, fs_info['IOCLASSID']) + if fs_info['CACHEPARTITIONID']: + self.smart_partition.remove(fs_id, fs_info['CACHEPARTITIONID']) + if fs_info['SMARTCACHEPARTITIONID']: + self.smart_cache.remove(fs_id, fs_info['SMARTCACHEPARTITIONID']) + + if json.loads(fs_info.get('HYPERMETROPAIRIDS')) and metro: + self._delete_metro_filesystem(context, fs_id, fs_info) + return + + params = {"ID": fs_id} + self.helper.delete_filesystem(params) + + def _create_share(self, share, fs_id): + share_name = share['name'] + share_proto = share['share_proto'] + + fs_info = self.helper.get_fs_info_by_id(fs_id) + vstore_id = fs_info.get('vstoreId') + + try: + self.helper.create_share(share_name, fs_id, share_proto, + vstore_id) + except Exception: + LOG.exception('Failed to create %(proto)s share for FS %(fs)s.', + {'proto': share_proto, 'fs': fs_id}) + raise + + def _get_export_location(self, share_name, share_proto, + share_server): + fs_info = self.helper.get_fs_info_by_name(share_name) + if not fs_info: + LOG.warning('FS %s to delete not exist.', share_name) + return + elif json.loads(fs_info.get('HYPERMETROPAIRIDS')): + metro = True + else: + metro = False + + if share_server: + if metro: + vstore_id = fs_info.get('vstoreId') + self.helper.modify_logical_port( + share_server['backend_details']['logical_port_id'], + vstore_id) + ips = [share_server['backend_details']['ip']] + else: + if not metro: + ips = self.configuration.logical_ip + else: + ips = [self.metro_logic_ip] + + dnses = self.configuration.dns + if dnses: + ips = dnses + + path_name = huawei_utils.share_name(share_name) + if share_proto == 'NFS': + locations = ['%s:/%s' % (ip, path_name) for ip in ips] + elif share_proto == 'CIFS': + share_info = 
self.helper.get_share_by_name( + share_name, share_proto, fs_info.get('vstoreId')) + path_name = huawei_utils.share_name(share_info.get('NAME')) + locations = [r'\\%s\%s' % (ip, path_name) for ip in ips] + else: + msg = _('Invalid NAS protocol %s.') % share_proto + raise exception.InvalidInput(reason=msg) + + return locations + + def create_share(self, context, share, share_server=None): + pool_name = share_utils.extract_host(share['host'], level='pool') + if not pool_name: + msg = _("Pool is not available in host %s.") % share['host'] + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + fs_id = self._create_filesystem(share, pool_name, context=context) + self._create_share(share, fs_id) + return self._get_export_location( + share['name'], share['share_proto'], share_server) + + def rpc_delete_share(self, context, share_name, share_proto): + fs_info = self.helper.get_fs_info_by_name(share_name) + vstore_id = fs_info.get('vstoreId') + share_info = self.helper.get_share_by_name( + share_name, share_proto, vstore_id) + if share_info: + self.helper.delete_share( + share_info['ID'], share_proto, vstore_id) + self._delete_filesystem(fs_info['ID'], context) + + def delete_share(self, context, share, share_server=None): + share_name = share['name'] + share_proto = share['share_proto'] + fs_info = self.helper.get_fs_info_by_name(share_name) + if not fs_info: + LOG.warning('FS %s to delete not exist.', share_name) + return + if json.loads(fs_info.get('HYPERMETROPAIRIDS')): + self.rpc_client.get_remote_fs_info( + context, share_name, self.remote_backend) + if self._check_is_active_client(): + self.rpc_delete_share(context, share_name, share_proto) + else: + self.rpc_client.delete_share( + context, share_name, share_proto, self.remote_backend) + else: + self.rpc_delete_share(context, share_name, share_proto) + + def _update_filesystem(self, fs_info, params): + fs_id = fs_info.get('ID') + if json.loads(fs_info.get('HYPERMETROPAIRIDS')): + metro_id = 
self._get_metro_id_from_fs_info(fs_info) + metro_info = self.helper.get_hypermetro_pair_by_id(metro_id) + remote_fs_id = self._get_remote_fs_id(fs_id, metro_info) + + try: + context = manila_context.get_admin_context() + self.rpc_client.update_filesystem(context, self.remote_backend, + remote_fs_id, params) + except Exception as err: + msg = (_("Failed to update remote filesystem %(fs_id)s. " + "Reason: %(err)s") % + {"fs_id": remote_fs_id, "err": err}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + self.helper.update_filesystem(fs_id, params) + + def extend_share(self, share, new_size, share_server): + share_name = share['name'] + share_proto = share['share_proto'] + fs_info = self.helper.get_fs_info_by_name(share_name) + share_info = self.helper.get_share_by_name( + share_name, share_proto, fs_info.get('vstoreId')) + if not share_info: + msg = _("share %s does not exist.") % share_name + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + fs_info = self.helper.get_fs_info_by_name(share_name) + size = new_size * constants.CAPACITY_UNIT + params = {"CAPACITY": size} + self._update_filesystem(fs_info, params) + + def shrink_share(self, share, new_size, share_server): + share_name = share['name'] + share_proto = share['share_proto'] + fs_info = self.helper.get_fs_info_by_name(share_name) + share_info = self.helper.get_share_by_name( + share_name, share_proto, fs_info.get('vstoreId')) + if not share_info: + msg = _("share %s does not exist.") % share_name + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + fs_info = self.helper.get_fs_info_by_name(share_name) + fs_id = fs_info['ID'] + size = new_size * constants.CAPACITY_UNIT + used_size = int(fs_info['MINSIZEFSCAPACITY']) + if used_size > size: + LOG.error('FS %(id)s already uses %(used)d capacity. 
' + 'Cannot shrink to %(newsize)d.', + {'id': fs_id, 'used': used_size, 'newsize': size}) + raise exception.ShareShrinkingPossibleDataLoss( + share_id=share['id']) + + params = {"CAPACITY": size} + self._update_filesystem(fs_info, params) + + def create_snapshot(self, context, snapshot, share_server=None): + fs_info = self.helper.get_fs_info_by_name(snapshot['share_name']) + if not fs_info: + msg = _("FS %s not exist.") % snapshot['share_name'] + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + snapshot_id = self.helper.create_snapshot( + fs_info['ID'], snapshot['name']) + return {'provider_location': snapshot_id} + + def delete_snapshot(self, context, snapshot, share_server=None): + provider_location = snapshot.get('provider_location') + if provider_location and '@' in provider_location: + snapshot_id = provider_location + else: + fs_info = self.helper.get_fs_info_by_name( + snapshot['share_name']) + if not fs_info: + LOG.warning('Parent FS of snapshot %s to delete not exist.', + snapshot['id']) + return + snapshot_id = huawei_utils.snapshot_id( + fs_info['ID'], snapshot['name']) + self.helper.delete_snapshot(snapshot_id) + + def _update_storage_supports(self): + feature_status = self.helper.get_feature_status() + + for f in ('SmartThin', 'SmartQoS', 'SmartPartition', 'SmartCache', + 'HyperMetro', 'HyperReplication', 'HyperSnap'): + self.feature_supports[f] = (feature_status.get(f) in + constants.AVAILABLE_FEATURE_STATUS) + + self.feature_supports['SmartDedup'] = False + self.feature_supports['SmartCompression'] = False + + for f in feature_status: + if re.match('SmartDedup[\s\S]*FS', f): + self.feature_supports['SmartDedup'] = ( + feature_status[f] in constants.AVAILABLE_FEATURE_STATUS) + if re.match('SmartCompression[\s\S]*FS', f): + self.feature_supports['SmartCompression'] = ( + feature_status[f] in constants.AVAILABLE_FEATURE_STATUS) + + LOG.info('Update feature support: %s.', self.feature_supports) + + def _update_share_stats(self): + 
self.huawei_config.update_configs() + self._update_storage_supports() + + backend_name = self.configuration.safe_get('share_backend_name') + data = { + 'share_backend_name': backend_name or 'HUAWEI_NAS_Driver', + 'vendor_name': 'Huawei', + 'driver_version': '2.2.RC1', + 'storage_protocol': 'NFS_CIFS', + 'snapshot_support': (self.feature_supports['HyperSnap'] + and self.configuration.snapshot_support), + } + + data['revert_to_snapshot_support'] = data['snapshot_support'] + + # Huawei storage doesn't support snapshot replication, so driver can't + # create replicated snapshot, this's not fit the requirement of Manila + # replication feature. + # To avoid this problem, we specify Huawei driver can't support + # snapshot and replication both, as a workaround. + if (not data['snapshot_support'] and + self.feature_supports['HyperReplication'] and + self.configuration.replication_support): + data['replication_type'] = 'dr' + + def _get_capacity(pool_info): + return { + 'TOTALCAPACITY': float(pool_info['USERTOTALCAPACITY'] + ) / constants.CAPACITY_UNIT, + 'FREECAPACITY': float(pool_info['USERFREECAPACITY'] + ) / constants.CAPACITY_UNIT, + 'CONSUMEDCAPACITY': float(pool_info['USERCONSUMEDCAPACITY'] + ) / constants.CAPACITY_UNIT, + 'PROVISIONEDCAPACITY': float(pool_info['TOTALFSCAPACITY'] + ) / constants.CAPACITY_UNIT, + } + + def _get_disk_type(pool_info): + pool_disk = [] + for i, x in enumerate(['ssd', 'sas', 'nl_sas']): + if ('TIER%dCAPACITY' % i in pool_info and + pool_info['TIER%dCAPACITY' % i] != '0'): + pool_disk.append(x) + + if len(pool_disk) > 1: + pool_disk = ['mix'] + + return pool_disk[0] if pool_disk else None + + pools = [] + for pool_name in self.configuration.storage_pools: + pool = {'pool_name': pool_name} + + pool_info = self.helper.get_pool_by_name(pool_name, + log_filter=True) + if pool_info: + capacity = _get_capacity(pool_info) + pool['huawei_disk_type'] = _get_disk_type(pool_info) + else: + capacity = {} + + pool.update({ + 
'max_over_subscription_ratio': self.configuration.safe_get( + 'max_over_subscription_ratio'), + 'total_capacity_gb': capacity.get('TOTALCAPACITY', 0.0), + 'free_capacity_gb': capacity.get('FREECAPACITY', 0.0), + 'provisioned_capacity_gb': + capacity.get('PROVISIONEDCAPACITY', 0.0), + 'allocated_capacity_gb': capacity.get('CONSUMEDCAPACITY', 0.0), + 'reserved_percentage': 0, + 'qos': [self.feature_supports['SmartQoS'], False], + 'huawei_smartcache': + [self.feature_supports['SmartCache'], False], + 'huawei_smartpartition': + [self.feature_supports['SmartPartition'], False], + 'dedupe': [self.feature_supports['SmartDedup'], False], + 'compression': + [self.feature_supports['SmartCompression'], False], + }) + + if self.configuration.nas_product != "Dorado": + pool['thin_provisioning'] = \ + [self.feature_supports['SmartThin'], False] + else: + pool['thin_provisioning'] = True + + if self.metro_domain and self._check_is_active_client(): + pool['hypermetro'] = self.feature_supports['HyperMetro'] + else: + pool['hypermetro'] = False + + pools.append(pool) + data['pools'] = pools + super(HuaweiNasDriver, self)._update_share_stats(data) + + def _check_is_active_client(self): + vstore_pair_info = self.helper.get_hypermetro_vstore_by_pair_id( + self.vstore_pair_id) + active_flag = vstore_pair_info.get('ACTIVEORPASSIVE') + if active_flag == '0': + return True + return False + + def _get_access_for_share_copy(self, share): + share_proto = share['share_proto'] + access = {'access_level': common_constants.ACCESS_LEVEL_RW} + if share_proto == 'NFS': + access['access_to'] = self.configuration.nfs_client_ip + access['access_type'] = 'ip' + else: + access['access_to'] = self.configuration.cifs_client_name + access['access_password'] = self.configuration.cifs_client_password + access['access_type'] = 'user' + + LOG.info("Get access %(access)s for share %(share)s copy.", + {'access': access, 'share': share['name']}) + return access + + def create_share_from_snapshot(self, context, 
share, snapshot, + share_server=None, parent_share=None): + share_fs_info = self.helper.get_fs_info_by_name( + snapshot['share_name']) + if not share_fs_info: + LOG.error('share %s of snapshot is not existed.', + snapshot['share_name']) + raise exception.StorageResourceNotFound( + name=snapshot['share_name']) + if json.loads(share_fs_info.get('HYPERMETROPAIRIDS')): + msg = _("HyperMetro Pair Share does not support " + "create from snapshot") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + share_fs_id = share_fs_info['ID'] + snapshot_id = huawei_utils.snapshot_id( + share_fs_id, snapshot['name']) + + done = True + + if snapshot['snapshot']['share_proto'] == share['share_proto']: + try: + location = self._create_from_snapshot_by_clone( + context, share, share_fs_id, snapshot_id, share_server) + except Exception: + LOG.warning('Create share by backend clone failed, ' + 'try host copy.') + done = False + else: + LOG.info('Share protocol is inconsistent, will use host copy.') + done = False + + if not done: + location = self._create_from_snapshot_by_host( + context, share, snapshot, share_server) + + return location + + def _create_from_snapshot_by_clone(self, context, share, share_fs_id, + snapshot_id, share_server): + fs_id = self._create_filesystem(share, None, share_fs_id, + snapshot_id, context) + fs_info = self.helper.get_fs_info_by_id(fs_id) + clone_size = int(fs_info['CAPACITY']) + new_size = int(share['size']) * units.Mi * 2 + + try: + if new_size != clone_size: + param = {"CAPACITY": new_size} + self.helper.update_filesystem(fs_id, param) + + self.helper.split_clone_fs(fs_id) + + def _split_done(): + fs_info = self.helper.get_fs_info_by_id(fs_id) + return fs_info['ISCLONEFS'] != 'true' + + huawei_utils.wait_for_condition(_split_done, 5, 3600 * 24) + except Exception: + LOG.exception('Create clone FS %s error.', fs_id) + self.delete_share(context, share) + raise + + share_name = share['name'] + share_proto = share['share_proto'] + fs_info = 
self.helper.get_fs_info_by_name(share_name) + share_info = self.helper.get_share_by_name( + share_name, share_proto, fs_info.get('vstoreId')) + if not share_info: + self._create_share(share, fs_id) + else: + accesses = self.helper.get_all_share_access( + share_info['ID'], share_proto) + for i in accesses: + self.helper.remove_access(i['ID'], share_proto) + + return self._get_export_location(share_name, share_proto, share_server) + + def _create_from_snapshot_by_host(self, context, share, snapshot, + share_server=None): + src_share_proto = snapshot['snapshot']['share_proto'] + src_share_name = snapshot['share_name'] + src_share = {'name': src_share_name, + 'share_proto': src_share_proto} + src_access = self._get_access_for_share_copy(src_share) + try: + self.allow_access(context, src_share, src_access) + except Exception: + LOG.exception('Failed to add access to src share %s for copy.', + src_share_name) + raise + + dst_share = share + dst_export_paths = self.create_share(context, dst_share, share_server) + dst_access = self._get_access_for_share_copy(dst_share) + try: + self.allow_access(context, dst_share, dst_access) + except Exception: + LOG.exception('Failed to add access to dst share %s for copy.', + dst_share['name']) + self.deny_access(context, src_share, src_access) + raise + + src_mount_dir = tempfile.mkdtemp(prefix=constants.TMP_PATH_SRC_PREFIX) + src_export_paths = self._get_export_location( + src_share_name, src_share_proto, share_server) + dst_mount_dir = tempfile.mkdtemp(prefix=constants.TMP_PATH_DST_PREFIX) + + try: + self._copy_share_data( + src_share_proto, src_access, src_export_paths, src_mount_dir, + snapshot['name'], dst_share['share_proto'], dst_access, + dst_export_paths, dst_mount_dir) + except Exception: + LOG.exception('Copy share data from %(src)s to %(dst)s error.', + {'src': src_export_paths, 'dst': dst_export_paths}) + self.delete_share(context, dst_share, share_server) + raise + finally: + try: + os.rmdir(src_mount_dir) + 
os.rmdir(dst_mount_dir) + except Exception: + LOG.exception('Remove temp files error.') + self.deny_access(context, src_share, src_access) + self.deny_access(context, dst_share, dst_access) + + return dst_export_paths + + def _copy_share_data(self, src_share_proto, src_access, src_export_paths, + src_mount_dir, snapshot_name, dst_share_proto, + dst_access, dst_export_paths, dst_mount_dir): + try: + self._mount_share_to_host(src_share_proto, src_access, + src_export_paths, src_mount_dir) + except Exception: + LOG.exception('Mount src share %s failed.', src_export_paths) + raise + + try: + self._mount_share_to_host(dst_share_proto, dst_access, + dst_export_paths, dst_mount_dir) + except Exception: + LOG.exception('Mount dst share %s failed.', dst_export_paths) + self._umount_share_from_host(src_mount_dir) + raise + + src_path = '/'.join((src_mount_dir, '.snapshot', + huawei_utils.snapshot_name(snapshot_name))) + try: + self._copy_data(src_path, dst_mount_dir) + finally: + self._umount_share_from_host(src_mount_dir) + self._umount_share_from_host(dst_mount_dir) + + def _copy_data(self, src_path, dst_path): + LOG.info("Copy data from src %s to dst %s.", src_path, dst_path) + + copy = data_utils.Copy(src_path, dst_path, '') + copy.run() + if copy.get_progress()['total_progress'] != 100: + msg = _('Copy data from src %(src)s to dst %(dst)s error.' 
+ ) % {'src': src_path, 'dst': dst_path} + LOG.error(msg) + raise exception.ShareCopyDataException(reason=msg) + + def _umount_share_from_host(self, mount_dir): + utils.execute('umount', mount_dir, run_as_root=True) + + def _mount_share_to_host(self, share_proto, access, export_paths, + mount_dir): + LOG.info("Mount share %(share)s to dir %(dir)s.", + {'share': export_paths, 'dir': mount_dir}) + + for path in export_paths: + if share_proto == 'NFS': + exe_args = ('mount', '-t', 'nfs', path, mount_dir) + else: + user = 'username=%s,password=%s' % ( + access['access_to'], access['access_password']) + exe_args = ('mount', '-t', 'cifs', path, mount_dir, + '-o', user) + + try: + utils.execute(*exe_args, run_as_root=True) + except Exception: + LOG.exception('Mount share %s error', path) + continue + else: + return + + msg = ("Cannot mount share %(share)s to dir %(dir)s.", + {'share': export_paths, 'dir': mount_dir}) + LOG.error(msg) + raise exception.ShareMountException(reason=msg) + + def rpc_deny_access(self, context, params): + share_name = params['name'] + share_proto = params['share_proto'] + access_to = params['access_to'] + access_type = params['access_type'] + if (share_proto == 'NFS' and access_type not in ('ip', 'user') + or share_proto == 'CIFS' and access_type != 'user'): + LOG.warning('Access type invalid for %s share.', share_proto) + return + + fs_info = self.helper.get_fs_info_by_name(share_name) + if not fs_info: + LOG.warning('FS %s to deny access not exist.', share_name) + return + + vstore_id = fs_info.get('vstoreId') + share_info = self.helper.get_share_by_name( + share_name, share_proto, vstore_id) + if not share_info: + LOG.warning('Share %s not exist for denying access.', share_name) + return + + access = self.helper.get_share_access( + share_info['ID'], access_to, share_proto, vstore_id) + if not access: + LOG.warning('Access %(access)s not exist in share %(share)s.', + {'access': access_to, 'share': share_name}) + return + + 
self.helper.remove_access(access['ID'], share_proto, vstore_id) + + def deny_access(self, context, share, access, share_server=None): + share_name = share['name'] + share_proto = share['share_proto'] + access_to, access_type, access_level = huawei_utils.get_access_info( + access) + params = {"name": share_name, + "share_proto": share_proto, + "access_to": access_to, + "access_type": access_type} + fs_info = self.helper.get_fs_info_by_name(share_name) + if not fs_info: + LOG.warning("FS %s is not exist.", share['name']) + return + if json.loads(fs_info.get('HYPERMETROPAIRIDS')): + self.rpc_client.get_remote_fs_info( + context, share_name, self.remote_backend) + if self._check_is_active_client(): + self.rpc_deny_access(context, params) + else: + self.rpc_client.deny_access(context, params, + self.remote_backend) + else: + self.rpc_deny_access(context, params) + + def rpc_allow_access(self, context, params): + share_name = params['name'] + share_proto = params['share_proto'] + access_to = params['access_to'] + access_type = params['access_type'] + access_level = params['access_level'] + share_type_id = params['share_type_id'] + if share_proto == 'NFS': + if access_type not in ('user', 'ip'): + msg = _('Only ip or user access types ' + 'are allowed for NFS share.') + raise exception.InvalidShareAccess(reason=msg) + if access_type == 'user': + # Use 'user' type as netgroup for NFS. 
+ access_to = '@' + access_to + + if access_level == common_constants.ACCESS_LEVEL_RW: + access_level = constants.ACCESS_NFS_RW + else: + access_level = constants.ACCESS_NFS_RO + elif share_proto == 'CIFS': + if access_type != 'user': + msg = _('Only user access type is allowed for CIFS share.') + raise exception.InvalidShareAccess(reason=msg) + if access_level == common_constants.ACCESS_LEVEL_RW: + access_level = constants.ACCESS_CIFS_FULLCONTROL + else: + access_level = constants.ACCESS_CIFS_RO + + fs_info = self.helper.get_fs_info_by_name(share_name) + if not fs_info: + msg = _("FS %s to allow access not exist.") % share_name + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + vstore_id = fs_info.get('vstoreId') + share_info = self.helper.get_share_by_name( + share_name, share_proto, vstore_id) + if not share_info: + msg = _("Share %s not exist.") % share_name + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + share_access = self.helper.get_share_access( + share_info['ID'], access_to, share_proto, vstore_id) + if share_access: + if (('ACCESSVAL' in share_access and + share_access['ACCESSVAL'] != access_level) + or ('PERMISSION' in share_access + and share_access['PERMISSION'] != access_level)): + self.helper.change_access( + share_access['ID'], share_proto, access_level, + vstore_id) + else: + self.helper.allow_access( + share_info['ID'], access_to, share_proto, access_level, + share_type_id, vstore_id) + + def allow_access(self, context, share, access, share_server=None): + share_name = share['name'] + share_proto = share['share_proto'] + access_to, access_type, access_level = huawei_utils.get_access_info( + access) + params = {"name": share_name, + "share_proto": share_proto, + "access_to": access_to, + "access_type": access_type, + "access_level": access_level, + "share_type_id": share.get("share_type_id")} + fs_info = self.helper.get_fs_info_by_name(share_name) + if not fs_info: + msg = _("FS %s to allow access not 
exist.") % share_name + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + if json.loads(fs_info.get('HYPERMETROPAIRIDS')): + self.rpc_client.get_remote_fs_info( + context, share_name, self.remote_backend) + if self._check_is_active_client(): + self.rpc_allow_access(context, params) + else: + self.rpc_client.allow_access( + context, params, self.remote_backend) + else: + self.rpc_allow_access(context, params) + + def update_access(self, context, share, access_rules, add_rules, + delete_rules, share_server=None): + def _access_handler(rules, handler): + for access in rules: + try: + handler(context, share, access, share_server) + except Exception: + LOG.exception( + 'Failed to %(handler)s access %(access)s for share ' + '%(share)s.', + {'handler': handler.__name__, + 'access': huawei_utils.get_access_info(access), + 'share': share['name']}) + raise + + if not add_rules and not delete_rules: + _access_handler(access_rules, self.allow_access) + else: + _access_handler(delete_rules, self.deny_access) + _access_handler(add_rules, self.allow_access) + + def get_pool(self, share): + fs_info = self.helper.get_fs_info_by_name(share['name']) + if fs_info: + return fs_info['PARENTNAME'] + + def manage_existing(self, share, driver_options): + share_proto = share['share_proto'] + + old_export_location = share['export_locations'][0]['path'] + old_share_ip, old_share_name = huawei_utils.get_share_by_location( + old_export_location, share_proto) + if not old_share_name: + msg = _('Export location %s is invalid.') % old_export_location + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if old_share_ip not in self.configuration.logical_ip \ + and old_share_ip not in self.configuration.dns: + msg = _('IP %s inconsistent with logical IP.') % old_share_ip + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + fs_info = self.helper.get_fs_info_by_name(old_share_name) + share_info = self.helper.get_share_by_name( + old_share_name, share_proto, 
fs_info.get('vstoreId')) + if not share_info: + msg = _("Share %s not exist.") % old_share_name + LOG.error(msg) + raise exception.InvalidShare(reason=msg) + + fs_id = share_info['FSID'] + fs_info = self.helper.get_fs_info_by_id(fs_id) + if (fs_info['HEALTHSTATUS'] != constants.STATUS_FS_HEALTH or + fs_info['RUNNINGSTATUS'] != constants.STATUS_FS_RUNNING): + msg = _("FS %s status is abnormal.") % fs_id + LOG.error(msg) + raise exception.InvalidShare(reason=msg) + + if (json.loads(fs_info['REMOTEREPLICATIONIDS']) or + json.loads(fs_info['HYPERMETROPAIRIDS'])): + msg = _("FS %s has been associated to other feature, " + "cannot manage it.") % fs_id + LOG.error(msg) + raise exception.InvalidShare(reason=msg) + + pool_name = share_utils.extract_host(share['host'], level='pool') + if pool_name and pool_name != fs_info['PARENTNAME']: + msg = _("FS %(id)s pool is inconsistent with %(pool)s." + ) % {'id': fs_id, 'pool': pool_name} + LOG.error(msg) + raise exception.InvalidShare(reason=msg) + + opts = huawei_utils.get_share_extra_specs_params( + share['share_type_id']) + if 'LUNType' in opts and fs_info['ALLOCTYPE'] != opts['LUNType']: + msg = _("FS %(id)s type is inconsistent with %(type)s." 
+ ) % {'id': fs_id, 'type': opts['LUNType']} + LOG.error(msg) + raise exception.InvalidShare(reason=msg) + + if (fs_info['ALLOCTYPE'] == constants.ALLOC_TYPE_THICK_FLAG and + (opts['compression'] or opts['dedupe'])): + msg = _('Dedupe or compression cannot be set for thick FS.') + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + share_params = {'DESCRIPTION': share['name']} + self.helper.update_share(share_info['ID'], share_proto, share_params) + share_size = self._retype_filesystem(opts, fs_info, share['name']) + + locations = self._get_export_location(share['name'], share_proto, None) + return {'size': share_size, 'export_locations': locations} + + def _retype_filesystem(self, new_opts, fs_info, new_share_name): + fs_id = fs_info['ID'] + + if new_opts['huawei_smartpartition']: + if fs_info['CACHEPARTITIONID']: + self.smart_partition.update(fs_id, new_opts['partitionname'], + fs_info['CACHEPARTITIONID']) + else: + self.smart_partition.add(new_opts['partitionname'], fs_id) + elif fs_info['CACHEPARTITIONID']: + self.smart_partition.remove(fs_id, fs_info['CACHEPARTITIONID']) + + if new_opts['huawei_smartcache']: + if fs_info['SMARTCACHEPARTITIONID']: + self.smart_cache.update(fs_id, new_opts['cachename'], + fs_info['SMARTCACHEPARTITIONID']) + else: + self.smart_cache.add(new_opts['cachename'], fs_id) + elif fs_info['SMARTCACHEPARTITIONID']: + self.smart_cache.remove(fs_id, fs_info['SMARTCACHEPARTITIONID']) + + if new_opts['qos']: + if fs_info['IOCLASSID']: + self.smart_qos.update(fs_id, new_opts['qos'], + fs_info['IOCLASSID']) + else: + self.smart_qos.add(new_opts['qos'], fs_id) + elif fs_info['IOCLASSID']: + self.smart_qos.remove(fs_id, fs_info['IOCLASSID']) + + fs_param = {"NAME": huawei_utils.share_name(new_share_name), + "DESCRIPTION": new_share_name, + } + + compression = strutils.bool_from_string(fs_info['ENABLECOMPRESSION']) + if new_opts['compression'] and not compression: + fs_param["ENABLECOMPRESSION"] = True + elif compression: + 
fs_param["ENABLECOMPRESSION"] = False + + dedupe = strutils.bool_from_string(fs_info['ENABLEDEDUP']) + if new_opts['dedupe'] and not dedupe: + fs_param["ENABLEDEDUP"] = True + elif dedupe: + fs_param["ENABLEDEDUP"] = False + + cur_size = int(fs_info['CAPACITY']) / constants.CAPACITY_UNIT + new_size = math.ceil(float(fs_info['CAPACITY']) / + constants.CAPACITY_UNIT) + if cur_size != new_size: + fs_param["CAPACITY"] = new_size * constants.CAPACITY_UNIT + + if new_opts['sectorsize']: + sectorsize = int(new_opts['sectorsize']) * units.Ki + if sectorsize != int(fs_info['SECTORSIZE']): + fs_param['SECTORSIZE'] = sectorsize + + self.helper.update_filesystem(fs_id, fs_param) + return new_size + + def unmanage(self, share): + pass + + def manage_existing_snapshot(self, snapshot, driver_options): + fs_info = self.helper.get_fs_info_by_name(snapshot['share_name']) + if not fs_info: + msg = _("Parent FS %(fs)s of snapshot %(snap)s not exist." + ) % {'snap': snapshot['id'], + 'fs': snapshot['share_name']} + LOG.error(msg) + raise exception.InvalidShare(reason=msg) + + snapshot_id = fs_info['ID'] + "@" + snapshot['provider_location'] + snapshot_info = self.helper.get_snapshot_by_id(snapshot_id) + if snapshot_info['HEALTHSTATUS'] != constants.STATUS_SNAPSHOT_HEALTH: + msg = _("Snapshot %s is abnormal, cannot import.") % snapshot_id + LOG.error(msg) + raise exception.ManageInvalidShareSnapshot(reason=msg) + + snapshot_name = huawei_utils.snapshot_name(snapshot['name']) + self.helper.rename_snapshot(snapshot_id, snapshot_name) + snapshot_id = huawei_utils.snapshot_id( + fs_info['ID'], snapshot_name) + return {'provider_location': snapshot_id} + + def get_network_allocations_number(self): + if self.configuration.driver_handles_share_servers: + return 1 + else: + return 0 + + def _setup_server(self, network_info, metadata=None): + LOG.info('To setup server: %s.', network_info) + network_type = network_info['network_type'] + if network_type not in constants.VALID_NETWORK_TYPE: + msg = 
_('Network type %s is invalid.') % network_type + LOG.error(msg) + raise exception.NetworkBadConfigurationException(reason=msg) + + vlan_tag = network_info['segmentation_id'] or 0 + ip = network_info['network_allocations'][0]['ip_address'] + ip_addr = ipaddress.ip_address(ip) + subnet = utils.cidr_to_netmask(network_info['cidr']) + + ad, ldap = self._get_security_service( + network_info['security_services']) + + ad_created = False + ldap_created = False + if ad: + self._configure_ad(ad) + ad_created = True + if ldap: + self._configure_ldap(ldap) + ldap_created = True + + try: + vlan_id, logical_port_id = self._create_logical_port( + vlan_tag, ip, ip_addr.version, subnet) + except exception.ManilaException: + if ad_created: + self.helper.delete_ad_config(ad['user'], ad['password']) + self.helper.set_dns_ip_address([]) + if ldap_created: + self.helper.delete_ldap_config() + raise + + server_details = {'ip': ip, + 'logical_port_id': logical_port_id, + } + if vlan_id: + server_details['vlan_id'] = vlan_id + return server_details + + def _get_security_service(self, security_services): + active_directory = None + ldap = None + for ss in security_services: + if ss['type'] == 'active_directory': + active_directory = ss + elif ss['type'] == 'ldap': + ldap = ss + return active_directory, ldap + + def _configure_ad(self, active_directory): + dns_ip = active_directory['dns_ip'] + user = active_directory['user'] + password = active_directory['password'] + domain = active_directory['domain'] + if not dns_ip or not user or not password or not domain: + msg = (_("(%(dns_ip)s, %(user)s, %(password)s, %(domain)s) of " + "active_directory invalid.") + % {"dns_ip": dns_ip, "user": user, + "password": password, "domain": domain}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + # Check DNS server exists or not. + ip_address = self.helper.get_dns_ip_address() + if ip_address: + msg = _("DNS server %s has already been configured." 
+ ) % ip_address + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + # Check AD config exists or not. + ad_config = self.helper.get_ad_config() + if ad_config: + msg = _("AD domain %s has already been configured." + ) % ad_config['FULLDOMAINNAME'] + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + # Set DNS server ip. + self.helper.set_dns_ip_address([dns_ip]) + + def _check_ad_status(): + ad = self.helper.get_ad_config() + if not ad or ad['DOMAINSTATUS'] == constants.AD_JOIN_FAILED: + msg = _('AD domain status is failed.') + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + return ad['DOMAINSTATUS'] == constants.AD_JOIN_DOMAIN + + # Set AD config. + try: + self.helper.add_ad_config(user, password, domain) + huawei_utils.wait_for_condition( + _check_ad_status, self.configuration.wait_interval, + self.configuration.timeout) + except exception.ManilaException: + self.helper.set_dns_ip_address([]) + msg = _('Failed to add AD config.') + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + def _configure_ldap(self, ldap): + server = ldap['server'] + domain = ldap['domain'] + if not server or not domain: + msg = (_("(%(server)s, %(domain)s) of ldap invalid.") + % {"server": server, "domain": domain}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + # Check LDAP config exists or not. + ldap_info = self.helper.get_ldap_config() + if ldap_info: + err_msg = _("LDAP domain (%s) has already been configured." + ) % ldap_info['LDAPSERVER'] + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + + if len(server.split(',')) > 3: + msg = _("Server IPs of ldap greater than 3.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + # Set LDAP config. 
+ self.helper.add_ldap_config(server, domain) + + def _create_logical_port(self, vlan_tag, ip, ip_type, subnet): + vlan_id = None + if vlan_tag: + vlans = self.helper.get_vlan_by_tag(vlan_tag) + if vlans: + vlan_id = vlans[0]['ID'] + else: + port, port_type = self._get_optimal_port() + vlan_id = self.helper.create_vlan( + port['id'], port_type, vlan_tag) + home_port_id = vlan_id + home_port_type = constants.PORT_TYPE_VLAN + else: + port, port_type = self._get_optimal_port() + home_port_id = port['id'] + home_port_type = port_type + + logical_port = self.helper.get_logical_port_by_ip(ip, ip_type) + if not logical_port: + data = {"HOMEPORTID": home_port_id, + "HOMEPORTTYPE": home_port_type, + "NAME": ip, + "OPERATIONALSTATUS": True, + "SUPPORTPROTOCOL": 3, + } + if ip_type == 4: + data.update({"ADDRESSFAMILY": 0, + "IPV4ADDR": ip, + "IPV4MASK": subnet, + }) + else: + data.update({"ADDRESSFAMILY": 1, + "IPV6ADDR": ip, + "IPV6MASK": subnet, + }) + logical_port_id = self.helper.create_logical_port(data) + else: + logical_port_id = logical_port['ID'] + + return vlan_id, logical_port_id + + def _get_optimal_port(self): + eth_ports, bond_ports = self._get_valid_ports(self.configuration.ports) + logical_ports = self.helper.get_all_logical_port() + sorted_eths = self._sorted_ports(eth_ports, logical_ports) + sorted_bonds = self._sorted_ports(bond_ports, logical_ports) + + if sorted_eths and sorted_bonds: + if sorted_eths[0][1] >= sorted_bonds[0][1]: + return sorted_bonds[0][0], constants.PORT_TYPE_BOND + else: + return sorted_eths[0][0], constants.PORT_TYPE_ETH + elif sorted_eths: + return sorted_eths[0][0], constants.PORT_TYPE_ETH + elif sorted_bonds: + return sorted_bonds[0][0], constants.PORT_TYPE_BOND + else: + msg = _("Cannot find optimal port.") + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + def _get_valid_ports(self, config_ports): + eth_ports = self.helper.get_all_eth_port() + bond_ports = self.helper.get_all_bond_port() + + def 
_filter_eth_port(port): + return (constants.PORT_LINKUP == port['RUNNINGSTATUS'] + and not port['IPV4ADDR'] + and not port['IPV6ADDR'] + and not port['BONDNAME'] + and (not config_ports or port['LOCATION'] in config_ports) + ) + + def _filter_bond_port(port): + if (constants.PORT_LINKUP != port['RUNNINGSTATUS'] or + (config_ports and port['NAME'] not in config_ports)): + return False + port_ids = json.loads(port['PORTIDLIST']) + for eth in eth_ports: + if eth['ID'] in port_ids and ( + eth['IPV4ADDR'] or eth['IPV6ADDR']): + return False + return True + + valid_eth_ports = [{'id': eth['ID'], 'name': eth['LOCATION']} + for eth in eth_ports if _filter_eth_port(eth)] + valid_bond_ports = [{'id': bond['ID'], 'name': bond['NAME']} + for bond in bond_ports if _filter_bond_port(bond)] + return valid_eth_ports, valid_bond_ports + + def _sorted_ports(self, port_list, logical_ports): + def _get_port_weight(port): + weight = 0 + for logical in logical_ports: + if logical['HOMEPORTTYPE'] == constants.PORT_TYPE_VLAN: + pos = logical['HOMEPORTNAME'].rfind('.') + if logical['HOMEPORTNAME'][:pos] == port['name']: + weight += 1 + elif logical['HOMEPORTNAME'] == port['name']: + weight += 1 + return weight + + sorted_ports = [] + for port in port_list: + port_weight = _get_port_weight(port) + sorted_ports.append((port, port_weight)) + + return sorted(sorted_ports, key=lambda i: i[1]) + + def _teardown_server(self, server_details, security_services=None): + if 'logical_port_id' in server_details: + self.helper.delete_logical_port( + server_details['logical_port_id']) + + if 'vlan_id' in server_details: + self.helper.delete_vlan(server_details['vlan_id']) + + if not security_services: + return + + ad, ldap = self._get_security_service(security_services) + if ad: + ip_address = self.helper.get_dns_ip_address() + if ip_address and ip_address[0] == ad['dns_ip']: + self.helper.set_dns_ip_address([]) + + ad_config = self.helper.get_ad_config() + if ad_config and ad_config['FULLDOMAINNAME'] == 
ad['domain']: + self.helper.delete_ad_config(ad['user'], ad['password']) + + if ldap: + ldap_info = self.helper.get_ldap_config() + if (ldap_info and ldap_info['LDAPSERVER'] == ldap['server'] + and ldap_info['BASEDN'] == ldap['domain']): + self.helper.delete_ldap_config() + + def ensure_share(self, context, share, share_server=None): + share_proto = share['share_proto'] + share_name = share['name'] + fs_info = self.helper.get_fs_info_by_name(share_name) + if not fs_info: + LOG.error('FS %s not exist while ensuring.', share_name) + raise exception.ShareResourceNotFound(share_id=share['id']) + + if (fs_info['HEALTHSTATUS'] != constants.STATUS_FS_HEALTH or + fs_info['RUNNINGSTATUS'] != constants.STATUS_FS_RUNNING): + msg = _('FS %s status is abnormal.') % share_name + LOG.error(msg) + raise exception.InvalidShare(reason=msg) + + share_info = self.helper.get_share_by_name( + share_name, share_proto, fs_info.get('vstoreId')) + if not share_info: + LOG.error('Share %s not exist while ensuring.', share_name) + raise exception.ShareResourceNotFound(share_id=share['id']) + + return self._get_export_location(share_name, share_proto, share_server) + + def create_replica(self, context, replica_list, new_replica, + access_rules, replica_snapshots, share_server=None): + """Create a new share, and create a remote replication pair.""" + location = self.create_share(context, new_replica, share_server) + + try: + for access in access_rules: + self.allow_access(context, new_replica, access) + except Exception: + LOG.exception('Failed to allow access to new replica %s.', + new_replica['name']) + self.delete_share(context, new_replica, share_server) + raise + + # create a replication pair. + # replication pair only can be created by master node, + # so here is a remote call to trigger master node to + # start the creating progress. 
+ try: + active_replica = share_utils.get_active_replica(replica_list) + remote_device_wwn = self.helper.get_array_wwn() + replica_fs = self.helper.get_fs_info_by_name( + new_replica['name']) + + (local_pair_id, replica_pair_id) = \ + self.rpc_client.create_replica_pair( + context, + active_replica['host'], + local_share_info={'name': active_replica['name']}, + remote_device_wwn=remote_device_wwn, + remote_fs_id=replica_fs['ID'], + local_replication=self.configuration.local_replication + ) + except Exception: + LOG.exception('Failed to create a replication pair ' + 'with host %s.', active_replica['host']) + self.delete_share(context, new_replica, share_server) + raise + + # Get the state of the new created replica + replica_state = self.replica_mgr.get_replica_state(local_pair_id) + replica_ref = { + 'export_locations': location, + 'replica_state': replica_state, + } + + return replica_ref + + def update_replica_state(self, context, replica_list, replica, + access_rules, replica_snapshots, + share_server=None): + replica_name = replica['name'] + replica_pair_id = huawei_utils.get_replica_pair_id( + self.helper, replica_name) + if not replica_pair_id: + LOG.error("No replication pair for replica %s.", replica_name) + return common_constants.STATUS_ERROR + + self.replica_mgr.update_replication_pair_state(replica_pair_id) + return self.replica_mgr.get_replica_state(replica_pair_id) + + def promote_replica(self, context, replica_list, replica, access_rules, + share_server=None): + replica_name = replica['name'] + replica_pair_id = huawei_utils.get_replica_pair_id( + self.helper, replica_name) + if not replica_pair_id: + msg = _("No replication pair for replica %s.") % replica_name + LOG.error(msg) + raise exception.ReplicationException(reason=msg) + + try: + self.replica_mgr.switch_over(replica_pair_id) + except Exception: + LOG.exception('Failed to promote replica %s.', replica_name) + raise + + old_active_replica = share_utils.get_active_replica(replica_list) + 
new_active_update = { + 'id': replica['id'], + 'replica_state': common_constants.REPLICA_STATE_ACTIVE, + } + + # get replica state for new secondary after switch over + replica_state = self.replica_mgr.get_replica_state(replica_pair_id) + old_active_update = { + 'id': old_active_replica['id'], + 'replica_state': replica_state, + } + + return [old_active_update, new_active_update] + + def delete_replica(self, context, replica_list, replica_snapshots, + replica, share_server=None): + replica_name = replica['name'] + replica_pair_id = huawei_utils.get_replica_pair_id( + self.helper, replica_name) + if not replica_pair_id: + LOG.warning("No replication pair for replica %s, " + "continue to delete it.", replica_name) + else: + self.replica_mgr.delete_replication_pair(replica_pair_id) + + try: + self.delete_share(context, replica, share_server) + except Exception: + LOG.exception('Failed to delete replica %s.', replica_name) + raise + + def revert_to_snapshot(self, context, snapshot, share_access_rules, + snapshot_access_rules, share_server=None): + fs_info = self.helper.get_fs_info_by_name(snapshot['share_name']) + if not fs_info: + msg = _("FS %s not exist.") % snapshot['share_name'] + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + snap_id = huawei_utils.snapshot_id(fs_info['ID'], snapshot['name']) + self.helper.rollback_snapshot(snap_id) diff --git a/Manila/Ussuri/huawei_utils.py b/Manila/Ussuri/huawei_utils.py new file mode 100644 index 0000000..721049a --- /dev/null +++ b/Manila/Ussuri/huawei_utils.py @@ -0,0 +1,279 @@ +# Copyright (c) 2015 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json
import retrying

from oslo_log import log
from oslo_utils import strutils

from manila import exception
from manila.i18n import _
from manila.share.drivers.huawei import constants
from manila.share import share_types


LOG = log.getLogger(__name__)


def get_share_extra_specs_params(type_id):
    """Parse a share type's extra specs into driver option dict form."""
    specs = {}
    if type_id:
        specs = share_types.get_share_type_extra_specs(type_id)

    opts = _get_opts_from_specs(specs)
    _get_smartprovisioning_opts(opts)
    _check_smartcache_opts(opts)
    _check_smartpartition_opts(opts)
    _get_qos_opts(opts)

    LOG.info('Get share type extra specs: %s', opts)
    return opts


def get_share_privilege(type_id):
    """Extract huawei_share_privilege:* extra specs as UPPERCASE keys."""
    specs = {}
    if type_id:
        specs = share_types.get_share_type_extra_specs(type_id)

    share_privilege = {
        'huawei_share_privilege:sync': _get_string_param,
        'huawei_share_privilege:allsquash': _get_string_param,
        'huawei_share_privilege:rootsquash': _get_string_param,
        'huawei_share_privilege:secure': _get_string_param,
    }

    opts = {}
    for spec_key in specs:
        key = spec_key.lower()
        if share_privilege.get(key):
            opt_key = _get_opt_key(key)
            opts[opt_key.upper()] = share_privilege[key](key, specs[spec_key])

    return opts


def _get_opt_key(spec_key):
    """Return the option name: the part after ':' in a scoped spec key."""
    key_split = spec_key.split(':')
    if len(key_split) == 1:
        return key_split[0]
    else:
        return key_split[1]


def _get_bool_param(k, v):
    """Parse a manila boolean extra spec of the form '<is> True'/'<is> False'.

    BUGFIX: the '<is>' literals had been stripped from this function
    (leaving a check ``words[0] == ''`` that str.split() can never
    satisfy), so every boolean spec raised InvalidInput. Restore manila's
    standard '<is> <bool>' capability format.
    """
    words = v.split()
    if len(words) == 2 and words[0] == '<is>':
        return strutils.bool_from_string(words[1], strict=True)

    msg = _("%(k)s spec must be specified as %(k)s='<is> True' "
            "or '<is> False'.") % {'k': k}
    LOG.error(msg)
    raise exception.InvalidInput(reason=msg)


def _get_string_param(k, v):
    """Require a non-empty string value for spec *k*."""
    if not v:
        msg = _("%s spec must be specified as a string.") % k
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)
    return v


def _get_opts_from_specs(specs):
    """Map known extra-spec keys to option values, with defaults.

    Each entry maps a spec key to (parser, default). Unknown keys are
    ignored so foreign extra specs do not break the driver.
    """
    opts_capability = {
        'capabilities:dedupe': (_get_bool_param, False),
        'capabilities:compression': (_get_bool_param, False),
        'capabilities:huawei_smartcache': (_get_bool_param, False),
        'capabilities:huawei_smartpartition': (_get_bool_param, False),
        'capabilities:thin_provisioning': (_get_bool_param, None),
        'capabilities:qos': (_get_bool_param, False),
        'capabilities:hypermetro': (_get_bool_param, False),
        'huawei_smartcache:cachename': (_get_string_param, None),
        'huawei_smartpartition:partitionname': (_get_string_param, None),
        'huawei_sectorsize:sectorsize': (_get_string_param, None),
        'huawei_controller:controllername': (_get_string_param, None),
        'qos:iotype': (_get_string_param, None),
        'qos:maxiops': (_get_string_param, None),
        'qos:miniops': (_get_string_param, None),
        'qos:minbandwidth': (_get_string_param, None),
        'qos:maxbandwidth': (_get_string_param, None),
        'qos:latency': (_get_string_param, None),
    }

    opts = {}
    for key in opts_capability:
        opt_key = _get_opt_key(key)
        opts[opt_key] = opts_capability[key][1]

    for spec_key in specs:
        key = spec_key.lower()
        if key not in opts_capability:
            continue
        func = opts_capability[key][0]
        opt_key = _get_opt_key(key)
        opts[opt_key] = func(key, specs[spec_key])

    return opts


def _get_smartprovisioning_opts(opts):
    """Translate thin_provisioning (tri-state) into a LUNType flag."""
    if opts['thin_provisioning'] is None:
        return

    if opts['thin_provisioning']:
        opts['LUNType'] = constants.ALLOC_TYPE_THIN_FLAG
    else:
        opts['LUNType'] = constants.ALLOC_TYPE_THICK_FLAG


def _check_smartcache_opts(opts):
    """SmartCache requires an explicit cache name."""
    if opts['huawei_smartcache'] and not opts['cachename']:
        msg = _('Cache name is not set, please set '
                'huawei_smartcache:cachename in extra specs.')
        raise exception.InvalidInput(reason=msg)


def _check_smartpartition_opts(opts):
    """SmartPartition requires an explicit partition name."""
    if opts['huawei_smartpartition'] and not opts['partitionname']:
        msg = _('Partition name is not set, please set '
                'huawei_smartpartition:partitionname in extra specs.')
        raise exception.InvalidInput(reason=msg)


def _get_qos_opts(opts):
    """Fold qos:* entries into a single validated opts['qos'] dict."""
    if not opts['qos']:
        return

    qos = {}
    for key in ('maxiops', 'miniops', 'minbandwidth',
                'maxbandwidth', 'latency'):
        if not opts.get(key):
            opts.pop(key, None)
        elif int(opts[key]) <= 0:
            msg = _('QoS %s must be set greater than 0.') % key
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)
        else:
            qos[key.upper()] = opts.pop(key)

    if not opts['iotype'] or opts['iotype'] not in constants.QOS_IO_TYPES:
        msg = _('iotype must be set to one of %s.') % constants.QOS_IO_TYPES
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)
    else:
        qos['IOTYPE'] = opts.pop('iotype')

    # Protection (lower-limit) and restriction (upper-limit) policies are
    # mutually exclusive on the array.
    if (set(constants.QOS_LOWER_LIMIT) & set(qos)
            and set(constants.QOS_UPPER_LIMIT) & set(qos)):
        msg = _('QoS policy conflict, both protection and '
                'restriction policy are set: %s.') % qos
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)

    opts['qos'] = qos


def wait_for_condition(func, interval, timeout):
    """Poll *func* every *interval* seconds until truthy or *timeout*."""
    def _retry_on_result(result):
        return not result

    def _retry_on_exception(result):
        # Exceptions from func propagate immediately; only falsy results
        # are retried.
        return False

    r = retrying.Retrying(retry_on_result=_retry_on_result,
                          retry_on_exception=_retry_on_exception,
                          wait_fixed=interval * 1000,
                          stop_max_delay=timeout * 1000)
    r.call(func)


def wait_fs_online(helper, fs_id, wait_interval, timeout):
    """Block until the filesystem reports healthy and running."""
    def _wait_fs_online():
        fs = helper.get_fs_info_by_id(fs_id)
        return (fs['HEALTHSTATUS'] == constants.STATUS_FS_HEALTH and
                fs['RUNNINGSTATUS'] == constants.STATUS_FS_RUNNING)

    wait_for_condition(_wait_fs_online, wait_interval, timeout)


def share_name(name):
    """Array object names cannot contain '-'; use '_' instead."""
    return name.replace('-', '_')


def snapshot_name(name):
    """Array snapshot names cannot contain '-'; use '_' instead."""
    return name.replace('-', '_')


def snapshot_id(fs_id, name):
    """Array snapshot IDs are '<fs_id>@<normalized name>'."""
    return fs_id + "@" + snapshot_name(name)


def share_size(size):
    """Convert a size in GB to array capacity units (512-byte sectors)."""
    return int(size) * constants.CAPACITY_UNIT


def share_path(name):
    """Export path for a share: '/<normalized name>/'."""
    return "/" + name.replace("-", "_") + "/"


def get_share_by_location(export_location, share_proto):
    """Split an export location into (ip, share_name); None on mismatch.

    NFS locations look like 'ip:/name'; CIFS like '\\\\ip\\name'.
    """
    share_ip = None
    share_name = None

    if share_proto == 'NFS':
        export_location_split = export_location.split(':/')
        if len(export_location_split) == 2:
            share_ip = export_location_split[0]
            share_name = export_location_split[1]
    elif share_proto == 'CIFS':
        export_location_split = export_location.split('\\')
        if len(export_location_split) == 4:
            share_ip = export_location_split[2]
            share_name = export_location_split[3]
    else:
        msg = _('Invalid NAS protocol %s.') % share_proto
        raise exception.InvalidInput(reason=msg)

    return share_ip, share_name


def get_access_info(access):
    """Return (access_to, access_type, access_level) from an access rule."""
    return access['access_to'], access['access_type'], access['access_level']


def get_replica_pair_id(helper, fs_name):
    """Return the first remote-replication pair ID of *fs_name*, if any."""
    fs_info = helper.get_fs_info_by_name(fs_name)
    if fs_info:
        replication_ids = json.loads(fs_info['REMOTEREPLICATIONIDS'])
        if replication_ids:
            return replication_ids[0]


def get_hypermetro_vstore_id(helper, domain_name, local_vstore,
                             remote_vstore):
    """Resolve the HyperMetro vStore pair ID; raise InvalidInput when the
    domain/vStore combination does not match any pair.
    """
    try:
        vstore_pair_id = helper.get_hypermetro_vstore_id(
            domain_name, local_vstore, remote_vstore)
    except Exception as err:
        msg = _("Failed to get vStore pair id, reason: %s") % err
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)
    if vstore_pair_id is None:
        msg = _("Failed to get vStore pair id, please check relation "
                "among metro domain, local vStore name and remote "
                "vStore name.")
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)
    return vstore_pair_id
class HyperPairManager(object):
    """Manages HyperMetro filesystem pairs through a backend helper."""

    def __init__(self, helper, configuration):
        self.helper = helper
        self.configuration = configuration

    def create_remote_filesystem(self, params):
        """Create a filesystem and wait until it is healthy and running.

        :returns: the id of the created filesystem.
        """
        fs_id = self.helper.create_filesystem(params)
        huawei_utils.wait_fs_online(
            self.helper, fs_id, self.configuration.wait_interval,
            self.configuration.timeout)
        return fs_id

    def delete_remote_filesystem(self, params):
        """Delete the filesystem described by *params*."""
        self.helper.delete_filesystem(params)

    def update_filesystem(self, fs_id, params):
        """Apply *params* to an existing filesystem."""
        self.helper.update_filesystem(fs_id, params)

    def create_metro_pair(self, domain_name, local_fs_id,
                          remote_fs_id, vstore_pair_id):
        """Create a HyperMetro pair and start its synchronization.

        :returns: the pair info dict returned by the array.
        """
        try:
            domain_id = self._get_domain_id(domain_name)
            # Create a HyperMetro Pair.
            # NOTE(review): HCRESOURCETYPE=2 presumably selects filesystem
            # resources — confirm against the array REST API reference.
            pair_params = {
                "DOMAINID": domain_id,
                "HCRESOURCETYPE": 2,
                "LOCALOBJID": local_fs_id,
                "REMOTEOBJID": remote_fs_id,
                "VSTOREPAIRID": vstore_pair_id,
            }
            pair_info = self.helper.create_hypermetro_pair(pair_params)
        except Exception:
            LOG.exception("Failed to create HyperMetro pair for share %s.",
                          local_fs_id)
            raise
        self._sync_metro_pair(pair_info['ID'])
        return pair_info

    def _get_domain_id(self, domain_name):
        """Resolve a HyperMetro domain name to its id; raise if missing."""
        domain_id = self.helper.get_hypermetro_domain_id(domain_name)
        if not domain_id:
            err_msg = _("HyperMetro domain cannot be found.")
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
        return domain_id

    def _sync_metro_pair(self, pair_id):
        """Start synchronization of *pair_id*; failures propagate."""
        try:
            self.helper.sync_hypermetro_pair(pair_id)
        except Exception as err:
            LOG.warning('Failed to sync HyperMetro pair %(id)s. '
                        'Reason: %(err)s',
                        {'id': pair_id, 'err': err})
            raise

    def delete_metro_pair(self, metro_id):
        """Suspend and then delete HyperMetro pair *metro_id*."""
        try:
            self._suspend_metro_pair(metro_id)
            self.helper.delete_hypermetro_pair(metro_id)
        except Exception as err:
            LOG.exception('Failed to delete HyperMetro pair %(id)s. '
                          'Reason: %(err)s',
                          {'id': metro_id, 'err': err})
            raise

    def _suspend_metro_pair(self, pair_id):
        """Suspend *pair_id* if its running status allows suspension."""
        try:
            metro_info = self._get_metro_pair_info(pair_id)
            if metro_info["RUNNINGSTATUS"] in (
                    constants.METRO_RUNNING_STATUS_NORMAL,
                    constants.METRO_RUNNING_STATUS_SYNCING,
                    constants.METRO_RUNNING_STATUS_TO_BE_SYNC):
                self.helper.suspend_hypermetro_pair(pair_id)
            else:
                # Fix: the old message read as if suspension happened; make
                # it clear the pair is skipped because of its state.
                LOG.warning("Skip suspending HyperMetro pair %s: it is not "
                            "in the Normal, Synchronizing, or To Be "
                            "Synchronized state.", pair_id)
                return
        except Exception as err:
            LOG.exception('Failed to suspend HyperMetro pair %(id)s. '
                          'Reason: %(err)s',
                          {'id': pair_id, 'err': err})
            raise

    def _get_metro_pair_info(self, pair_id):
        """Fetch pair info for *pair_id*; log and re-raise on failure."""
        try:
            pair_info = self.helper.get_hypermetro_pair_by_id(pair_id)
        except Exception as err:
            LOG.exception('Failed to get HyperMetro pair %(id)s. '
                          'Reason: %(err)s',
                          {'id': pair_id, 'err': err})
            raise
        return pair_info

    def check_remote_metro_info(self, domain_name, local_vstore,
                                remote_vstor, local_vstore_pair_id):
        """Verify the local and remote vStore pair ids match.

        :returns: the remote vStore pair id.
        :raises exception.InvalidInput: if the two pair ids differ.
        """
        remote_vstore_pair_id = huawei_utils.get_hypermetro_vstore_id(
            self.helper, domain_name, local_vstore, remote_vstor)
        if local_vstore_pair_id != remote_vstore_pair_id:
            msg = _("The local vStore pair and remote vStore pair are "
                    "inconsistent")
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)
        return remote_vstore_pair_id

    def get_remote_fs_info(self, share_name):
        """Return filesystem info for *share_name*.

        Fix: the helper result was fetched but never returned.
        """
        return self.helper.get_fs_info_by_name(share_name)
class HuaweiManager(object):
    """Manages the huawei storage backend rpc.

    Server-side endpoint for the HuaweiAPI rpc client: each method simply
    dispatches to the driver, the replication manager, or the metro
    manager it was constructed with.
    """

    RPC_API_VERSION = '1.0'

    @property
    def target(self):
        """This property is used by oslo_messaging (lazily built)."""
        if not hasattr(self, '_target'):
            import oslo_messaging as messaging
            self._target = messaging.Target(version=self.RPC_API_VERSION)
        return self._target

    def __init__(self, driver, replica_mgr, metro_mgr):
        self.driver = driver
        self.replica_mgr = replica_mgr
        self.metro_mgr = metro_mgr

    def create_replica_pair(
            self, ctx, local_share_info, remote_device_wwn, remote_fs_id,
            local_replication):
        """Create replication pair."""
        return self.replica_mgr.create(
            local_share_info, remote_device_wwn, remote_fs_id,
            local_replication)

    def create_remote_filesystem(self, context, params):
        """Create a filesystem on this backend for a metro pair."""
        return self.metro_mgr.create_remote_filesystem(params)

    def delete_remote_filesystem(self, context, params):
        """Delete a metro-pair filesystem on this backend."""
        return self.metro_mgr.delete_remote_filesystem(params)

    def update_filesystem(self, context, fs_id, params):
        """Apply *params* to filesystem *fs_id* on this backend."""
        return self.metro_mgr.update_filesystem(fs_id, params)

    def check_remote_metro_info(self, context, domain_name, local_vstore,
                                remote_vstore, vstore_pair_id):
        """Validate the vStore pair from this backend's point of view."""
        return self.metro_mgr.check_remote_metro_info(
            domain_name, local_vstore, remote_vstore, vstore_pair_id)

    def delete_share(self, context, share_name, share_proto):
        """Delete a share through the driver."""
        self.driver.rpc_delete_share(context, share_name, share_proto)

    def deny_access(self, context, params):
        """Revoke an access rule through the driver."""
        self.driver.rpc_deny_access(context, params)

    def allow_access(self, context, params):
        """Grant an access rule through the driver."""
        self.driver.rpc_allow_access(context, params)

    def get_remote_fs_info(self, context, share_name):
        """Return filesystem info for *share_name*.

        Fix: the metro manager's result was previously discarded, so rpc
        callers always received None.
        """
        return self.metro_mgr.get_remote_fs_info(share_name)
class ReplicaPairManager(object):
    """Manages array replication pairs (create/sync/switch-over/delete).

    Status values and state mapping come from the array pair info dicts
    (HEALTHSTATUS, RUNNINGSTATUS, SECRESDATASTATUS, SECRESACCESS,
    ISPRIMARY) and are translated to manila replica states.
    """

    def __init__(self, helper):
        self.helper = helper

    def create(self, local_share_info, remote_device_wwn, remote_fs_id,
               local_replication):
        """Create a replication pair for the given local share.

        Returns (local_pair_id, remote_pair_id).  For local replication
        the remote pair id is read back from the remote filesystem; for
        remote replication both sides share one pair id.  Sync is kicked
        off after creation.
        """
        local_share_name = local_share_info.get('name')

        try:
            local_fs_info = self.helper.get_fs_info_by_name(local_share_name)
            if not local_fs_info:
                msg = _("Local fs was not found by name %s.")
                LOG.error(msg, local_share_name)
                raise exception.ReplicationException(
                    reason=msg % local_share_name)
            local_fs_id = local_fs_info['ID']
            pair_params = {
                "LOCALRESID": local_fs_id,
                "LOCALRESTYPE": constants.FILE_SYSTEM_TYPE,
                "REMOTERESID": remote_fs_id,
                "REPLICATIONMODEL": constants.REPLICA_ASYNC_MODEL,
                # NOTE(review): '2' / '1' are array-defined enum values for
                # recovery policy and synchronize type — confirm against the
                # array REST API reference.
                "RECOVERYPOLICY": '2',
                "SYNCHRONIZETYPE": '1',
                "SPEED": constants.REPLICA_SPEED_MEDIUM,
            }

            if local_replication:
                pair_params["PAIRTYPE"] = constants.LOCAL_REPLICATION
            else:
                remote_device = self.helper.get_remote_device_by_wwn(
                    remote_device_wwn)
                pair_params["REMOTEDEVICEID"] = remote_device.get('ID')

            pair_info = self.helper.create_replication_pair(pair_params)
            local_pair_id = pair_info['ID']

            if local_replication:
                remote_fs = self.helper.get_fs_info_by_id(remote_fs_id)
                replication_ids = json.loads(remote_fs['REMOTEREPLICATIONIDS'])
                # Here must have a replication id.
                remote_pair_id = replication_ids[0]
            else:
                remote_pair_id = local_pair_id

        except Exception:
            LOG.exception("Failed to create replication pair for share %s.",
                          local_share_name)
            raise

        self._sync_replication_pair(local_pair_id)

        return local_pair_id, remote_pair_id

    def _get_replication_pair_info(self, replica_pair_id):
        """Fetch pair info from the array; log and re-raise on failure."""
        try:
            pair_info = self.helper.get_replication_pair_by_id(
                replica_pair_id)
        except Exception:
            LOG.exception('Failed to get replication pair info for %s.',
                          replica_pair_id)
            raise

        return pair_info

    def _check_replication_health(self, pair_info):
        """Return STATUS_ERROR for unhealthy pairs, None when healthy."""
        if (pair_info['HEALTHSTATUS'] !=
                constants.REPLICA_HEALTH_STATUS_NORMAL):
            return common_constants.STATUS_ERROR

    def _check_replication_running_status(self, pair_info):
        """Map RUNNINGSTATUS to a replica state, or None if inconclusive."""
        if (pair_info['RUNNINGSTATUS'] in (
                constants.REPLICA_RUNNING_STATUS_SPLITTED,
                constants.REPLICA_RUNNING_STATUS_TO_RECOVER)):
            return common_constants.REPLICA_STATE_OUT_OF_SYNC

        if (pair_info['RUNNINGSTATUS'] in (
                constants.REPLICA_RUNNING_STATUS_INTERRUPTED,
                constants.REPLICA_RUNNING_STATUS_INVALID)):
            return common_constants.STATUS_ERROR

    def _check_replication_secondary_data_status(self, pair_info):
        """Map secondary data status to in-sync / out-of-sync."""
        if (pair_info['SECRESDATASTATUS'] in
                constants.REPLICA_DATA_STATUS_IN_SYNC):
            return common_constants.REPLICA_STATE_IN_SYNC
        else:
            return common_constants.REPLICA_STATE_OUT_OF_SYNC

    def _check_replica_state(self, pair_info):
        """Combine health, running and data status checks, in that order."""
        result = self._check_replication_health(pair_info)
        if result is not None:
            return result

        result = self._check_replication_running_status(pair_info)
        if result is not None:
            return result

        return self._check_replication_secondary_data_status(pair_info)

    def get_replica_state(self, replica_pair_id):
        """Return the manila replica state for a pair, STATUS_ERROR on I/O failure."""
        try:
            pair_info = self._get_replication_pair_info(replica_pair_id)
        except Exception:
            # if cannot communicate to backend, return error
            LOG.error('Cannot get replica state, return error.')
            return common_constants.STATUS_ERROR

        return self._check_replica_state(pair_info)

    def _sync_replication_pair(self, pair_id):
        """Best-effort sync: failures are logged and deliberately swallowed."""
        try:
            self.helper.sync_replication_pair(pair_id)
        except Exception as err:
            LOG.warning('Failed to sync replication pair %(id)s. '
                        'Reason: %(err)s',
                        {'id': pair_id, 'err': err})

    def update_replication_pair_state(self, replica_pair_id):
        """Drive the pair back toward the expected secondary/in-sync state.

        Handles the three recovery situations in order: unhealthy pairs
        (resync only when "to recover"), a pair still primary after an
        incomplete switch-over, and a secondary left writable.
        """
        pair_info = self._get_replication_pair_info(replica_pair_id)

        def _is_to_recover(pair_info):
            return (pair_info['RUNNINGSTATUS'] ==
                    constants.REPLICA_RUNNING_STATUS_TO_RECOVER)

        health = self._check_replication_health(pair_info)
        if health is not None:
            if not _is_to_recover(pair_info):
                LOG.warning("Cannot update the replication %s because it's "
                            "not in normal status and not to recover.",
                            replica_pair_id)
                return

            # replication is in to-recover status, try to resync manually.
            LOG.debug("Resync replication %s because it is to recover.",
                      replica_pair_id)
            self._sync_replication_pair(replica_pair_id)
            return

        if strutils.bool_from_string(pair_info['ISPRIMARY']):
            # current replica is primary, not consistent with manila.
            # the reason for this circumstance is the last switch over
            # didn't succeed completely. continue the switch over progress..
            try:
                self.helper.switch_replication_pair(replica_pair_id)
            except Exception:
                LOG.exception(
                    'Replication pair %s primary/secondary relationship is '
                    'not right, try to switch over again but still failed.',
                    replica_pair_id)
                return

            # refresh the replication pair info
            pair_info = self._get_replication_pair_info(replica_pair_id)

        if pair_info['SECRESACCESS'] == constants.REPLICA_SECONDARY_RW:
            try:
                self.helper.set_pair_secondary_write_lock(replica_pair_id)
            except Exception:
                LOG.exception('Replication pair %s secondary access is R/W, '
                              'try to set write lock but still failed.',
                              replica_pair_id)
                return

        if pair_info['RUNNINGSTATUS'] in (
                constants.REPLICA_RUNNING_STATUS_NORMAL,
                constants.REPLICA_RUNNING_STATUS_SPLITTED,
                constants.REPLICA_RUNNING_STATUS_TO_RECOVER):
            self._sync_replication_pair(replica_pair_id)

    def switch_over(self, replica_pair_id):
        """Promote the secondary replica of the pair to primary.

        Requires the replica to be in sync.  Split may fail and is
        tolerated; cancelling the secondary write lock must succeed; the
        final switch/lock/sync steps are best-effort because the secondary
        is already accessible at that point.
        """
        pair_info = self._get_replication_pair_info(replica_pair_id)

        if strutils.bool_from_string(pair_info['ISPRIMARY']):
            LOG.warning('The replica to promote is already primary, '
                        'no need to switch over.')
            return

        replica_state = self._check_replica_state(pair_info)
        if replica_state != common_constants.REPLICA_STATE_IN_SYNC:
            # replica is not in SYNC state, can't be promoted
            msg = _('Data of replica %s is not sync, cannot promote.'
                    ) % replica_pair_id
            raise exception.ReplicationException(reason=msg)

        try:
            self.helper.split_replication_pair(replica_pair_id)
        except Exception:
            # split failed
            # means replication pair is in an abnormal status,
            # ignore this exception, continue to cancel secondary write lock,
            # let secondary share accessible for disaster recovery.
            LOG.exception('Failed to split replication pair %s while '
                          'switching over.', replica_pair_id)

        try:
            self.helper.cancel_pair_secondary_write_lock(replica_pair_id)
        except Exception:
            LOG.exception('Failed to cancel replication pair %s '
                          'secondary write lock.', replica_pair_id)
            raise

        try:
            self.helper.switch_replication_pair(replica_pair_id)
            self.helper.set_pair_secondary_write_lock(replica_pair_id)
            self.helper.sync_replication_pair(replica_pair_id)
        except Exception:
            LOG.exception('Failed to completely switch over '
                          'replication pair %s.', replica_pair_id)

        # for all the rest steps,
        # because secondary share is accessible now,
        # the upper business may access the secondary share,
        # return success to tell replica is primary.
        return

    def delete_replication_pair(self, replica_pair_id):
        """Split (best effort) and then delete the replication pair."""
        try:
            self.helper.split_replication_pair(replica_pair_id)
        except Exception:
            # Ignore this exception because replication pair may at some
            # abnormal status that supports deleting.
            LOG.warning('Failed to split replication pair %s before deletion, '
                        'try to delete it anyway.', replica_pair_id)

        try:
            self.helper.delete_replication_pair(replica_pair_id)
        except Exception:
            LOG.exception('Failed to delete replication pair %s.',
                          replica_pair_id)
            raise
class HuaweiAPI(object):
    """Client side of the huawei storage rpc API.

    API version history:

        1.0 - Initial version.
    """

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self):
        self.topic = 'huawei_storage'
        target = messaging.Target(topic=self.topic,
                                  version=self.BASE_RPC_API_VERSION)
        self.client = rpc.get_client(target, version_cap='1.0')

    def _call_with_retry(self, context, host, method, **kwargs):
        """Invoke an rpc call on *host*, retrying once on any failure.

        Replaces the seven hand-copied try/retry blocks (including the
        no-op ``except Exception: raise`` wrappers around ``prepare``).
        A second failure propagates to the caller.

        :returns: whatever the remote method returns.
        """
        call_context = self.client.prepare(server=host, version='1.0')
        try:
            return call_context.call(context, method, **kwargs)
        except Exception:
            # One blind retry, matching the original behavior of every
            # caller of this pattern.
            return call_context.call(context, method, **kwargs)

    def create_replica_pair(
            self, context, host, local_share_info, remote_device_wwn,
            remote_fs_id, local_replication):
        """Create a replication pair on the backend serving *host*.

        No retry here (unchanged from the original); the host is a manila
        host string and must be reduced via extract_host first.
        """
        new_host = utils.extract_host(host)
        call_context = self.client.prepare(server=new_host, version='1.0')
        return call_context.call(
            context, 'create_replica_pair',
            local_share_info=local_share_info,
            remote_device_wwn=remote_device_wwn,
            remote_fs_id=remote_fs_id,
            local_replication=local_replication,
        )

    def create_remote_filesystem(self, context, host, params):
        """Create a filesystem on the remote backend; returns its id."""
        return self._call_with_retry(
            context, host, 'create_remote_filesystem', params=params)

    def delete_remote_filesystem(self, context, host, params):
        """Delete a filesystem on the remote backend."""
        self._call_with_retry(
            context, host, 'delete_remote_filesystem', params=params)

    def delete_share(self, context, share_name, share_proto, host):
        """Delete a share on the backend serving *host*."""
        self._call_with_retry(
            context, host, 'delete_share',
            share_name=share_name, share_proto=share_proto)

    def deny_access(self, context, params, host):
        """Revoke an access rule on the backend serving *host*."""
        self._call_with_retry(context, host, 'deny_access', params=params)

    def allow_access(self, context, params, host):
        """Grant an access rule on the backend serving *host*."""
        self._call_with_retry(context, host, 'allow_access', params=params)

    def update_filesystem(self, context, host, fs_id, params):
        """Apply *params* to filesystem *fs_id* on the remote backend."""
        self._call_with_retry(
            context, host, 'update_filesystem', fs_id=fs_id, params=params)

    def check_remote_metro_info(self, context, host, domain_name,
                                local_vstore, remote_vstore, vstore_pair_id):
        """Ask the remote backend to validate its vStore pair.

        Local and remote vStore names are swapped on the wire — presumably
        because the peer sees the pair from its own point of view (TODO
        confirm).  Fix: the original retry path passed them UNSWAPPED,
        inconsistent with the first attempt; the first-attempt order (the
        one production traffic always exercised) is now used for both.
        """
        self._call_with_retry(
            context, host, 'check_remote_metro_info',
            domain_name=domain_name,
            local_vstore=remote_vstore,
            remote_vstore=local_vstore,
            vstore_pair_id=vstore_pair_id)

    def get_remote_fs_info(self, context, share_name, host):
        """Return filesystem info for *share_name* from the remote backend.

        Fix: the rpc result was previously discarded, so callers always
        received None.
        """
        return self._call_with_retry(
            context, host, 'get_remote_fs_info', share_name=share_name)
import json


class SmartPartition(object):
    """Binds filesystems to a named SmartPartition on the array."""

    def __init__(self, helper):
        self.helper = helper

    def add(self, partitionname, fs_id):
        """Add *fs_id* to the partition named *partitionname*.

        :raises exception.InvalidInput: if no such partition exists.
        """
        partition_id = self.helper.get_partition_id_by_name(partitionname)
        if not partition_id:
            msg = _('Partition %s not exist.') % partitionname
            raise exception.InvalidInput(reason=msg)

        self.helper.add_fs_to_partition(fs_id, partition_id)

    def remove(self, fs_id, partition_id):
        """Detach *fs_id* from *partition_id*."""
        self.helper.remove_fs_from_partition(fs_id, partition_id)

    def update(self, fs_id, partitionname, partition_id):
        """Move *fs_id* to *partitionname* unless it is already there."""
        partition_info = self.helper.get_partition_info_by_id(partition_id)
        if partition_info['NAME'] == partitionname:
            return

        self.remove(fs_id, partition_id)
        self.add(partitionname, fs_id)


class SmartCache(object):
    """Binds filesystems to a named SmartCache on the array."""

    def __init__(self, helper):
        self.helper = helper

    def add(self, cachename, fs_id):
        """Add *fs_id* to the cache named *cachename*.

        :raises exception.InvalidInput: if no such cache exists.
        """
        cache_id = self.helper.get_cache_id_by_name(cachename)
        if not cache_id:
            msg = _('Cache %s not exist.') % cachename
            raise exception.InvalidInput(reason=msg)

        self.helper.add_fs_to_cache(fs_id, cache_id)

    def remove(self, fs_id, cache_id):
        """Detach *fs_id* from *cache_id*."""
        self.helper.remove_fs_from_cache(fs_id, cache_id)

    def update(self, fs_id, cachename, cache_id):
        """Move *fs_id* to *cachename* unless it is already there."""
        cache_info = self.helper.get_cache_info_by_id(cache_id)
        if cache_info['NAME'] == cachename:
            return

        self.remove(fs_id, cache_id)
        self.add(cachename, fs_id)


class SmartQos(object):
    """Manages QoS policies attached to filesystems."""

    def __init__(self, helper):
        self.helper = helper

    def _check_qos_consistency(self, policy, qos):
        """Return True if *policy* already matches the requested *qos*."""
        check_keys = set(constants.QOS_KEYS) & set(qos.keys())
        policy_keys = set(constants.QOS_KEYS) & set(policy.keys())

        # A LATENCY of '0' on the existing policy is ignored for the
        # comparison.
        if 'LATENCY' in policy_keys and policy['LATENCY'] == '0':
            policy_keys.remove('LATENCY')

        if check_keys != policy_keys:
            return False

        for key in check_keys:
            if qos[key] != policy[key]:
                return False

        return True

    def add(self, qos, fs_id):
        """Create a QoS policy for *fs_id*, activate it, return its id.

        On activation failure the policy is rolled back before re-raising.
        """
        self._change_lun_priority(qos, fs_id)

        qos_id = self.helper.create_qos(qos, fs_id)
        try:
            self.helper.activate_deactivate_qos(qos_id, True)
        except exception.VolumeBackendAPIException:
            # Fix: arguments were passed as (qos_id, fs_id), swapped
            # relative to remove(self, fs_id, qos_id), so the rollback
            # targeted the wrong objects.
            self.remove(fs_id, qos_id)
            raise

        return qos_id

    def _change_lun_priority(self, qos, fs_id):
        """Raise IO priority when any lower-limit/latency key is present."""
        for key in qos:
            if key.startswith('MIN') or key.startswith('LATENCY'):
                data = {"IOPRIORITY": "3"}
                self.helper.update_filesystem(fs_id, data)
                break

    def remove(self, fs_id, qos_id, qos_info=None):
        """Detach *fs_id* from the QoS policy, deleting it when empty."""
        if not qos_info:
            qos_info = self.helper.get_qos_info(qos_id)
        fs_list = json.loads(qos_info['FSLIST'])
        if fs_id in fs_list:
            fs_list.remove(fs_id)

        if not fs_list:
            # No filesystem left: deactivate (if active) and delete.
            if qos_info['RUNNINGSTATUS'] != constants.QOS_INACTIVATED:
                self.helper.activate_deactivate_qos(qos_id, False)
            self.helper.delete_qos(qos_id)
        else:
            self.helper.update_qos_fs(qos_id, fs_list)

    def update(self, fs_id, new_qos, qos_id):
        """Replace the policy of *fs_id* unless it already matches *new_qos*."""
        qos_info = self.helper.get_qos_info(qos_id)
        if self._check_qos_consistency(qos_info, new_qos):
            return

        self.remove(fs_id, qos_id, qos_info)
        self.add(new_qos, fs_id)