diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index d83a99e2b1..8b78f4cd0b 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -24,13 +24,18 @@ jobs: with: python-version: ${{ matrix.python }} - run: python -m pip install -U uv - - run: uv pip install --system --only-binary=horovod -e .[cpu,test,torch] horovod[tensorflow-cpu] mpi4py mpich + - run: | + uv pip install --system mpich + uv pip install --system "torch==2.3.0+cpu.cxx11.abi" -i https://download.pytorch.org/whl/ + export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])') + uv pip install --system --only-binary=horovod -e .[cpu,test] horovod[tensorflow-cpu] mpi4py env: # Please note that uv has some issues with finding # existing TensorFlow package. Currently, it uses # TensorFlow in the build dependency, but if it # changes, setting `TENSORFLOW_ROOT`. TENSORFLOW_VERSION: ${{ matrix.python == '3.8' && '2.13.1' || '2.16.1' }} + DP_ENABLE_PYTORCH: 1 DP_BUILD_TESTING: 1 UV_EXTRA_INDEX_URL: "https://pypi.anaconda.org/njzjz/simple https://pypi.anaconda.org/mpi4py/simple" - run: dp --version diff --git a/deepmd/dpmodel/descriptor/dpa1.py b/deepmd/dpmodel/descriptor/dpa1.py index a239412416..e3cfe2e4a4 100644 --- a/deepmd/dpmodel/descriptor/dpa1.py +++ b/deepmd/dpmodel/descriptor/dpa1.py @@ -193,6 +193,12 @@ class DescrptDPA1(NativeOP, BaseDescriptor): Setting this parameter to `True` is equivalent to setting `tebd_input_mode` to 'strip'. Setting it to `False` is equivalent to setting `tebd_input_mode` to 'concat'. The default value is `None`, which means the `tebd_input_mode` setting will be used instead. + use_econf_tebd: bool, Optional + Whether to use electronic configuration type embedding. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. + Only used if `use_econf_tebd` is `True` in type embedding net. + spin (Only support None to keep consistent with other backend references.) (Not used in this version. Not-none option is not implemented.) @@ -242,6 +248,8 @@ def __init__( concat_output_tebd: bool = True, spin: Optional[Any] = None, stripped_type_embedding: Optional[bool] = None, + use_econf_tebd: bool = False, + type_map: Optional[List[str]] = None, # consistent with argcheck, not used though seed: Optional[int] = None, ) -> None: @@ -287,12 +295,16 @@ def __init__( trainable_ln=trainable_ln, ln_eps=ln_eps, ) + self.use_econf_tebd = use_econf_tebd + self.type_map = type_map self.type_embedding = TypeEmbedNet( ntypes=ntypes, neuron=[tebd_dim], padding=True, activation_function="Linear", precision=precision, + use_econf_tebd=use_econf_tebd, + type_map=type_map, ) self.tebd_dim = tebd_dim self.concat_output_tebd = concat_output_tebd @@ -457,6 +469,8 @@ def serialize(self) -> dict: "smooth_type_embedding": obj.smooth, "type_one_side": obj.type_one_side, "concat_output_tebd": self.concat_output_tebd, + "use_econf_tebd": self.use_econf_tebd, + "type_map": self.type_map, # make deterministic "precision": np.dtype(PRECISION_DICT[obj.precision]).name, "embeddings": obj.embeddings.serialize(), diff --git a/deepmd/dpmodel/descriptor/dpa2.py b/deepmd/dpmodel/descriptor/dpa2.py index c22b9e9bfe..78bf174685 100644 --- a/deepmd/dpmodel/descriptor/dpa2.py +++ b/deepmd/dpmodel/descriptor/dpa2.py @@ -323,6 +323,8 @@ def __init__( trainable: bool = True, seed: Optional[int] = None, add_tebd_to_repinit_out: bool = False, + use_econf_tebd: bool = False, + type_map: Optional[List[str]] = None, ): r"""The DPA-2 descriptor. 
see https://arxiv.org/abs/2312.15492. @@ -350,6 +352,11 @@ def __init__( (Unused yet) Random seed for parameter initialization. add_tebd_to_repinit_out : bool, optional Whether to add type embedding to the output representation from repinit before inputting it into repformer. + use_econf_tebd : bool, Optional + Whether to use electronic configuration type embedding. + type_map : List[str], Optional + A list of strings. Give the name to each type of atoms. + Only used if `use_econf_tebd` is `True` in type embedding net. Returns ------- @@ -433,12 +440,16 @@ def init_subclass_params(sub_data, sub_class): trainable_ln=self.repformer_args.trainable_ln, ln_eps=self.repformer_args.ln_eps, ) + self.use_econf_tebd = use_econf_tebd + self.type_map = type_map self.type_embedding = TypeEmbedNet( ntypes=ntypes, neuron=[self.repinit_args.tebd_dim], padding=True, activation_function="Linear", precision=precision, + use_econf_tebd=use_econf_tebd, + type_map=type_map, ) self.concat_output_tebd = concat_output_tebd self.precision = precision @@ -641,6 +652,8 @@ def serialize(self) -> dict: "env_protection": self.env_protection, "trainable": self.trainable, "add_tebd_to_repinit_out": self.add_tebd_to_repinit_out, + "use_econf_tebd": self.use_econf_tebd, + "type_map": self.type_map, "type_embedding": self.type_embedding.serialize(), "g1_shape_tranform": self.g1_shape_tranform.serialize(), } diff --git a/deepmd/dpmodel/infer/deep_eval.py b/deepmd/dpmodel/infer/deep_eval.py index 1db5d539cf..2a006f1bd6 100644 --- a/deepmd/dpmodel/infer/deep_eval.py +++ b/deepmd/dpmodel/infer/deep_eval.py @@ -76,10 +76,10 @@ def __init__( self, model_file: str, output_def: ModelOutputDef, - *args: List[Any], + *args: Any, auto_batch_size: Union[bool, int, AutoBatchSize] = True, neighbor_list: Optional["ase.neighborlist.NewPrimitiveNeighborList"] = None, - **kwargs: Dict[str, Any], + **kwargs: Any, ): self.output_def = output_def self.model_path = model_file @@ -161,12 +161,12 @@ def get_ntypes_spin(self): def eval( self, coords: np.ndarray, - cells: np.ndarray, + cells: Optional[np.ndarray], atom_types: np.ndarray, atomic: bool = False, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - **kwargs: Dict[str, Any], + **kwargs: Any, ) -> Dict[str, np.ndarray]: """Evaluate the energy, force and virial by using this DP. diff --git a/deepmd/dpmodel/utils/type_embed.py b/deepmd/dpmodel/utils/type_embed.py index 7527c122f3..201ac91cc6 100644 --- a/deepmd/dpmodel/utils/type_embed.py +++ b/deepmd/dpmodel/utils/type_embed.py @@ -39,6 +39,11 @@ class TypeEmbedNet(NativeOP): Random seed for initializing the network parameters. padding Concat the zero padding to the output, as the default embedding of empty type. + use_econf_tebd: bool, Optional + Whether to use electronic configuration type embedding. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. + Only used if `use_econf_tebd` is `True` in type embedding net. 
""" def __init__( @@ -52,6 +57,8 @@ def __init__( trainable: bool = True, seed: Optional[int] = None, padding: bool = False, + use_econf_tebd: bool = False, + type_map: Optional[List[str]] = None, ) -> None: self.ntypes = ntypes self.neuron = neuron @@ -61,8 +68,33 @@ def __init__( self.activation_function = str(activation_function) self.trainable = trainable self.padding = padding + self.use_econf_tebd = use_econf_tebd + self.type_map = type_map + embed_input_dim = ntypes + if self.use_econf_tebd: + from deepmd.utils.econf_embd import ( + ECONF_DIM, + electronic_configuration_embedding, + ) + from deepmd.utils.econf_embd import type_map as periodic_table + + assert ( + self.type_map is not None + ), "When using electronic configuration type embedding, type_map must be provided!" + + missing_types = [t for t in self.type_map if t not in periodic_table] + assert not missing_types, ( + "When using electronic configuration type embedding, " + "all element in type_map should be in periodic table! " + f"Found these invalid elements: {missing_types}" + ) + self.econf_tebd = np.array( + [electronic_configuration_embedding[kk] for kk in self.type_map], + dtype=PRECISION_DICT[self.precision], + ) + embed_input_dim = ECONF_DIM self.embedding_net = EmbeddingNet( - ntypes, + embed_input_dim, self.neuron, self.activation_function, self.resnet_dt, @@ -71,9 +103,12 @@ def __init__( def call(self) -> np.ndarray: """Compute the type embedding network.""" - embed = self.embedding_net( - np.eye(self.ntypes, dtype=PRECISION_DICT[self.precision]) - ) + if not self.use_econf_tebd: + embed = self.embedding_net( + np.eye(self.ntypes, dtype=PRECISION_DICT[self.precision]) + ) + else: + embed = self.embedding_net(self.econf_tebd) if self.padding: embed = np.pad(embed, ((0, 1), (0, 0)), mode="constant") return embed @@ -120,5 +155,7 @@ def serialize(self) -> dict: "activation_function": self.activation_function, "trainable": self.trainable, "padding": self.padding, + "use_econf_tebd": self.use_econf_tebd, + "type_map": self.type_map, "embedding": self.embedding_net.serialize(), } diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py index 5f4e758f0b..1d2248aa0d 100644 --- a/deepmd/entrypoints/test.py +++ b/deepmd/entrypoints/test.py @@ -46,14 +46,7 @@ ) if TYPE_CHECKING: - from deepmd.tf.infer import ( - DeepDipole, - DeepDOS, - DeepPolar, - DeepPot, - DeepWFC, - ) - from deepmd.tf.infer.deep_tensor import ( + from deepmd.infer.deep_tensor import ( DeepTensor, ) diff --git a/deepmd/infer/deep_dos.py b/deepmd/infer/deep_dos.py index 7823f02999..c8d55560b6 100644 --- a/deepmd/infer/deep_dos.py +++ b/deepmd/infer/deep_dos.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Any, - Dict, List, Optional, Tuple, @@ -70,7 +69,7 @@ def eval( fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, mixed_type: bool = False, - **kwargs: Dict[str, Any], + **kwargs: Any, ) -> Tuple[np.ndarray, ...]: """Evaluate energy, force, and virial. If atomic is True, also return atomic energy and atomic virial. 
diff --git a/deepmd/infer/deep_eval.py b/deepmd/infer/deep_eval.py index aae2082e13..5a00ba616d 100644 --- a/deepmd/infer/deep_eval.py +++ b/deepmd/infer/deep_eval.py @@ -11,6 +11,7 @@ List, Optional, Tuple, + Type, Union, ) @@ -82,10 +83,10 @@ def __init__( self, model_file: str, output_def: ModelOutputDef, - *args: List[Any], + *args: Any, auto_batch_size: Union[bool, int, AutoBatchSize] = True, neighbor_list: Optional["ase.neighborlist.NewPrimitiveNeighborList"] = None, - **kwargs: Dict[str, Any], + **kwargs: Any, ) -> None: pass @@ -99,12 +100,12 @@ def __new__(cls, model_file: str, *args, **kwargs): def eval( self, coords: np.ndarray, - cells: np.ndarray, + cells: Optional[np.ndarray], atom_types: np.ndarray, atomic: bool = False, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - **kwargs: Dict[str, Any], + **kwargs: Any, ) -> Dict[str, np.ndarray]: """Evaluate the energy, force and virial by using this DP. @@ -166,13 +167,13 @@ def get_dim_aparam(self) -> int: def eval_descriptor( self, coords: np.ndarray, - cells: np.ndarray, + cells: Optional[np.ndarray], atom_types: np.ndarray, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, efield: Optional[np.ndarray] = None, mixed_type: bool = False, - **kwargs: Dict[str, Any], + **kwargs: Any, ) -> np.ndarray: """Evaluate descriptors by using this DP. @@ -246,11 +247,11 @@ def _check_mixed_types(self, atom_types: np.ndarray) -> bool: # assume mixed_types if there are virtual types, even when # the atom types of all frames are the same return False - return np.all(np.equal(atom_types, atom_types[0])) + return np.all(np.equal(atom_types, atom_types[0])).item() @property @abstractmethod - def model_type(self) -> "DeepEval": + def model_type(self) -> Type["DeepEval"]: """The the evaluator of the model type.""" @abstractmethod @@ -316,10 +317,10 @@ def __new__(cls, model_file: str, *args, **kwargs): def __init__( self, model_file: str, - *args: List[Any], + *args: Any, auto_batch_size: Union[bool, int, AutoBatchSize] = True, neighbor_list: Optional["ase.neighborlist.NewPrimitiveNeighborList"] = None, - **kwargs: Dict[str, Any], + **kwargs: Any, ) -> None: self.deep_eval = DeepEvalBackend( model_file, @@ -387,7 +388,7 @@ def eval_descriptor( fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, mixed_type: bool = False, - **kwargs: Dict[str, Any], + **kwargs: Any, ) -> np.ndarray: """Evaluate descriptors by using this DP. diff --git a/deepmd/infer/deep_polar.py b/deepmd/infer/deep_polar.py index f857619871..c2089b278d 100644 --- a/deepmd/infer/deep_polar.py +++ b/deepmd/infer/deep_polar.py @@ -50,7 +50,7 @@ def eval( fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, mixed_type: bool = False, - **kwargs: dict, + **kwargs, ) -> np.ndarray: """Evaluate the model. 
diff --git a/deepmd/infer/deep_pot.py b/deepmd/infer/deep_pot.py index bc0bfc9599..401698bb14 100644 --- a/deepmd/infer/deep_pot.py +++ b/deepmd/infer/deep_pot.py @@ -1,11 +1,12 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( Any, - Dict, List, + Literal, Optional, Tuple, Union, + overload, ) import numpy as np @@ -89,6 +90,48 @@ def output_def_mag(self) -> ModelOutputDef: ) ) + @overload + def eval( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: Union[List[int], np.ndarray], + atomic: Literal[True], + fparam: Optional[np.ndarray], + aparam: Optional[np.ndarray], + mixed_type: bool, + **kwargs: Any, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: + pass + + @overload + def eval( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: Union[List[int], np.ndarray], + atomic: Literal[False], + fparam: Optional[np.ndarray], + aparam: Optional[np.ndarray], + mixed_type: bool, + **kwargs: Any, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + pass + + @overload + def eval( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: Union[List[int], np.ndarray], + atomic: bool, + fparam: Optional[np.ndarray], + aparam: Optional[np.ndarray], + mixed_type: bool, + **kwargs: Any, + ) -> Tuple[np.ndarray, ...]: + pass + def eval( self, coords: np.ndarray, @@ -98,7 +141,7 @@ def eval( fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, mixed_type: bool = False, - **kwargs: Dict[str, Any], + **kwargs: Any, ) -> Tuple[np.ndarray, ...]: """Evaluate energy, force, and virial. If atomic is True, also return atomic energy and atomic virial. diff --git a/deepmd/infer/model_devi.py b/deepmd/infer/model_devi.py index 477acf0282..61025bcb70 100644 --- a/deepmd/infer/model_devi.py +++ b/deepmd/infer/model_devi.py @@ -28,7 +28,7 @@ def calc_model_devi_f( fs: np.ndarray, real_f: Optional[np.ndarray] = None, relative: Optional[float] = None, - atomic: Literal[False] = False, + atomic: Literal[False] = ..., ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: ... @@ -37,11 +37,19 @@ def calc_model_devi_f( fs: np.ndarray, real_f: Optional[np.ndarray] = None, relative: Optional[float] = None, - *, - atomic: Literal[True], + atomic: Literal[True] = ..., ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: ... +@overload +def calc_model_devi_f( + fs: np.ndarray, + real_f: Optional[np.ndarray] = None, + relative: Optional[float] = None, + atomic: bool = False, +) -> Tuple[np.ndarray, ...]: ... 
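A minimal usage sketch (not part of the patch) of what these `typing.overload` stubs buy the caller: a static type checker narrows the number of returned arrays from the literal value of `atomic`. The array layout (n_models, n_frames, n_atoms, 3) and the toy sizes are assumptions for illustration.

import numpy as np

from deepmd.infer.model_devi import calc_model_devi_f

# force predictions from 4 models on 2 frames of a 3-atom system
fs = np.random.rand(4, 2, 3, 3)

# atomic left at its default (False) -> the Literal[False] overload applies,
# so exactly three arrays (max, min and mean force deviation per frame) are expected
max_devi_f, min_devi_f, avg_devi_f = calc_model_devi_f(fs)

# atomic=True -> the Literal[True] overload applies and a fourth,
# per-atom deviation array is part of the return type
max_devi_f, min_devi_f, avg_devi_f, devi_f = calc_model_devi_f(fs, atomic=True)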
+ + def calc_model_devi_f( fs: np.ndarray, real_f: Optional[np.ndarray] = None, diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index 8a3a61400d..0e3dd292cb 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -7,6 +7,7 @@ List, Optional, Tuple, + Type, Union, ) @@ -87,11 +88,11 @@ def __init__( self, model_file: str, output_def: ModelOutputDef, - *args: List[Any], + *args: Any, auto_batch_size: Union[bool, int, AutoBatchSize] = True, neighbor_list: Optional["ase.neighborlist.NewPrimitiveNeighborList"] = None, head: Optional[str] = None, - **kwargs: Dict[str, Any], + **kwargs: Any, ): self.output_def = output_def self.model_path = model_file @@ -165,7 +166,7 @@ def get_dim_aparam(self) -> int: return self.dp.model["Default"].get_dim_aparam() @property - def model_type(self) -> "DeepEvalWrapper": + def model_type(self) -> Type["DeepEvalWrapper"]: """The the evaluator of the model type.""" model_output_type = self.dp.model["Default"].model_output_type() if "energy" in model_output_type: @@ -211,12 +212,12 @@ def get_has_spin(self): def eval( self, coords: np.ndarray, - cells: np.ndarray, + cells: Optional[np.ndarray], atom_types: np.ndarray, atomic: bool = False, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, - **kwargs: Dict[str, Any], + **kwargs: Any, ) -> Dict[str, np.ndarray]: """Evaluate the energy, force and virial by using this DP. diff --git a/deepmd/pt/loss/ener.py b/deepmd/pt/loss/ener.py index ccc23b690c..97e329935a 100644 --- a/deepmd/pt/loss/ener.py +++ b/deepmd/pt/loss/ener.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( List, + Optional, ) import torch @@ -34,6 +35,11 @@ def __init__( limit_pref_ae: float = 0.0, start_pref_pf: float = 0.0, limit_pref_pf: float = 0.0, + relative_f: Optional[float] = None, + enable_atom_ener_coeff: bool = False, + start_pref_gf: float = 0.0, + limit_pref_gf: float = 0.0, + numb_generalized_coord: int = 0, use_l1_all: bool = False, inference=False, **kwargs, @@ -64,6 +70,18 @@ def __init__( The prefactor of atomic prefactor force loss at the start of the training. limit_pref_pf : float The prefactor of atomic prefactor force loss at the end of the training. + relative_f : float + If provided, relative force error will be used in the loss. The difference + of force will be normalized by the magnitude of the force in the label with + a shift given by relative_f + enable_atom_ener_coeff : bool + if true, the energy will be computed as \sum_i c_i E_i + start_pref_gf : float + The prefactor of generalized force loss at the start of the training. + limit_pref_gf : float + The prefactor of generalized force loss at the end of the training. + numb_generalized_coord : int + The dimension of generalized coordinates. use_l1_all : bool Whether to use L1 loss, if False (default), it will use L2 loss. 
inference : bool @@ -76,10 +94,9 @@ def __init__( self.has_e = (start_pref_e != 0.0 and limit_pref_e != 0.0) or inference self.has_f = (start_pref_f != 0.0 and limit_pref_f != 0.0) or inference self.has_v = (start_pref_v != 0.0 and limit_pref_v != 0.0) or inference - - # TODO EnergyStdLoss need support for atomic energy and atomic pref self.has_ae = (start_pref_ae != 0.0 and limit_pref_ae != 0.0) or inference self.has_pf = (start_pref_pf != 0.0 and limit_pref_pf != 0.0) or inference + self.has_gf = (start_pref_gf != 0.0 and limit_pref_gf != 0.0) or inference self.start_pref_e = start_pref_e self.limit_pref_e = limit_pref_e @@ -87,6 +104,19 @@ def __init__( self.limit_pref_f = limit_pref_f self.start_pref_v = start_pref_v self.limit_pref_v = limit_pref_v + self.start_pref_ae = start_pref_ae + self.limit_pref_ae = limit_pref_ae + self.start_pref_pf = start_pref_pf + self.limit_pref_pf = limit_pref_pf + self.start_pref_gf = start_pref_gf + self.limit_pref_gf = limit_pref_gf + self.relative_f = relative_f + self.enable_atom_ener_coeff = enable_atom_ener_coeff + self.numb_generalized_coord = numb_generalized_coord + if self.has_gf and self.numb_generalized_coord < 1: + raise RuntimeError( + "When generalized force loss is used, the dimension of generalized coordinates should be larger than 0" + ) self.use_l1_all = use_l1_all self.inference = inference @@ -118,18 +148,35 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): pref_e = self.limit_pref_e + (self.start_pref_e - self.limit_pref_e) * coef pref_f = self.limit_pref_f + (self.start_pref_f - self.limit_pref_f) * coef pref_v = self.limit_pref_v + (self.start_pref_v - self.limit_pref_v) * coef + pref_ae = self.limit_pref_ae + (self.start_pref_ae - self.limit_pref_ae) * coef + pref_pf = self.limit_pref_pf + (self.start_pref_pf - self.limit_pref_pf) * coef + pref_gf = self.limit_pref_gf + (self.start_pref_gf - self.limit_pref_gf) * coef + loss = torch.zeros(1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)[0] more_loss = {} # more_loss['log_keys'] = [] # showed when validation on the fly # more_loss['test_keys'] = [] # showed when doing dp test atom_norm = 1.0 / natoms if self.has_e and "energy" in model_pred and "energy" in label: + energy_pred = model_pred["energy"] + energy_label = label["energy"] + if self.enable_atom_ener_coeff and "atom_energy" in model_pred: + atom_ener_pred = model_pred["atom_energy"] + # when ener_coeff (\nu) is defined, the energy is defined as + # E = \sum_i \nu_i E_i + # instead of the sum of atomic energies. 
+ # + # A case is that we want to train reaction energy + # A + B -> C + D + # E = - E(A) - E(B) + E(C) + E(D) + # A, B, C, D could be put far away from each other + atom_ener_coeff = label["atom_ener_coeff"] + atom_ener_coeff = atom_ener_coeff.reshape(atom_ener_pred.shape) + energy_pred = torch.sum(atom_ener_coeff * atom_ener_pred, dim=1) find_energy = label.get("find_energy", 0.0) pref_e = pref_e * find_energy if not self.use_l1_all: - l2_ener_loss = torch.mean( - torch.square(model_pred["energy"] - label["energy"]) - ) + l2_ener_loss = torch.mean(torch.square(energy_pred - energy_label)) if not self.inference: more_loss["l2_ener_loss"] = self.display_if_exist( l2_ener_loss.detach(), find_energy @@ -142,77 +189,111 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): # more_loss['log_keys'].append('rmse_e') else: # use l1 and for all atoms l1_ener_loss = F.l1_loss( - model_pred["energy"].reshape(-1), - label["energy"].reshape(-1), + energy_pred.reshape(-1), + energy_label.reshape(-1), reduction="sum", ) loss += pref_e * l1_ener_loss more_loss["mae_e"] = self.display_if_exist( F.l1_loss( - model_pred["energy"].reshape(-1), - label["energy"].reshape(-1), + energy_pred.reshape(-1), + energy_label.reshape(-1), reduction="mean", ).detach(), find_energy, ) # more_loss['log_keys'].append('rmse_e') if mae: - mae_e = ( - torch.mean(torch.abs(model_pred["energy"] - label["energy"])) - * atom_norm - ) + mae_e = torch.mean(torch.abs(energy_pred - energy_label)) * atom_norm more_loss["mae_e"] = self.display_if_exist(mae_e.detach(), find_energy) - mae_e_all = torch.mean( - torch.abs(model_pred["energy"] - label["energy"]) - ) + mae_e_all = torch.mean(torch.abs(energy_pred - energy_label)) more_loss["mae_e_all"] = self.display_if_exist( mae_e_all.detach(), find_energy ) - if self.has_f and "force" in model_pred and "force" in label: + if ( + (self.has_f or self.has_pf or self.relative_f or self.has_gf) + and "force" in model_pred + and "force" in label + ): find_force = label.get("find_force", 0.0) pref_f = pref_f * find_force - if "force_target_mask" in model_pred: - force_target_mask = model_pred["force_target_mask"] - else: - force_target_mask = None - if not self.use_l1_all: - if force_target_mask is not None: - diff_f = (label["force"] - model_pred["force"]) * force_target_mask - force_cnt = force_target_mask.squeeze(-1).sum(-1) - l2_force_loss = torch.mean( - torch.square(diff_f).mean(-1).sum(-1) / force_cnt - ) - else: - diff_f = label["force"] - model_pred["force"] + force_pred = model_pred["force"] + force_label = label["force"] + diff_f = (force_label - force_pred).reshape(-1) + + if self.relative_f is not None: + force_label_3 = force_label.reshape(-1, 3) + norm_f = force_label_3.norm(dim=1, keepdim=True) + self.relative_f + diff_f_3 = diff_f.reshape(-1, 3) + diff_f_3 = diff_f_3 / norm_f + diff_f = diff_f_3.reshape(-1) + + if self.has_f: + if not self.use_l1_all: l2_force_loss = torch.mean(torch.square(diff_f)) - if not self.inference: - more_loss["l2_force_loss"] = self.display_if_exist( - l2_force_loss.detach(), find_force + if not self.inference: + more_loss["l2_force_loss"] = self.display_if_exist( + l2_force_loss.detach(), find_force + ) + loss += (pref_f * l2_force_loss).to(GLOBAL_PT_FLOAT_PRECISION) + rmse_f = l2_force_loss.sqrt() + more_loss["rmse_f"] = self.display_if_exist( + rmse_f.detach(), find_force ) - loss += (pref_f * l2_force_loss).to(GLOBAL_PT_FLOAT_PRECISION) - rmse_f = l2_force_loss.sqrt() - more_loss["rmse_f"] = 
self.display_if_exist(rmse_f.detach(), find_force) - else: - l1_force_loss = F.l1_loss( - label["force"], model_pred["force"], reduction="none" - ) - if force_target_mask is not None: - l1_force_loss *= force_target_mask - force_cnt = force_target_mask.squeeze(-1).sum(-1) - more_loss["mae_f"] = self.display_if_exist( - (l1_force_loss.mean(-1).sum(-1) / force_cnt).mean(), find_force - ) - l1_force_loss = (l1_force_loss.sum(-1).sum(-1) / force_cnt).sum() else: + l1_force_loss = F.l1_loss(force_label, force_pred, reduction="none") more_loss["mae_f"] = self.display_if_exist( l1_force_loss.mean().detach(), find_force ) l1_force_loss = l1_force_loss.sum(-1).mean(-1).sum() - loss += (pref_f * l1_force_loss).to(GLOBAL_PT_FLOAT_PRECISION) - if mae: - mae_f = torch.mean(torch.abs(diff_f)) - more_loss["mae_f"] = self.display_if_exist(mae_f.detach(), find_force) + loss += (pref_f * l1_force_loss).to(GLOBAL_PT_FLOAT_PRECISION) + if mae: + mae_f = torch.mean(torch.abs(diff_f)) + more_loss["mae_f"] = self.display_if_exist( + mae_f.detach(), find_force + ) + + if self.has_pf and "atom_pref" in label: + atom_pref = label["atom_pref"] + find_atom_pref = label.get("find_atom_pref", 0.0) + pref_pf = pref_pf * find_atom_pref + atom_pref_reshape = atom_pref.reshape(-1) + l2_pref_force_loss = (torch.square(diff_f) * atom_pref_reshape).mean() + if not self.inference: + more_loss["l2_pref_force_loss"] = self.display_if_exist( + l2_pref_force_loss.detach(), find_atom_pref + ) + loss += (pref_pf * l2_pref_force_loss).to(GLOBAL_PT_FLOAT_PRECISION) + rmse_pf = l2_pref_force_loss.sqrt() + more_loss["rmse_pf"] = self.display_if_exist( + rmse_pf.detach(), find_atom_pref + ) + + if self.has_gf and "drdq" in label: + drdq = label["drdq"] + find_drdq = label.get("find_drdq", 0.0) + pref_gf = pref_gf * find_drdq + force_reshape_nframes = force_pred.reshape(-1, natoms * 3) + force_label_reshape_nframes = force_label.reshape(-1, natoms * 3) + drdq_reshape = drdq.reshape(-1, natoms * 3, self.numb_generalized_coord) + gen_force_label = torch.einsum( + "bij,bi->bj", drdq_reshape, force_label_reshape_nframes + ) + gen_force = torch.einsum( + "bij,bi->bj", drdq_reshape, force_reshape_nframes + ) + diff_gen_force = gen_force_label - gen_force + l2_gen_force_loss = torch.square(diff_gen_force).mean() + if not self.inference: + more_loss["l2_gen_force_loss"] = self.display_if_exist( + l2_gen_force_loss.detach(), find_drdq + ) + loss += (pref_gf * l2_gen_force_loss).to(GLOBAL_PT_FLOAT_PRECISION) + rmse_gf = l2_gen_force_loss.sqrt() + more_loss["rmse_gf"] = self.display_if_exist( + rmse_gf.detach(), find_drdq + ) if self.has_v and "virial" in model_pred and "virial" in label: find_virial = label.get("find_virial", 0.0) @@ -229,6 +310,27 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): if mae: mae_v = torch.mean(torch.abs(diff_v)) * atom_norm more_loss["mae_v"] = self.display_if_exist(mae_v.detach(), find_virial) + + if self.has_ae and "atom_energy" in model_pred and "atom_ener" in label: + atom_ener = model_pred["atom_energy"] + atom_ener_label = label["atom_ener"] + find_atom_ener = label.get("find_atom_ener", 0.0) + pref_ae = pref_ae * find_atom_ener + atom_ener_reshape = atom_ener.reshape(-1) + atom_ener_label_reshape = atom_ener_label.reshape(-1) + l2_atom_ener_loss = torch.square( + atom_ener_label_reshape - atom_ener_reshape + ).mean() + if not self.inference: + more_loss["l2_atom_ener_loss"] = self.display_if_exist( + l2_atom_ener_loss.detach(), find_atom_ener + ) + loss += (pref_ae * 
l2_atom_ener_loss).to(GLOBAL_PT_FLOAT_PRECISION) + rmse_ae = l2_atom_ener_loss.sqrt() + more_loss["rmse_ae"] = self.display_if_exist( + rmse_ae.detach(), find_atom_ener + ) + if not self.inference: more_loss["rmse"] = torch.sqrt(loss.detach()) return model_pred, loss, more_loss @@ -288,4 +390,25 @@ def label_requirement(self) -> List[DataRequirementItem]: repeat=3, ) ) + if self.has_gf > 0: + label_requirement.append( + DataRequirementItem( + "drdq", + ndof=self.numb_generalized_coord * 3, + atomic=True, + must=False, + high_prec=False, + ) + ) + if self.enable_atom_ener_coeff: + label_requirement.append( + DataRequirementItem( + "atom_ener_coeff", + ndof=1, + atomic=True, + must=False, + high_prec=False, + default=1.0, + ) + ) return label_requirement diff --git a/deepmd/pt/loss/ener_spin.py b/deepmd/pt/loss/ener_spin.py index 3bd81adf77..78210a778b 100644 --- a/deepmd/pt/loss/ener_spin.py +++ b/deepmd/pt/loss/ener_spin.py @@ -34,23 +34,53 @@ def __init__( limit_pref_v=0.0, start_pref_ae: float = 0.0, limit_pref_ae: float = 0.0, - start_pref_pf: float = 0.0, - limit_pref_pf: float = 0.0, + enable_atom_ener_coeff: bool = False, use_l1_all: bool = False, inference=False, **kwargs, ): - """Construct a layer to compute loss on energy, real force, magnetic force and virial.""" + r"""Construct a layer to compute loss on energy, real force, magnetic force and virial. + + Parameters + ---------- + starter_learning_rate : float + The learning rate at the start of the training. + start_pref_e : float + The prefactor of energy loss at the start of the training. + limit_pref_e : float + The prefactor of energy loss at the end of the training. + start_pref_fr : float + The prefactor of real force loss at the start of the training. + limit_pref_fr : float + The prefactor of real force loss at the end of the training. + start_pref_fm : float + The prefactor of magnetic force loss at the start of the training. + limit_pref_fm : float + The prefactor of magnetic force loss at the end of the training. + start_pref_v : float + The prefactor of virial loss at the start of the training. + limit_pref_v : float + The prefactor of virial loss at the end of the training. + start_pref_ae : float + The prefactor of atomic energy loss at the start of the training. + limit_pref_ae : float + The prefactor of atomic energy loss at the end of the training. + enable_atom_ener_coeff : bool + if true, the energy will be computed as \sum_i c_i E_i + use_l1_all : bool + Whether to use L1 loss, if False (default), it will use L2 loss. + inference : bool + If true, it will output all losses found in output, ignoring the pre-factors. + **kwargs + Other keyword arguments. 
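A small NumPy sketch (illustrative, not part of the patch) of the generalized-force projection added to EnergyStdLoss above: Cartesian forces are contracted with the dR/dQ matrix supplied through the new "drdq" label, giving forces along `numb_generalized_coord` generalized coordinates, and the squared error is then taken in that reduced space. All sizes and values below are made up.

import numpy as np

nframes, natoms, ngen = 2, 3, 4                    # toy sizes
force = np.random.rand(nframes, natoms * 3)        # flattened Cartesian forces
force_label = np.random.rand(nframes, natoms * 3)
drdq = np.random.rand(nframes, natoms * 3, ngen)   # content of the "drdq" label

# same contraction as torch.einsum("bij,bi->bj", ...) in the loss
gen_force = np.einsum("bij,bi->bj", drdq, force)              # (nframes, ngen)
gen_force_label = np.einsum("bij,bi->bj", drdq, force_label)

l2_gen_force_loss = np.mean((gen_force_label - gen_force) ** 2)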
+ """ super().__init__() self.starter_learning_rate = starter_learning_rate self.has_e = (start_pref_e != 0.0 and limit_pref_e != 0.0) or inference self.has_fr = (start_pref_fr != 0.0 and limit_pref_fr != 0.0) or inference self.has_fm = (start_pref_fm != 0.0 and limit_pref_fm != 0.0) or inference - - # TODO EnergySpinLoss needs support for virial, atomic energy and atomic pref self.has_v = (start_pref_v != 0.0 and limit_pref_v != 0.0) or inference self.has_ae = (start_pref_ae != 0.0 and limit_pref_ae != 0.0) or inference - self.has_pf = (start_pref_pf != 0.0 and limit_pref_pf != 0.0) or inference self.start_pref_e = start_pref_e self.limit_pref_e = limit_pref_e @@ -60,6 +90,9 @@ def __init__( self.limit_pref_fm = limit_pref_fm self.start_pref_v = start_pref_v self.limit_pref_v = limit_pref_v + self.start_pref_ae = start_pref_ae + self.limit_pref_ae = limit_pref_ae + self.enable_atom_ener_coeff = enable_atom_ener_coeff self.use_l1_all = use_l1_all self.inference = inference @@ -92,18 +125,32 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): pref_fr = self.limit_pref_fr + (self.start_pref_fr - self.limit_pref_fr) * coef pref_fm = self.limit_pref_fm + (self.start_pref_fm - self.limit_pref_fm) * coef pref_v = self.limit_pref_v + (self.start_pref_v - self.limit_pref_v) * coef + pref_ae = self.limit_pref_ae + (self.start_pref_ae - self.limit_pref_ae) * coef loss = torch.tensor(0.0, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) more_loss = {} # more_loss['log_keys'] = [] # showed when validation on the fly # more_loss['test_keys'] = [] # showed when doing dp test atom_norm = 1.0 / natoms if self.has_e and "energy" in model_pred and "energy" in label: + energy_pred = model_pred["energy"] + energy_label = label["energy"] + if self.enable_atom_ener_coeff and "atom_energy" in model_pred: + atom_ener_pred = model_pred["atom_energy"] + # when ener_coeff (\nu) is defined, the energy is defined as + # E = \sum_i \nu_i E_i + # instead of the sum of atomic energies. 
+ # + # A case is that we want to train reaction energy + # A + B -> C + D + # E = - E(A) - E(B) + E(C) + E(D) + # A, B, C, D could be put far away from each other + atom_ener_coeff = label["atom_ener_coeff"] + atom_ener_coeff = atom_ener_coeff.reshape(atom_ener_pred.shape) + energy_pred = torch.sum(atom_ener_coeff * atom_ener_pred, dim=1) find_energy = label.get("find_energy", 0.0) pref_e = pref_e * find_energy if not self.use_l1_all: - l2_ener_loss = torch.mean( - torch.square(model_pred["energy"] - label["energy"]) - ) + l2_ener_loss = torch.mean(torch.square(energy_pred - energy_label)) if not self.inference: more_loss["l2_ener_loss"] = self.display_if_exist( l2_ener_loss.detach(), find_energy @@ -116,29 +163,24 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): # more_loss['log_keys'].append('rmse_e') else: # use l1 and for all atoms l1_ener_loss = F.l1_loss( - model_pred["energy"].reshape(-1), - label["energy"].reshape(-1), + energy_pred.reshape(-1), + energy_label.reshape(-1), reduction="sum", ) loss += pref_e * l1_ener_loss more_loss["mae_e"] = self.display_if_exist( F.l1_loss( - model_pred["energy"].reshape(-1), - label["energy"].reshape(-1), + energy_pred.reshape(-1), + energy_label.reshape(-1), reduction="mean", ).detach(), find_energy, ) # more_loss['log_keys'].append('rmse_e') if mae: - mae_e = ( - torch.mean(torch.abs(model_pred["energy"] - label["energy"])) - * atom_norm - ) + mae_e = torch.mean(torch.abs(energy_pred - energy_label)) * atom_norm more_loss["mae_e"] = self.display_if_exist(mae_e.detach(), find_energy) - mae_e_all = torch.mean( - torch.abs(model_pred["energy"] - label["energy"]) - ) + mae_e_all = torch.mean(torch.abs(energy_pred - energy_label)) more_loss["mae_e_all"] = self.display_if_exist( mae_e_all.detach(), find_energy ) @@ -209,6 +251,26 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): l1_force_mag_loss = l1_force_mag_loss.sum(-1).mean(-1).sum() loss += (pref_fm * l1_force_mag_loss).to(GLOBAL_PT_FLOAT_PRECISION) + if self.has_ae and "atom_energy" in model_pred and "atom_ener" in label: + atom_ener = model_pred["atom_energy"] + atom_ener_label = label["atom_ener"] + find_atom_ener = label.get("find_atom_ener", 0.0) + pref_ae = pref_ae * find_atom_ener + atom_ener_reshape = atom_ener.reshape(-1) + atom_ener_label_reshape = atom_ener_label.reshape(-1) + l2_atom_ener_loss = torch.square( + atom_ener_label_reshape - atom_ener_reshape + ).mean() + if not self.inference: + more_loss["l2_atom_ener_loss"] = self.display_if_exist( + l2_atom_ener_loss.detach(), find_atom_ener + ) + loss += (pref_ae * l2_atom_ener_loss).to(GLOBAL_PT_FLOAT_PRECISION) + rmse_ae = l2_atom_ener_loss.sqrt() + more_loss["rmse_ae"] = self.display_if_exist( + rmse_ae.detach(), find_atom_ener + ) + if not self.inference: more_loss["rmse"] = torch.sqrt(loss.detach()) return model_pred, loss, more_loss @@ -267,15 +329,4 @@ def label_requirement(self) -> List[DataRequirementItem]: high_prec=False, ) ) - if self.has_pf: - label_requirement.append( - DataRequirementItem( - "atom_pref", - ndof=1, - atomic=True, - must=False, - high_prec=False, - repeat=3, - ) - ) return label_requirement diff --git a/deepmd/pt/model/descriptor/dpa1.py b/deepmd/pt/model/descriptor/dpa1.py index 6a3a947cb1..b80d2d4c38 100644 --- a/deepmd/pt/model/descriptor/dpa1.py +++ b/deepmd/pt/model/descriptor/dpa1.py @@ -172,6 +172,11 @@ class DescrptDPA1(BaseDescriptor, torch.nn.Module): Setting this parameter to `True` is equivalent to setting `tebd_input_mode` to 
'strip'. Setting it to `False` is equivalent to setting `tebd_input_mode` to 'concat'. The default value is `None`, which means the `tebd_input_mode` setting will be used instead. + use_econf_tebd: bool, Optional + Whether to use electronic configuration type embedding. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. + Only used if `use_econf_tebd` is `True` in type embedding net. spin (Only support None to keep consistent with other backend references.) (Not used in this version. Not-none option is not implemented.) @@ -220,6 +225,8 @@ def __init__( smooth_type_embedding: bool = True, type_one_side: bool = False, stripped_type_embedding: Optional[bool] = None, + use_econf_tebd: bool = False, + type_map: Optional[List[str]] = None, # not implemented spin=None, type: Optional[str] = None, @@ -270,7 +277,15 @@ def __init__( ln_eps=ln_eps, old_impl=old_impl, ) - self.type_embedding = TypeEmbedNet(ntypes, tebd_dim, precision=precision) + self.use_econf_tebd = use_econf_tebd + self.type_map = type_map + self.type_embedding = TypeEmbedNet( + ntypes, + tebd_dim, + precision=precision, + use_econf_tebd=use_econf_tebd, + type_map=type_map, + ) self.tebd_dim = tebd_dim self.concat_output_tebd = concat_output_tebd self.trainable = trainable @@ -415,6 +430,8 @@ def serialize(self) -> dict: "smooth_type_embedding": obj.smooth, "type_one_side": obj.type_one_side, "concat_output_tebd": self.concat_output_tebd, + "use_econf_tebd": self.use_econf_tebd, + "type_map": self.type_map, # make deterministic "precision": RESERVED_PRECISON_DICT[obj.prec], "embeddings": obj.filter_layers.serialize(), diff --git a/deepmd/pt/model/descriptor/dpa2.py b/deepmd/pt/model/descriptor/dpa2.py index c2910e9414..600930bb7a 100644 --- a/deepmd/pt/model/descriptor/dpa2.py +++ b/deepmd/pt/model/descriptor/dpa2.py @@ -76,6 +76,8 @@ def __init__( trainable: bool = True, seed: Optional[int] = None, add_tebd_to_repinit_out: bool = False, + use_econf_tebd: bool = False, + type_map: Optional[List[str]] = None, old_impl: bool = False, ): r"""The DPA-2 descriptor. see https://arxiv.org/abs/2312.15492. @@ -104,6 +106,11 @@ def __init__( (Unused yet) Random seed for parameter initialization. add_tebd_to_repinit_out : bool, optional Whether to add type embedding to the output representation from repinit before inputting it into repformer. + use_econf_tebd : bool, Optional + Whether to use electronic configuration type embedding. + type_map : List[str], Optional + A list of strings. Give the name to each type of atoms. + Only used if `use_econf_tebd` is `True` in type embedding net. 
Returns ------- @@ -189,8 +196,14 @@ def init_subclass_params(sub_data, sub_class): ln_eps=self.repformer_args.ln_eps, old_impl=old_impl, ) + self.use_econf_tebd = use_econf_tebd + self.type_map = type_map self.type_embedding = TypeEmbedNet( - ntypes, self.repinit_args.tebd_dim, precision=precision + ntypes, + self.repinit_args.tebd_dim, + precision=precision, + use_econf_tebd=self.use_econf_tebd, + type_map=type_map, ) self.concat_output_tebd = concat_output_tebd self.precision = precision @@ -368,6 +381,8 @@ def serialize(self) -> dict: "env_protection": self.env_protection, "trainable": self.trainable, "add_tebd_to_repinit_out": self.add_tebd_to_repinit_out, + "use_econf_tebd": self.use_econf_tebd, + "type_map": self.type_map, "type_embedding": self.type_embedding.embedding.serialize(), "g1_shape_tranform": self.g1_shape_tranform.serialize(), } diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index da69650a21..1d46720af2 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -107,6 +107,8 @@ def get_zbl_model(model_params): ntypes = len(model_params["type_map"]) # descriptor model_params["descriptor"]["ntypes"] = ntypes + if model_params["descriptor"].get("use_econf_tebd", False): + model_params["descriptor"]["type_map"] = copy.deepcopy(model_params["type_map"]) descriptor = BaseDescriptor(**model_params["descriptor"]) # fitting fitting_net = model_params.get("fitting_net", None) @@ -152,6 +154,8 @@ def get_standard_model(model_params): ntypes = len(model_params["type_map"]) # descriptor model_params["descriptor"]["ntypes"] = ntypes + if model_params["descriptor"].get("use_econf_tebd", False): + model_params["descriptor"]["type_map"] = copy.deepcopy(model_params["type_map"]) descriptor = BaseDescriptor(**model_params["descriptor"]) # fitting fitting_net = model_params.get("fitting_net", None) diff --git a/deepmd/pt/model/network/network.py b/deepmd/pt/model/network/network.py index 09d9945b3b..e5f76368bc 100644 --- a/deepmd/pt/model/network/network.py +++ b/deepmd/pt/model/network/network.py @@ -32,8 +32,12 @@ import torch.utils.checkpoint +from deepmd.dpmodel.common import ( + PRECISION_DICT, +) from deepmd.pt.utils.utils import ( ActivationFn, + to_torch_tensor, ) @@ -556,18 +560,31 @@ def forward(self, inputs): class TypeEmbedNet(nn.Module): - def __init__(self, type_nums, embed_dim, bavg=0.0, stddev=1.0, precision="default"): + def __init__( + self, + type_nums, + embed_dim, + bavg=0.0, + stddev=1.0, + precision="default", + use_econf_tebd=False, + type_map=None, + ): """Construct a type embedding net.""" super().__init__() self.type_nums = type_nums self.embed_dim = embed_dim self.bavg = bavg self.stddev = stddev + self.use_econf_tebd = use_econf_tebd + self.type_map = type_map self.embedding = TypeEmbedNetConsistent( ntypes=self.type_nums, neuron=[self.embed_dim], padding=True, activation_function="Linear", + use_econf_tebd=use_econf_tebd, + type_map=type_map, precision=precision, ) # nn.init.normal_(self.embedding.weight[:-1], mean=bavg, std=stddev) @@ -622,6 +639,11 @@ class TypeEmbedNetConsistent(nn.Module): Random seed for initializing the network parameters. padding Concat the zero padding to the output, as the default embedding of empty type. + use_econf_tebd: bool, Optional + Whether to use electronic configuration type embedding. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. + Only used if `use_econf_tebd` is `True` in type embedding net. 
""" def __init__( @@ -635,6 +657,8 @@ def __init__( trainable: bool = True, seed: Optional[int] = None, padding: bool = False, + use_econf_tebd: bool = False, + type_map: Optional[List[str]] = None, ): """Construct a type embedding net.""" super().__init__() @@ -647,9 +671,37 @@ def __init__( self.activation_function = str(activation_function) self.trainable = trainable self.padding = padding + self.use_econf_tebd = use_econf_tebd + self.type_map = type_map + self.econf_tebd = None + embed_input_dim = ntypes + if self.use_econf_tebd: + from deepmd.utils.econf_embd import ( + ECONF_DIM, + electronic_configuration_embedding, + ) + from deepmd.utils.econf_embd import type_map as periodic_table + + assert ( + self.type_map is not None + ), "When using electronic configuration type embedding, type_map must be provided!" + + missing_types = [t for t in self.type_map if t not in periodic_table] + assert not missing_types, ( + "When using electronic configuration type embedding, " + "all element in type_map should be in periodic table! " + f"Found these invalid elements: {missing_types}" + ) + self.econf_tebd = to_torch_tensor( + np.array( + [electronic_configuration_embedding[kk] for kk in self.type_map], + dtype=PRECISION_DICT[self.precision], + ) + ) + embed_input_dim = ECONF_DIM # no way to pass seed? self.embedding_net = EmbeddingNet( - ntypes, + embed_input_dim, self.neuron, self.activation_function, self.resnet_dt, @@ -666,9 +718,13 @@ def forward(self, device: torch.device): type_embedding: torch.Tensor Type embedding network. """ - embed = self.embedding_net( - torch.eye(self.ntypes, dtype=self.prec, device=device) - ) + if not self.use_econf_tebd: + embed = self.embedding_net( + torch.eye(self.ntypes, dtype=self.prec, device=device) + ) + else: + assert self.econf_tebd is not None + embed = self.embedding_net(self.econf_tebd) if self.padding: embed = torch.cat( [embed, torch.zeros(1, embed.shape[1], dtype=self.prec, device=device)] @@ -717,6 +773,8 @@ def serialize(self) -> dict: "activation_function": self.activation_function, "trainable": self.trainable, "padding": self.padding, + "use_econf_tebd": self.use_econf_tebd, + "type_map": self.type_map, "embedding": self.embedding_net.serialize(), } diff --git a/deepmd/tf/descriptor/se_atten.py b/deepmd/tf/descriptor/se_atten.py index 8cbc0ab689..43c38b0955 100644 --- a/deepmd/tf/descriptor/se_atten.py +++ b/deepmd/tf/descriptor/se_atten.py @@ -2031,6 +2031,11 @@ class DescrptDPA1Compat(DescrptSeAtten): Whether to use smooth process in attention weights calculation. concat_output_tebd: bool Whether to concat type embedding at the output of the descriptor. + use_econf_tebd: bool, Optional + Whether to use electronic configuration type embedding. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. + Only used if `use_econf_tebd` is `True` in type embedding net. spin (Only support None to keep consistent with old implementation.) The old implementation of deepspin. 
@@ -2065,6 +2070,8 @@ def __init__( ln_eps: Optional[float] = 1e-3, smooth_type_embedding: bool = True, concat_output_tebd: bool = True, + use_econf_tebd: bool = False, + type_map: Optional[List[str]] = None, spin: Optional[Any] = None, # consistent with argcheck, not used though seed: Optional[int] = None, @@ -2113,6 +2120,8 @@ def __init__( env_protection=env_protection, ) self.tebd_dim = tebd_dim + self.use_econf_tebd = use_econf_tebd + self.type_map = type_map self.scaling_factor = scaling_factor self.normalize = normalize self.temperature = temperature @@ -2121,6 +2130,8 @@ def __init__( neuron=[self.tebd_dim], padding=True, activation_function="Linear", + use_econf_tebd=use_econf_tebd, + type_map=type_map, # precision=precision, ) self.concat_output_tebd = concat_output_tebd @@ -2303,6 +2314,8 @@ def serialize(self, suffix: str = "") -> dict: "normalize": self.normalize, "temperature": self.temperature, "concat_output_tebd": self.concat_output_tebd, + "use_econf_tebd": self.use_econf_tebd, + "type_map": self.type_map, "type_embedding": self.type_embedding.serialize(suffix), } ) diff --git a/deepmd/tf/infer/deep_eval.py b/deepmd/tf/infer/deep_eval.py index ccbd44cf97..825ac6704a 100644 --- a/deepmd/tf/infer/deep_eval.py +++ b/deepmd/tf/infer/deep_eval.py @@ -10,6 +10,7 @@ List, Optional, Tuple, + Type, Union, ) @@ -262,7 +263,7 @@ def _init_attr(self): @property @lru_cache(maxsize=None) - def model_type(self) -> "DeepEvalWrapper": + def model_type(self) -> Type["DeepEvalWrapper"]: """Get type of model. :type:str @@ -693,13 +694,13 @@ def _get_natoms_and_nframes( def eval( self, coords: np.ndarray, - cells: np.ndarray, + cells: Optional[np.ndarray], atom_types: np.ndarray, atomic: bool = False, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, efield: Optional[np.ndarray] = None, - **kwargs: Dict[str, Any], + **kwargs: Any, ) -> Dict[str, np.ndarray]: """Evaluate the energy, force and virial by using this DP. 
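A short usage sketch (not from the patch) motivating the `Optional[np.ndarray]` annotation on `cells` in the eval methods: passing `None` is the existing way to evaluate a non-periodic system, and the hints now say so. The model file name and coordinates are illustrative.

import numpy as np

from deepmd.infer import DeepPot

dp = DeepPot("graph.pb")                               # hypothetical trained model
coords = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 1.27]])   # 1 frame, 2 atoms, flattened xyz
atom_types = [0, 1]                                    # indices into the model's type_map

# cells=None selects the non-periodic code path, hence Optional[np.ndarray]
e, f, v = dp.eval(coords, None, atom_types)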
@@ -1023,7 +1024,7 @@ def _get_output_shape(self, odef, nframes, natoms): def eval_descriptor( self, coords: np.ndarray, - cells: np.ndarray, + cells: Optional[np.ndarray], atom_types: np.ndarray, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, @@ -1080,7 +1081,7 @@ def eval_descriptor( def _eval_descriptor_inner( self, coords: np.ndarray, - cells: np.ndarray, + cells: Optional[np.ndarray], atom_types: np.ndarray, fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, diff --git a/deepmd/tf/infer/deep_tensor.py b/deepmd/tf/infer/deep_tensor.py index 59fdab7cd1..c3ca22847e 100644 --- a/deepmd/tf/infer/deep_tensor.py +++ b/deepmd/tf/infer/deep_tensor.py @@ -146,7 +146,7 @@ def get_dim_aparam(self) -> int: def eval( self, coords: np.ndarray, - cells: np.ndarray, + cells: Optional[np.ndarray], atom_types: List[int], atomic: bool = True, fparam: Optional[np.ndarray] = None, @@ -276,7 +276,7 @@ def eval( def eval_full( self, coords: np.ndarray, - cells: np.ndarray, + cells: Optional[np.ndarray], atom_types: List[int], atomic: bool = False, fparam: Optional[np.array] = None, diff --git a/deepmd/tf/model/model.py b/deepmd/tf/model/model.py index 2718d53cbe..5914dcf48d 100644 --- a/deepmd/tf/model/model.py +++ b/deepmd/tf/model/model.py @@ -679,6 +679,8 @@ def __init__( if type_embedding is not None and isinstance(type_embedding, TypeEmbedNet): self.typeebd = type_embedding elif type_embedding is not None: + if type_embedding.get("use_econf_tebd", False): + type_embedding["type_map"] = type_map self.typeebd = TypeEmbedNet( ntypes=self.ntypes, **type_embedding, diff --git a/deepmd/tf/model/pairwise_dprc.py b/deepmd/tf/model/pairwise_dprc.py index 92e943d486..6b0e95e88b 100644 --- a/deepmd/tf/model/pairwise_dprc.py +++ b/deepmd/tf/model/pairwise_dprc.py @@ -82,6 +82,8 @@ def __init__( if isinstance(type_embedding, TypeEmbedNet): self.typeebd = type_embedding else: + if type_embedding.get("use_econf_tebd", False): + type_embedding["type_map"] = type_map self.typeebd = TypeEmbedNet( ntypes=self.ntypes, **type_embedding, diff --git a/deepmd/tf/utils/type_embed.py b/deepmd/tf/utils/type_embed.py index 0f566027c1..77a0744ea4 100644 --- a/deepmd/tf/utils/type_embed.py +++ b/deepmd/tf/utils/type_embed.py @@ -6,6 +6,11 @@ Union, ) +import numpy as np + +from deepmd.dpmodel.common import ( + PRECISION_DICT, +) from deepmd.dpmodel.utils.network import ( EmbeddingNet, ) @@ -95,6 +100,11 @@ class TypeEmbedNet: Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed padding Concat the zero padding to the output, as the default embedding of empty type. + use_econf_tebd: bool, Optional + Whether to use electronic configuration type embedding. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. + Only used if `use_econf_tebd` is `True` in type embedding net. 
""" def __init__( @@ -109,6 +119,8 @@ def __init__( seed: Optional[int] = None, uniform_seed: bool = False, padding: bool = False, + use_econf_tebd: bool = False, + type_map: Optional[List[str]] = None, **kwargs, ) -> None: """Constructor.""" @@ -123,6 +135,28 @@ def __init__( self.uniform_seed = uniform_seed self.type_embedding_net_variables = None self.padding = padding + self.use_econf_tebd = use_econf_tebd + self.type_map = type_map + if self.use_econf_tebd: + from deepmd.utils.econf_embd import ( + electronic_configuration_embedding, + ) + from deepmd.utils.econf_embd import type_map as periodic_table + + assert ( + self.type_map is not None + ), "When using electronic configuration type embedding, type_map must be provided!" + + missing_types = [t for t in self.type_map if t not in periodic_table] + assert not missing_types, ( + "When using electronic configuration type embedding, " + "all element in type_map should be in periodic table! " + f"Found these invalid elements: {missing_types}" + ) + self.econf_tebd = np.array( + [electronic_configuration_embedding[kk] for kk in self.type_map], + dtype=PRECISION_DICT[precision], + ) self.model_type = None def build( @@ -148,12 +182,18 @@ def build( The computational graph for embedded types """ assert ntypes == self.ntypes - types = tf.convert_to_tensor(list(range(ntypes)), dtype=tf.int32) - ebd_type = tf.cast( - tf.one_hot(tf.cast(types, dtype=tf.int32), int(ntypes)), - self.filter_precision, - ) - ebd_type = tf.reshape(ebd_type, [-1, ntypes]) + if not self.use_econf_tebd: + types = tf.convert_to_tensor(list(range(ntypes)), dtype=tf.int32) + ebd_type = tf.cast( + tf.one_hot(tf.cast(types, dtype=tf.int32), int(ntypes)), + self.filter_precision, + ) + else: + ebd_type = tf.cast( + tf.convert_to_tensor(self.econf_tebd), + self.filter_precision, + ) + ebd_type = tf.reshape(ebd_type, [ntypes, -1]) name = "type_embed_net" + suffix if ( nvnmd_cfg.enable @@ -271,8 +311,15 @@ def serialize(self, suffix: str = "") -> dict: else: type_embedding_pattern = TYPE_EMBEDDING_PATTERN assert self.type_embedding_net_variables is not None + embed_input_dim = self.ntypes + if self.use_econf_tebd: + from deepmd.utils.econf_embd import ( + ECONF_DIM, + ) + + embed_input_dim = ECONF_DIM embedding_net = EmbeddingNet( - in_dim=self.ntypes, + in_dim=embed_input_dim, neuron=self.neuron, activation_function=self.filter_activation_fn_name, resnet_dt=self.filter_resnet_dt, @@ -297,5 +344,7 @@ def serialize(self, suffix: str = "") -> dict: "activation_function": self.filter_activation_fn_name, "trainable": self.trainable, "padding": self.padding, + "use_econf_tebd": self.use_econf_tebd, + "type_map": self.type_map, "embedding": embedding_net.serialize(), } diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index 6678f62c63..0f88a33773 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -80,6 +80,7 @@ def type_embedding_args(): doc_activation_function = f'The activation function in the embedding net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.' doc_precision = f"The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision." 
doc_trainable = "If the parameters in the embedding net are trainable" + doc_use_econf_tebd = "Whether to use electronic configuration type embedding." return [ Argument("neuron", List[int], optional=True, default=[8], doc=doc_neuron), @@ -94,6 +95,9 @@ def type_embedding_args(): Argument("precision", str, optional=True, default="default", doc=doc_precision), Argument("trainable", bool, optional=True, default=True, doc=doc_trainable), Argument("seed", [int, None], optional=True, default=None, doc=doc_seed), + Argument( + "use_econf_tebd", bool, optional=True, default=False, doc=doc_use_econf_tebd + ), ] @@ -506,6 +510,7 @@ def descrpt_se_atten_args(): ) doc_ln_eps = "The epsilon value for layer normalization. The default value for TensorFlow is set to 1e-3 to keep consistent with keras while set to 1e-5 in PyTorch and DP implementation." doc_tebd_dim = "The dimension of atom type embedding." + doc_use_econf_tebd = r"Whether to use electronic configuration type embedding. For TensorFlow backend, please set `use_econf_tebd` in `type_embedding` block instead." doc_temperature = "The scaling factor of normalization in calculations of attention weights, which is used to scale the matmul(Q, K)." doc_scaling_factor = ( "The scaling factor of normalization in calculations of attention weights, which is used to scale the matmul(Q, K). " @@ -566,6 +571,13 @@ def descrpt_se_atten_args(): default=8, doc=doc_only_pt_supported + doc_tebd_dim, ), + Argument( + "use_econf_tebd", + bool, + optional=True, + default=False, + doc=doc_only_pt_supported + doc_use_econf_tebd, + ), Argument( "tebd_input_mode", str, @@ -634,6 +646,7 @@ def descrpt_dpa2_args(): doc_trainable = "If the parameters in the embedding net is trainable." doc_seed = "Random seed for parameter initialization." doc_add_tebd_to_repinit_out = "Add type embedding to the output representation from repinit before inputting it into repformer." + doc_use_econf_tebd = "Whether to use electronic configuration type embedding." 
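A hedged configuration sketch (values are illustrative, not taken from the patch) of where the new flag lives. For the PyTorch backend it goes into the descriptor block, and `get_standard_model`/`get_zbl_model` copy `type_map` into the descriptor when it is set; for the TensorFlow backend it goes into the `type_embedding` block instead, as the argument doc above states.

# PyTorch backend: flag inside the descriptor (se_atten / dpa1 / dpa2)
pt_model = {
    "type_map": ["O", "H"],
    "descriptor": {
        "type": "se_atten",
        "rcut": 6.0,
        "rcut_smth": 0.5,
        "sel": 120,
        "use_econf_tebd": True,
    },
    "fitting_net": {"neuron": [240, 240, 240]},
}

# TensorFlow backend: flag inside the type_embedding block
tf_model = {
    "type_map": ["O", "H"],
    "type_embedding": {"neuron": [8], "use_econf_tebd": True},
    "descriptor": {"type": "se_atten", "rcut": 6.0, "rcut_smth": 0.5, "sel": 120},
    "fitting_net": {"neuron": [240, 240, 240]},
}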
return [ # repinit args Argument("repinit", dict, dpa2_repinit_args(), doc=doc_repinit), @@ -673,6 +686,13 @@ def descrpt_dpa2_args(): alias=["repformer_add_type_ebd_to_seq"], doc=doc_add_tebd_to_repinit_out, ), + Argument( + "use_econf_tebd", + bool, + optional=True, + default=False, + doc=doc_only_pt_supported + doc_use_econf_tebd, + ), ] diff --git a/deepmd/utils/econf_embd.py b/deepmd/utils/econf_embd.py index 3940db65ba..a1b427ac7d 100644 --- a/deepmd/utils/econf_embd.py +++ b/deepmd/utils/econf_embd.py @@ -17,109 +17,124 @@ # fmt: off electronic_configuration_embedding = \ { kk: np.array(vv, dtype=np.int32) for kk,vv in { - "H" : [1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "He" : [2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Li" : [2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Be" : [2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "B" : [2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "C" : [2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "N" : [2,2,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "O" : [2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "F" : [2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ne" : [2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Na" : [2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Mg" : [2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Al" : [2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Si" : [2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "P" : [2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "S" : [2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Cl" : [2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ar" : [2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "K" : [2,2,2,2,2,2,2,2,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ca" : [2,2,2,2,2,2,2,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Sc" : [2,2,2,2,2,2,2,2,2,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ti" : [2,2,2,2,2,2,2,2,2,1,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "V" : [2,2,2,2,2,2,2,2,2,1,1,1,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Cr" : 
[2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Mn" : [2,2,2,2,2,2,2,2,2,1,1,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Fe" : [2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Co" : [2,2,2,2,2,2,2,2,2,2,2,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ni" : [2,2,2,2,2,2,2,2,2,2,2,2,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Cu" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Zn" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ga" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ge" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "As" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Se" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Br" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Kr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Rb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Sr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Y" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Zr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Nb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Mo" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Tc" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ru" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Rh" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Pd" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Ag" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Cd" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "In" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Sn" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Sb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Te" : 
[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "I" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Xe" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], - "Cs" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0], - "Ba" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "La" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Ce" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Pr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Nd" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Pm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Sm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Eu" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Gd" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Tb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Dy" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Ho" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Er" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Tm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Yb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Lu" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Hf" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Ta" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "W" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Re" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Os" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Ir" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Pt" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0], - "Au" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0], - "Hg" : 
[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0], - "Tl" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,0,0], - "Pb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,1,0,0,0,0,0,0,0], - "Bi" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,1,1,0,0,0,0,0,0], - "Po" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0], - "At" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,1,0,0,0,0,0,0], - "Rn" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,0], - "Fr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,1], - "Ra" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,2], - "Ac" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,1,0,0,0,0,2], - "Th" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,1,1,0,0,0,2], - "Pa" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,2,2,2,2,1,0,0,0,0,2], - "U" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,2,2,2,2,1,0,0,0,0,2], - "Np" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,2,2,2,2,1,0,0,0,0,2], - "Pu" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,0,2,2,2,2,0,0,0,0,0,2], - "Am" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,2,2,2,2,0,0,0,0,0,2], - "Cm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,2,2,2,2,1,0,0,0,0,2], - "Bk" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,2,2,2,2,0,0,0,0,0,2], - "Cf" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,2,2,2,0,0,0,0,0,2], - "Es" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,2,2,2,2,0,0,0,0,0,2], - "Fm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,2,2,2,2,0,0,0,0,0,2], - "Md" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,0,0,0,0,0,2], - "No" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,2], - "Lr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,2], + "H" : [1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "He" : [2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Li" : [2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Be" : [2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "B" : 
[2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "C" : [2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "N" : [2,2,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "O" : [2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "F" : [2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Ne" : [2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Na" : [2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Mg" : [2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Al" : [2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Si" : [2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "P" : [2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "S" : [2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Cl" : [2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Ar" : [2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "K" : [2,2,2,2,2,2,2,2,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Ca" : [2,2,2,2,2,2,2,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Sc" : [2,2,2,2,2,2,2,2,2,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Ti" : [2,2,2,2,2,2,2,2,2,1,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "V" : [2,2,2,2,2,2,2,2,2,1,1,1,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Cr" : [2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Mn" : [2,2,2,2,2,2,2,2,2,1,1,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Fe" : [2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Co" : [2,2,2,2,2,2,2,2,2,2,2,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Ni" : [2,2,2,2,2,2,2,2,2,2,2,2,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Cu" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Zn" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Ga" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Ge" : 
[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "As" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Se" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Br" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Kr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Rb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Sr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Y" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Zr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Nb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Mo" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Tc" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Ru" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Rh" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Pd" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Ag" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Cd" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "In" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Sn" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Sb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Te" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "I" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Xe" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "Cs" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0], + "Ba" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "La" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Ce" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Pr" : 
[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Nd" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Pm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Sm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,0,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Eu" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Gd" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Tb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Dy" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Ho" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Er" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Tm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Yb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Lu" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Hf" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Ta" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "W" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Re" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Os" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Ir" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Pt" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0], + "Au" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0], + "Hg" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0], + "Tl" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0], + "Pb" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0], + "Bi" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,1,1,1,0,0,0,0,0,0,0,0,0], + "Po" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,0,0,0], + "At" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,1,0,0,0,0,0,0,0,0,0], + "Rn" : 
[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,0,0,0,0], + "Fr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,1,0,0,0], + "Ra" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,0,0,0,0,0,2,0,0,0], + "Ac" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,1,0,0,0,0,2,0,0,0], + "Th" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,2,2,2,2,1,1,0,0,0,2,0,0,0], + "Pa" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,0,2,2,2,2,1,0,0,0,0,2,0,0,0], + "U" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,0,0,2,2,2,2,1,0,0,0,0,2,0,0,0], + "Np" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,0,0,2,2,2,2,1,0,0,0,0,2,0,0,0], + "Pu" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,0,2,2,2,2,0,0,0,0,0,2,0,0,0], + "Am" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,2,2,2,2,0,0,0,0,0,2,0,0,0], + "Cm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,2,2,2,2,1,0,0,0,0,2,0,0,0], + "Bk" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,2,2,2,2,0,0,0,0,0,2,0,0,0], + "Cf" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,2,2,2,0,0,0,0,0,2,0,0,0], + "Es" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,2,2,2,2,0,0,0,0,0,2,0,0,0], + "Fm" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,2,2,2,2,0,0,0,0,0,2,0,0,0], + "Md" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,0,0,0,0,0,2,0,0,0], + "No" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0,0,0,2,0,0,0], + "Lr" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0,0,2,0,0,0], + "Rf" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,2,0,0,0], + "Db" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,0,0,2,0,0,0], + "Sg" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,0,2,0,0,0], + "Bh" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,2,0,0,0], + "Hs" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,0,0,0], + "Mt" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,2,0,0,0], + "Ds" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0], + "Rg" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0,0], + "Cn" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,0], + "Nh" : 
[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,0,0], + "Fl" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0], + "Mc" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1], + "Lv" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1], + "Ts" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1], + "Og" : [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2], }.items()} # fmt: on @@ -151,6 +166,7 @@ (6, "p"), (6, "d"), (7, "s"), + (7, "p"), ] maxn = 7 @@ -158,6 +174,7 @@ maxm = 2 * maxl + 1 type_map = dpdata.periodic_table.ELEMENTS +ECONF_DIM = electronic_configuration_embedding[type_map[0]].shape[0] def make_empty_list_vec(): diff --git a/doc/getting-started/quick_start.ipynb b/doc/getting-started/quick_start.ipynb index 67674c4654..67462c91d4 100644 --- a/doc/getting-started/quick_start.ipynb +++ b/doc/getting-started/quick_start.ipynb @@ -396,7 +396,7 @@ } ], "source": [ - "! cat DeePMD-kit_Tutorial/00.data/training_data/type.raw " + "! cat DeePMD-kit_Tutorial/00.data/training_data/type.raw" ] }, { diff --git a/pyproject.toml b/pyproject.toml index 23d42e73d2..80d5ad9ee9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -298,6 +298,7 @@ docstring-code-format = true [tool.ruff.lint] select = [ "E", # errors + "W", # warning "F", # pyflakes "D", # pydocstyle "UP", # pyupgrade diff --git a/source/tests/common/test_econf_embd.py b/source/tests/common/test_econf_embd.py index e2f314e460..3c4d1a3b4a 100644 --- a/source/tests/common/test_econf_embd.py +++ b/source/tests/common/test_econf_embd.py @@ -37,19 +37,20 @@ def test_fe(self): (6, "p"): [0, 0, 0], (6, "d"): [0, 0, 0, 0, 0], (7, "s"): [0], + (7, "p"): [0, 0, 0], } self.assertDictEqual({kk: list(vv) for kk, vv in res.items()}, expected_res) def test_fe_flatten(self): res = make_econf_embedding(["Fe"], flatten=True)["Fe"] # fmt: off - expected_res = [2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] + expected_res = [2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # fmt: on self.assertEqual(list(res), expected_res) def test_dict(self): res = electronic_configuration_embedding["Fe"] # fmt: off - expected_res = [2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] + expected_res = [2,2,2,2,2,2,2,2,2,2,1,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # fmt: on self.assertEqual(list(res), expected_res) diff --git a/source/tests/consistent/descriptor/test_dpa1.py b/source/tests/consistent/descriptor/test_dpa1.py index a2d4ca074f..5e9ea01602 100644 --- a/source/tests/consistent/descriptor/test_dpa1.py +++ b/source/tests/consistent/descriptor/test_dpa1.py @@ -2,6 +2,7 @@ import unittest from typing import ( Any, + Optional, Tuple, ) @@ -54,8 +55,9 @@ (None, 1.0), # temperature (1e-5,), # ln_eps (True, False), # smooth_type_embedding - (True, False), # concat_output_tebd + (True,), # concat_output_tebd ("float64",), # precision + (True, False), # use_econf_tebd ) class TestDPA1(CommonTest, DescriptorTest, 
unittest.TestCase): @property @@ -78,6 +80,7 @@ def data(self) -> dict: smooth_type_embedding, concat_output_tebd, precision, + use_econf_tebd, ) = self.param return { "sel": [10], @@ -104,9 +107,20 @@ def data(self) -> dict: "precision": precision, "set_davg_zero": set_davg_zero, "smooth_type_embedding": smooth_type_embedding, + "use_econf_tebd": use_econf_tebd, + "type_map": ["O", "H"] if use_econf_tebd else None, "seed": 1145141919810, } + def is_meaningless_zero_attention_layer_tests( + self, + attn_layer: int, + attn_dotr: bool, + normalize: bool, + temperature: Optional[float], + ) -> bool: + return attn_layer == 0 and (attn_dotr or normalize or temperature is not None) + @property def skip_pt(self) -> bool: ( @@ -127,8 +141,14 @@ def skip_pt(self) -> bool: smooth_type_embedding, concat_output_tebd, precision, + use_econf_tebd, ) = self.param - return CommonTest.skip_pt + return CommonTest.skip_pt or self.is_meaningless_zero_attention_layer_tests( + attn_layer, + attn_dotr, + normalize, + temperature, + ) @property def skip_dp(self) -> bool: @@ -150,8 +170,14 @@ def skip_dp(self) -> bool: smooth_type_embedding, concat_output_tebd, precision, + use_econf_tebd, ) = self.param - return CommonTest.skip_pt + return CommonTest.skip_pt or self.is_meaningless_zero_attention_layer_tests( + attn_layer, + attn_dotr, + normalize, + temperature, + ) @property def skip_tf(self) -> bool: @@ -173,15 +199,25 @@ def skip_tf(self) -> bool: smooth_type_embedding, concat_output_tebd, precision, + use_econf_tebd, ) = self.param # TODO (excluded_types != [] and attn_layer > 0) need fix return ( - env_protection != 0.0 - or smooth_type_embedding - or not normalize - or temperature != 1.0 - or (excluded_types != [] and attn_layer > 0) - or (type_one_side and tebd_input_mode == "strip") # not consistent yet + CommonTest.skip_tf + or ( + env_protection != 0.0 + or smooth_type_embedding + or not normalize + or temperature != 1.0 + or (excluded_types != [] and attn_layer > 0) + or (type_one_side and tebd_input_mode == "strip") # not consistent yet + ) + or self.is_meaningless_zero_attention_layer_tests( + attn_layer, + attn_dotr, + normalize, + temperature, + ) ) tf_class = DescrptDPA1TF @@ -240,6 +276,7 @@ def setUp(self): smooth_type_embedding, concat_output_tebd, precision, + use_econf_tebd, ) = self.param def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: @@ -296,6 +333,7 @@ def rtol(self) -> float: smooth_type_embedding, concat_output_tebd, precision, + use_econf_tebd, ) = self.param if precision == "float64": return 1e-10 @@ -325,6 +363,7 @@ def atol(self) -> float: smooth_type_embedding, concat_output_tebd, precision, + use_econf_tebd, ) = self.param if precision == "float64": return 1e-10 diff --git a/source/tests/consistent/descriptor/test_dpa2.py b/source/tests/consistent/descriptor/test_dpa2.py index a3cf5303bd..25887aa4e5 100644 --- a/source/tests/consistent/descriptor/test_dpa2.py +++ b/source/tests/consistent/descriptor/test_dpa2.py @@ -53,7 +53,7 @@ (True,), # repformer_update_g2_has_g1g1 (True,), # repformer_update_g2_has_attn (False,), # repformer_update_h2 - (True, False), # repformer_attn2_has_gate + (True,), # repformer_attn2_has_gate ("res_avg", "res_residual"), # repformer_update_style ("norm", "const"), # repformer_update_residual_init (True,), # repformer_set_davg_zero @@ -63,6 +63,7 @@ ([], [[0, 1]]), # exclude_types ("float64",), # precision (True, False), # add_tebd_to_repinit_out + (True, False), # use_econf_tebd ) class TestDPA2(CommonTest, DescriptorTest, 
unittest.TestCase): @property @@ -89,6 +90,7 @@ def data(self) -> dict: exclude_types, precision, add_tebd_to_repinit_out, + use_econf_tebd, ) = self.param return { "ntypes": self.ntypes, @@ -146,6 +148,8 @@ def data(self) -> dict: "exclude_types": exclude_types, "env_protection": 0.0, "trainable": True, + "use_econf_tebd": use_econf_tebd, + "type_map": ["O", "H"] if use_econf_tebd else None, "add_tebd_to_repinit_out": add_tebd_to_repinit_out, } @@ -173,6 +177,7 @@ def skip_pt(self) -> bool: exclude_types, precision, add_tebd_to_repinit_out, + use_econf_tebd, ) = self.param return CommonTest.skip_pt @@ -200,6 +205,7 @@ def skip_dp(self) -> bool: exclude_types, precision, add_tebd_to_repinit_out, + use_econf_tebd, ) = self.param return CommonTest.skip_pt @@ -227,6 +233,7 @@ def skip_tf(self) -> bool: exclude_types, precision, add_tebd_to_repinit_out, + use_econf_tebd, ) = self.param return True @@ -290,6 +297,7 @@ def setUp(self): exclude_types, precision, add_tebd_to_repinit_out, + use_econf_tebd, ) = self.param def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]: @@ -350,6 +358,7 @@ def rtol(self) -> float: exclude_types, precision, add_tebd_to_repinit_out, + use_econf_tebd, ) = self.param if precision == "float64": return 1e-10 @@ -383,9 +392,10 @@ def atol(self) -> float: exclude_types, precision, add_tebd_to_repinit_out, + use_econf_tebd, ) = self.param if precision == "float64": - return 1e-10 + return 1e-8 elif precision == "float32": return 1e-4 else: diff --git a/source/tests/consistent/test_type_embedding.py b/source/tests/consistent/test_type_embedding.py index 2e20142a66..cf358771b3 100644 --- a/source/tests/consistent/test_type_embedding.py +++ b/source/tests/consistent/test_type_embedding.py @@ -36,6 +36,7 @@ (True, False), # resnet_dt ("float32", "float64"), # precision (True, False), # padding + (True, False), # use_econf_tebd ) class TestTypeEmbedding(CommonTest, unittest.TestCase): """Useful utilities for descriptor tests.""" @@ -46,11 +47,13 @@ def data(self) -> dict: resnet_dt, precision, padding, + use_econf_tebd, ) = self.param return { "neuron": [2, 4, 4], "resnet_dt": resnet_dt, "precision": precision, + "use_econf_tebd": use_econf_tebd, "seed": 20240327, } @@ -65,11 +68,13 @@ def addtional_data(self) -> dict: resnet_dt, precision, padding, + use_econf_tebd, ) = self.param # implict argument not input by users return { "ntypes": self.ntypes, "padding": padding, + "type_map": ["O", "H"] if use_econf_tebd else None, } def setUp(self): @@ -104,6 +109,7 @@ def rtol(self) -> float: resnet_dt, precision, padding, + use_econf_tebd, ) = self.param if precision == "float64": return 1e-10 @@ -121,6 +127,7 @@ def atol(self) -> float: resnet_dt, precision, padding, + use_econf_tebd, ) = self.param if precision == "float64": return 1e-10 diff --git a/source/tests/pt/model/test_dpa1.py b/source/tests/pt/model/test_dpa1.py index 01cbf259a7..17b2f1bc30 100644 --- a/source/tests/pt/model/test_dpa1.py +++ b/source/tests/pt/model/test_dpa1.py @@ -39,12 +39,15 @@ def test_consistency( dstd = rng.normal(size=(self.nt, nnei, 4)) dstd = 0.1 + np.abs(dstd) - for idt, sm, to, tm, prec in itertools.product( + for idt, sm, to, tm, prec, ect in itertools.product( [False, True], # resnet_dt [False, True], # smooth_type_embedding [False, True], # type_one_side ["concat", "strip"], # tebd_input_mode - ["float64", "float32"], # precision + [ + "float64", + ], # precision + [False, True], # use_econf_tebd ): dtype = PRECISION_DICT[prec] rtol, atol = get_tols(prec) @@ -62,6 +65,8 @@ def 
test_consistency( smooth_type_embedding=sm, type_one_side=to, tebd_input_mode=tm, + use_econf_tebd=ect, + type_map=["O", "H"] if ect else None, old_impl=False, ).to(env.DEVICE) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) @@ -100,7 +105,13 @@ def test_consistency( err_msg=err_msg, ) # old impl - if idt is False and prec == "float64" and to is False and tm == "concat": + if ( + idt is False + and prec == "float64" + and to is False + and tm == "concat" + and ect is False + ): dd3 = DescrptDPA1( self.rcut, self.rcut_smth, @@ -165,7 +176,7 @@ def test_jit( dstd = rng.normal(size=(self.nt, nnei, 4)) dstd = 0.1 + np.abs(dstd) - for idt, prec, sm, to, tm in itertools.product( + for idt, prec, sm, to, tm, ect in itertools.product( [ False, ], # resnet_dt @@ -173,8 +184,11 @@ def test_jit( "float64", ], # precision [False, True], # smooth_type_embedding - [False, True], # type_one_side + [ + False, + ], # type_one_side ["concat", "strip"], # tebd_input_mode + [False, True], # use_econf_tebd ): dtype = PRECISION_DICT[prec] rtol, atol = get_tols(prec) @@ -190,6 +204,8 @@ def test_jit( smooth_type_embedding=sm, type_one_side=to, tebd_input_mode=tm, + use_econf_tebd=ect, + type_map=["O", "H"] if ect else None, old_impl=False, ) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) diff --git a/source/tests/pt/model/test_dpa2.py b/source/tests/pt/model/test_dpa2.py index 0bff470f76..6d3b6e182d 100644 --- a/source/tests/pt/model/test_dpa2.py +++ b/source/tests/pt/model/test_dpa2.py @@ -61,6 +61,7 @@ def test_consistency( rpz, sm, prec, + ect, ) in itertools.product( ["concat", "strip"], # repinit_tebd_input_mode [ @@ -75,18 +76,21 @@ def test_consistency( [ False, ], # repformer_update_h2 - [True, False], # repformer_attn2_has_gate + [ + True, + ], # repformer_attn2_has_gate ["res_avg", "res_residual"], # repformer_update_style [ True, ], # repformer_set_davg_zero [True, False], # smooth ["float64"], # precision + [False, True], # use_econf_tebd ): dtype = PRECISION_DICT[prec] rtol, atol = get_tols(prec) if prec == "float64": - atol = 1e-11 # marginal GPU test cases... + atol = 1e-8 # marginal GPU test cases... 
repinit = RepinitArgs( rcut=self.rcut, @@ -129,6 +133,8 @@ def test_consistency( exclude_types=[], add_tebd_to_repinit_out=False, precision=prec, + use_econf_tebd=ect, + type_map=["O", "H"] if ect else None, old_impl=False, ).to(env.DEVICE) @@ -168,7 +174,7 @@ def test_consistency( atol=atol, ) # old impl - if prec == "float64" and rus == "res_avg": + if prec == "float64" and rus == "res_avg" and ect is False: dd3 = DescrptDPA2( self.nt, repinit=repinit, @@ -232,6 +238,7 @@ def test_jit( rpz, sm, prec, + ect, ) in itertools.product( ["concat", "strip"], # repinit_tebd_input_mode [ @@ -269,6 +276,7 @@ def test_jit( True, ], # smooth ["float64"], # precision + [False, True], # use_econf_tebd ): dtype = PRECISION_DICT[prec] rtol, atol = get_tols(prec) @@ -314,6 +322,8 @@ def test_jit( exclude_types=[], add_tebd_to_repinit_out=False, precision=prec, + use_econf_tebd=ect, + type_map=["O", "H"] if ect else None, old_impl=False, ).to(env.DEVICE) diff --git a/source/tests/pt/model/test_fitting_net.py b/source/tests/pt/model/test_fitting_net.py index c7e1723799..900b01d687 100644 --- a/source/tests/pt/model/test_fitting_net.py +++ b/source/tests/pt/model/test_fitting_net.py @@ -113,7 +113,7 @@ def test_consistency(self): ).to(env.DEVICE) for name, param in my_fn.named_parameters(): matched = re.match( - "filter_layers\.networks\.(\d).layers\.(\d)\.([a-z]+)", name + r"filter_layers\.networks\.(\d).layers\.(\d)\.([a-z]+)", name ) key = None if matched: diff --git a/source/tests/pt/test_loss.py b/source/tests/pt/test_loss.py index dcd59cd56e..72ea961c37 100644 --- a/source/tests/pt/test_loss.py +++ b/source/tests/pt/test_loss.py @@ -7,9 +7,6 @@ import torch tf.disable_eager_execution() -from copy import ( - deepcopy, -) from pathlib import ( Path, ) @@ -46,48 +43,102 @@ def get_batch(system, type_map, data_requirement): return np_batch, pt_batch -class TestEnerStdLoss(unittest.TestCase): +class LossCommonTest(unittest.TestCase): def setUp(self): - self.system = str(Path(__file__).parent / "water/data/data_0") - self.type_map = ["H", "O"] - self.start_lr = 1.1 - self.start_pref_e = 0.02 - self.limit_pref_e = 1.0 - self.start_pref_f = 1000.0 - self.limit_pref_f = 1.0 - self.start_pref_v = 0.02 - self.limit_pref_v = 1.0 self.cur_lr = 1.2 + if not self.spin: + self.system = str(Path(__file__).parent / "water/data/data_0") + self.type_map = ["H", "O"] + else: + self.system = str(Path(__file__).parent / "NiO/data/data_0") + self.type_map = ["Ni", "O"] + energy_data_requirement.append( + DataRequirementItem( + "force_mag", + ndof=3, + atomic=True, + must=False, + high_prec=False, + ) + ) # data np_batch, pt_batch = get_batch( self.system, self.type_map, energy_data_requirement ) natoms = np_batch["natoms"] self.nloc = natoms[0] - l_energy, l_force, l_virial = ( - np_batch["energy"], - np_batch["force"], - np_batch["virial"], - ) - p_energy, p_force, p_virial = ( - np.ones_like(l_energy), - np.ones_like(l_force), - np.ones_like(l_virial), - ) - nloc = natoms[0] - batch_size = pt_batch["coord"].shape[0] - atom_energy = np.zeros(shape=[batch_size, nloc]) - atom_pref = np.zeros(shape=[batch_size, nloc * 3]) + nframes = np_batch["energy"].shape[0] + rng = np.random.default_rng() + + if not self.spin: + l_energy, l_force, l_virial = ( + np_batch["energy"], + np_batch["force"], + np_batch["virial"], + ) + p_energy, p_force, p_virial = ( + np.ones_like(l_energy), + np.ones_like(l_force), + np.ones_like(l_virial), + ) + nloc = natoms[0] + batch_size = pt_batch["coord"].shape[0] + p_atom_energy = 
rng.random(size=[batch_size, nloc]) + l_atom_energy = rng.random(size=[batch_size, nloc]) + atom_pref = rng.random(size=[batch_size, nloc * 3]) + drdq = rng.random(size=[batch_size, nloc * 2 * 3]) + atom_ener_coeff = rng.random(size=[batch_size, nloc]) + # placeholders + l_force_real = l_force + l_force_mag = l_force + p_force_real = p_force + p_force_mag = p_force + else: + # data + np_batch, pt_batch = get_batch( + self.system, self.type_map, energy_data_requirement + ) + natoms = np_batch["natoms"] + self.nloc = natoms[0] + l_energy, l_force_real, l_force_mag, l_virial = ( + np_batch["energy"], + np_batch["force"], + np_batch["force_mag"], + np_batch["virial"], + ) + # merged force for tf old implement + l_force_merge_tf = np.concatenate( + [ + l_force_real.reshape(nframes, self.nloc, 3), + l_force_mag.reshape(nframes, self.nloc, 3)[ + np_batch["atype"] == 0 + ].reshape(nframes, -1, 3), + ], + axis=1, + ).reshape(nframes, -1) + p_energy, p_force_real, p_force_mag, p_force_merge_tf, p_virial = ( + np.ones_like(l_energy), + np.ones_like(l_force_real), + np.ones_like(l_force_mag), + np.ones_like(l_force_merge_tf), + np.ones_like(l_virial), + ) + virt_nloc = (np_batch["atype"] == 0).sum(-1) + natoms_tf = np.concatenate([natoms, virt_nloc], axis=0) + natoms_tf[:2] += virt_nloc + nloc = natoms_tf[0] + batch_size = pt_batch["coord"].shape[0] + p_atom_energy = rng.random(size=[batch_size, nloc]) + l_atom_energy = rng.random(size=[batch_size, nloc]) + atom_pref = rng.random(size=[batch_size, nloc * 3]) + drdq = rng.random(size=[batch_size, nloc * 2 * 3]) + atom_ener_coeff = rng.random(size=[batch_size, nloc]) + self.nloc_tf = nloc + natoms = natoms_tf + l_force = l_force_merge_tf + p_force = p_force_merge_tf + # tf - base = EnerStdLoss( - self.start_lr, - self.start_pref_e, - self.limit_pref_e, - self.start_pref_f, - self.limit_pref_f, - self.start_pref_v, - self.limit_pref_v, - ) self.g = tf.Graph() with self.g.as_default(): t_cur_lr = tf.placeholder(shape=[], dtype=tf.float64) @@ -101,11 +152,15 @@ def setUp(self): t_lvirial = tf.placeholder(shape=[None, 9], dtype=tf.float64) t_latom_energy = tf.placeholder(shape=[None, None], dtype=tf.float64) t_atom_pref = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_atom_ener_coeff = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_drdq = tf.placeholder(shape=[None, None], dtype=tf.float64) find_energy = tf.constant(1.0, dtype=tf.float64) find_force = tf.constant(1.0, dtype=tf.float64) - find_virial = tf.constant(1.0, dtype=tf.float64) - find_atom_energy = tf.constant(0.0, dtype=tf.float64) - find_atom_pref = tf.constant(0.0, dtype=tf.float64) + find_virial = tf.constant(1.0 if not self.spin else 0.0, dtype=tf.float64) + find_atom_energy = tf.constant(1.0, dtype=tf.float64) + find_atom_pref = tf.constant(1.0, dtype=tf.float64) + find_drdq = tf.constant(1.0, dtype=tf.float64) + find_atom_ener_coeff = tf.constant(1.0, dtype=tf.float64) model_dict = { "energy": t_penergy, "force": t_pforce, @@ -118,59 +173,359 @@ def setUp(self): "virial": t_lvirial, "atom_ener": t_latom_energy, "atom_pref": t_atom_pref, + "drdq": t_drdq, + "atom_ener_coeff": t_atom_ener_coeff, "find_energy": find_energy, "find_force": find_force, "find_virial": find_virial, "find_atom_ener": find_atom_energy, "find_atom_pref": find_atom_pref, + "find_drdq": find_drdq, + "find_atom_ener_coeff": find_atom_ener_coeff, } - self.base_loss_sess = base.build( + self.tf_loss_sess = self.tf_loss.build( t_cur_lr, t_natoms, model_dict, label_dict, "" ) - # torch + self.feed_dict = { 
t_cur_lr: self.cur_lr, t_natoms: natoms, t_penergy: p_energy, t_pforce: p_force, t_pvirial: p_virial.reshape(-1, 9), - t_patom_energy: atom_energy, + t_patom_energy: p_atom_energy, t_lenergy: l_energy, t_lforce: l_force, t_lvirial: l_virial.reshape(-1, 9), - t_latom_energy: atom_energy, + t_latom_energy: l_atom_energy, t_atom_pref: atom_pref, + t_drdq: drdq, + t_atom_ener_coeff: atom_ener_coeff, } - self.model_pred = { - "energy": torch.from_numpy(p_energy), - "force": torch.from_numpy(p_force), - "virial": torch.from_numpy(p_virial), - } - self.label = { - "energy": torch.from_numpy(l_energy), - "find_energy": 1.0, - "force": torch.from_numpy(l_force), - "find_force": 1.0, - "virial": torch.from_numpy(l_virial), - "find_virial": 1.0, - } - self.label_absent = { - "energy": torch.from_numpy(l_energy), - "force": torch.from_numpy(l_force), - "virial": torch.from_numpy(l_virial), - } + # pt + if not self.spin: + self.model_pred = { + "energy": torch.from_numpy(p_energy), + "force": torch.from_numpy(p_force), + "virial": torch.from_numpy(p_virial), + "atom_energy": torch.from_numpy(p_atom_energy), + } + self.label = { + "energy": torch.from_numpy(l_energy), + "find_energy": 1.0, + "force": torch.from_numpy(l_force), + "find_force": 1.0, + "virial": torch.from_numpy(l_virial), + "find_virial": 1.0, + "atom_ener": torch.from_numpy(l_atom_energy), + "find_atom_ener": 1.0, + "atom_pref": torch.from_numpy(atom_pref), + "find_atom_pref": 1.0, + "drdq": torch.from_numpy(drdq), + "find_drdq": 1.0, + "atom_ener_coeff": torch.from_numpy(atom_ener_coeff), + "find_atom_ener_coeff": 1.0, + } + self.label_absent = { + "energy": torch.from_numpy(l_energy), + "force": torch.from_numpy(l_force), + "virial": torch.from_numpy(l_virial), + "atom_ener": torch.from_numpy(l_atom_energy), + "atom_pref": torch.from_numpy(atom_pref), + "drdq": torch.from_numpy(drdq), + "atom_ener_coeff": torch.from_numpy(atom_ener_coeff), + } + else: + self.model_pred = { + "energy": torch.from_numpy(p_energy), + "force": torch.from_numpy(p_force_real).reshape(nframes, self.nloc, 3), + "force_mag": torch.from_numpy(p_force_mag).reshape( + nframes, self.nloc, 3 + ), + "mask_mag": torch.from_numpy(np_batch["atype"] == 0).reshape( + nframes, self.nloc, 1 + ), + "atom_energy": torch.from_numpy(p_atom_energy), + } + self.label = { + "energy": torch.from_numpy(l_energy), + "find_energy": 1.0, + "force": torch.from_numpy(l_force_real).reshape(nframes, self.nloc, 3), + "find_force": 1.0, + "force_mag": torch.from_numpy(l_force_mag).reshape( + nframes, self.nloc, 3 + ), + "find_force_mag": 1.0, + "atom_ener": torch.from_numpy(l_atom_energy), + "find_atom_ener": 1.0, + "atom_ener_coeff": torch.from_numpy(atom_ener_coeff), + "find_atom_ener_coeff": 1.0, + } + self.label_absent = { + "energy": torch.from_numpy(l_energy), + "force": torch.from_numpy(l_force_real).reshape(nframes, self.nloc, 3), + "force_mag": torch.from_numpy(l_force_mag).reshape( + nframes, self.nloc, 3 + ), + "atom_ener": torch.from_numpy(l_atom_energy), + "atom_ener_coeff": torch.from_numpy(atom_ener_coeff), + } self.natoms = pt_batch["natoms"] def tearDown(self) -> None: tf.reset_default_graph() return super().tearDown() + +class TestEnerStdLoss(LossCommonTest): + def setUp(self): + self.start_lr = 1.1 + self.start_pref_e = 0.02 + self.limit_pref_e = 1.0 + self.start_pref_f = 1000.0 + self.limit_pref_f = 1.0 + self.start_pref_v = 0.02 + self.limit_pref_v = 1.0 + # tf + self.tf_loss = EnerStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + 
self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + ) + # pt + self.pt_loss = EnergyStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + ) + self.spin = False + super().setUp() + def test_consistency(self): with tf.Session(graph=self.g) as sess: - base_loss, base_more_loss = sess.run( - self.base_loss_sess, feed_dict=self.feed_dict + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pt_loss, pt_more_loss = self.pt_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pt_loss_absent, pt_more_loss_absent = self.pt_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pt_loss = pt_loss.detach().cpu() + pt_loss_absent = pt_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pt_loss.numpy())) + self.assertTrue(np.allclose(0.0, pt_loss_absent.numpy())) + for key in ["ener", "force", "virial"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"l2_{key}_loss"], pt_more_loss[f"l2_{key}_loss"] + ) + ) + self.assertTrue(np.isnan(pt_more_loss_absent[f"l2_{key}_loss"])) + + +class TestEnerStdLossAePfGf(LossCommonTest): + def setUp(self): + self.start_lr = 1.1 + self.start_pref_e = 0.02 + self.limit_pref_e = 1.0 + self.start_pref_f = 1000.0 + self.limit_pref_f = 1.0 + self.start_pref_v = 0.02 + self.limit_pref_v = 1.0 + self.start_pref_ae = 0.02 + self.limit_pref_ae = 1.0 + self.start_pref_pf = 0.02 + self.limit_pref_pf = 1.0 + self.start_pref_gf = 0.02 + self.limit_pref_gf = 1.0 + self.numb_generalized_coord = 2 + # tf + self.tf_loss = EnerStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + self.start_pref_ae, + self.limit_pref_ae, + self.start_pref_pf, + self.limit_pref_pf, + start_pref_gf=self.start_pref_gf, + limit_pref_gf=self.limit_pref_gf, + numb_generalized_coord=self.numb_generalized_coord, + ) + # pt + self.pt_loss = EnergyStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + self.start_pref_ae, + self.limit_pref_ae, + self.start_pref_pf, + self.limit_pref_pf, + start_pref_gf=self.start_pref_gf, + limit_pref_gf=self.limit_pref_gf, + numb_generalized_coord=self.numb_generalized_coord, + ) + self.spin = False + super().setUp() + + def test_consistency(self): + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pt_loss, pt_more_loss = self.pt_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pt_loss_absent, pt_more_loss_absent = self.pt_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pt_loss = pt_loss.detach().cpu() + pt_loss_absent = pt_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pt_loss.numpy())) + self.assertTrue(np.allclose(0.0, pt_loss_absent.numpy())) + for key in ["ener", "force", "virial", "atom_ener", "pref_force", "gen_force"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"l2_{key}_loss"], pt_more_loss[f"l2_{key}_loss"] + ) + ) + self.assertTrue(np.isnan(pt_more_loss_absent[f"l2_{key}_loss"])) + + +class TestEnerStdLossAecoeff(LossCommonTest): + def setUp(self): + self.start_lr = 1.1 + self.start_pref_e = 0.02 + 
+        self.limit_pref_e = 1.0
+        self.start_pref_f = 1000.0
+        self.limit_pref_f = 1.0
+        self.start_pref_v = 0.02
+        self.limit_pref_v = 1.0
+        # tf
+        self.tf_loss = EnerStdLoss(
+            self.start_lr,
+            self.start_pref_e,
+            self.limit_pref_e,
+            self.start_pref_f,
+            self.limit_pref_f,
+            self.start_pref_v,
+            self.limit_pref_v,
+            enable_atom_ener_coeff=True,
+        )
+        # pt
+        self.pt_loss = EnergyStdLoss(
+            self.start_lr,
+            self.start_pref_e,
+            self.limit_pref_e,
+            self.start_pref_f,
+            self.limit_pref_f,
+            self.start_pref_v,
+            self.limit_pref_v,
+            enable_atom_ener_coeff=True,
+        )
+        self.spin = False
+        super().setUp()
+
+    def test_consistency(self):
+        with tf.Session(graph=self.g) as sess:
+            tf_loss, tf_more_loss = sess.run(
+                self.tf_loss_sess, feed_dict=self.feed_dict
+            )
+
+        def fake_model():
+            return self.model_pred
+
+        _, pt_loss, pt_more_loss = self.pt_loss(
+            {},
+            fake_model,
+            self.label,
+            self.nloc,
+            self.cur_lr,
+        )
+        _, pt_loss_absent, pt_more_loss_absent = self.pt_loss(
+            {},
+            fake_model,
+            self.label_absent,
+            self.nloc,
+            self.cur_lr,
+        )
+        pt_loss = pt_loss.detach().cpu()
+        pt_loss_absent = pt_loss_absent.detach().cpu()
+        self.assertTrue(np.allclose(tf_loss, pt_loss.numpy()))
+        self.assertTrue(np.allclose(0.0, pt_loss_absent.numpy()))
+        for key in ["ener", "force", "virial"]:
+            self.assertTrue(
+                np.allclose(
+                    tf_more_loss[f"l2_{key}_loss"], pt_more_loss[f"l2_{key}_loss"]
+                )
+            )
             )
-        mine = EnergyStdLoss(
+            self.assertTrue(np.isnan(pt_more_loss_absent[f"l2_{key}_loss"]))
+
+
+class TestEnerStdLossRelativeF(LossCommonTest):
+    def setUp(self):
+        self.start_lr = 1.1
+        self.start_pref_e = 0.02
+        self.limit_pref_e = 1.0
+        self.start_pref_f = 1000.0
+        self.limit_pref_f = 1.0
+        self.start_pref_v = 0.02
+        self.limit_pref_v = 1.0
+        # tf
+        self.tf_loss = EnerStdLoss(
+            self.start_lr,
+            self.start_pref_e,
+            self.limit_pref_e,
+            self.start_pref_f,
+            self.limit_pref_f,
+            self.start_pref_v,
+            self.limit_pref_v,
+            relative_f=0.1,
+        )
+        # pt
+        self.pt_loss = EnergyStdLoss(
             self.start_lr,
             self.start_pref_e,
             self.limit_pref_e,
@@ -178,42 +533,49 @@ def test_consistency(self):
             self.limit_pref_f,
             self.start_pref_v,
             self.limit_pref_v,
+            relative_f=0.1,
         )
+        self.spin = False
+        super().setUp()
+
+    def test_consistency(self):
+        with tf.Session(graph=self.g) as sess:
+            tf_loss, tf_more_loss = sess.run(
+                self.tf_loss_sess, feed_dict=self.feed_dict
+            )

         def fake_model():
             return self.model_pred

-        _, my_loss, my_more_loss = mine(
+        _, pt_loss, pt_more_loss = self.pt_loss(
             {},
             fake_model,
             self.label,
             self.nloc,
             self.cur_lr,
         )
-        _, my_loss_absent, my_more_loss_absent = mine(
+        _, pt_loss_absent, pt_more_loss_absent = self.pt_loss(
             {},
             fake_model,
             self.label_absent,
             self.nloc,
             self.cur_lr,
         )
-        my_loss = my_loss.detach().cpu()
-        my_loss_absent = my_loss_absent.detach().cpu()
-        self.assertTrue(np.allclose(base_loss, my_loss.numpy()))
-        self.assertTrue(np.allclose(0.0, my_loss_absent.numpy()))
+        pt_loss = pt_loss.detach().cpu()
+        pt_loss_absent = pt_loss_absent.detach().cpu()
+        self.assertTrue(np.allclose(tf_loss, pt_loss.numpy()))
+        self.assertTrue(np.allclose(0.0, pt_loss_absent.numpy()))
         for key in ["ener", "force", "virial"]:
             self.assertTrue(
                 np.allclose(
-                    base_more_loss[f"l2_{key}_loss"], my_more_loss[f"l2_{key}_loss"]
+                    tf_more_loss[f"l2_{key}_loss"], pt_more_loss[f"l2_{key}_loss"]
                 )
             )
-            self.assertTrue(np.isnan(my_more_loss_absent[f"l2_{key}_loss"]))
+            self.assertTrue(np.isnan(pt_more_loss_absent[f"l2_{key}_loss"]))


-class TestEnerSpinLoss(unittest.TestCase):
+class TestEnerSpinLoss(LossCommonTest):
     def setUp(self):
-        self.system = str(Path(__file__).parent / "NiO/data/data_0")
-        self.type_map = ["Ni", "O"]
         self.start_lr = 1.1
         self.start_pref_e = 0.02
         self.limit_pref_e = 1.0
@@ -223,56 +585,81 @@ def setUp(self):
         self.limit_pref_fm = 1.0
         self.cur_lr = 1.2
         self.use_spin = [1, 0]
-        # data
-        spin_data_requirement = deepcopy(energy_data_requirement)
-        spin_data_requirement.append(
-            DataRequirementItem(
-                "force_mag",
-                ndof=3,
-                atomic=True,
-                must=False,
-                high_prec=False,
-            )
+        # tf
+        self.tf_loss = EnerSpinLoss(
+            self.start_lr,
+            self.start_pref_e,
+            self.limit_pref_e,
+            self.start_pref_fr,
+            self.limit_pref_fr,
+            self.start_pref_fm,
+            self.limit_pref_fm,
+            use_spin=self.use_spin,
         )
-        np_batch, pt_batch = get_batch(
-            self.system, self.type_map, spin_data_requirement
+        # pt
+        self.pt_loss = EnergySpinLoss(
+            self.start_lr,
+            self.start_pref_e,
+            self.limit_pref_e,
+            self.start_pref_fr,
+            self.limit_pref_fr,
+            self.start_pref_fm,
+            self.limit_pref_fm,
         )
-        natoms = np_batch["natoms"]
-        self.nloc = natoms[0]
-        nframes = np_batch["energy"].shape[0]
-        l_energy, l_force_real, l_force_mag, l_virial = (
-            np_batch["energy"],
-            np_batch["force"],
-            np_batch["force_mag"],
-            np_batch["virial"],
+        self.spin = True
+        super().setUp()
+
+    def test_consistency(self):
+        with tf.Session(graph=self.g) as sess:
+            tf_loss, tf_more_loss = sess.run(
+                self.tf_loss_sess, feed_dict=self.feed_dict
+            )
+
+        def fake_model():
+            return self.model_pred
+
+        _, pt_loss, pt_more_loss = self.pt_loss(
+            {},
+            fake_model,
+            self.label,
+            self.nloc_tf,  # use tf natoms pref
+            self.cur_lr,
         )
-        # merged force for tf old implement
-        l_force_merge_tf = np.concatenate(
-            [
-                l_force_real.reshape(nframes, self.nloc, 3),
-                l_force_mag.reshape(nframes, self.nloc, 3)[
-                    np_batch["atype"] == 0
-                ].reshape(nframes, -1, 3),
-            ],
-            axis=1,
-        ).reshape(nframes, -1)
-        p_energy, p_force_real, p_force_mag, p_force_merge_tf, p_virial = (
-            np.ones_like(l_energy),
-            np.ones_like(l_force_real),
-            np.ones_like(l_force_mag),
-            np.ones_like(l_force_merge_tf),
-            np.ones_like(l_virial),
+        _, pt_loss_absent, pt_more_loss_absent = self.pt_loss(
+            {},
+            fake_model,
+            self.label_absent,
+            self.nloc_tf,  # use tf natoms pref
+            self.cur_lr,
         )
-        virt_nloc = (np_batch["atype"] == 0).sum(-1)
-        natoms_tf = np.concatenate([natoms, virt_nloc], axis=0)
-        natoms_tf[:2] += virt_nloc
-        nloc = natoms_tf[0]
-        batch_size = pt_batch["coord"].shape[0]
-        atom_energy = np.zeros(shape=[batch_size, nloc])
-        atom_pref = np.zeros(shape=[batch_size, nloc * 3])
-        self.nloc_tf = nloc
+        pt_loss = pt_loss.detach().cpu()
+        pt_loss_absent = pt_loss_absent.detach().cpu()
+        self.assertTrue(np.allclose(tf_loss, pt_loss.numpy()))
+        self.assertTrue(np.allclose(0.0, pt_loss_absent.numpy()))
+        for key in ["ener", "force_r", "force_m"]:
+            self.assertTrue(
+                np.allclose(
+                    tf_more_loss[f"l2_{key}_loss"], pt_more_loss[f"l2_{key}_loss"]
+                )
+            )
+            self.assertTrue(np.isnan(pt_more_loss_absent[f"l2_{key}_loss"]))
+
+
+class TestEnerSpinLossAe(LossCommonTest):
+    def setUp(self):
+        self.start_lr = 1.1
+        self.start_pref_e = 0.02
+        self.limit_pref_e = 1.0
+        self.start_pref_fr = 1000.0
+        self.limit_pref_fr = 1.0
+        self.start_pref_fm = 1000.0
+        self.limit_pref_fm = 1.0
+        self.start_pref_ae = 0.02
+        self.limit_pref_ae = 1.0
+        self.cur_lr = 1.2
+        self.use_spin = [1, 0]
         # tf
-        base = EnerSpinLoss(
+        self.tf_loss = EnerSpinLoss(
             self.start_lr,
             self.start_pref_e,
             self.limit_pref_e,
@@ -280,94 +667,74 @@ def setUp(self):
             self.limit_pref_fr,
             self.start_pref_fm,
             self.limit_pref_fm,
+            start_pref_ae=self.start_pref_ae,
+            limit_pref_ae=self.limit_pref_ae,
             use_spin=self.use_spin,
         )
-        self.g = tf.Graph()
-        with self.g.as_default():
-            t_cur_lr = tf.placeholder(shape=[], dtype=tf.float64)
-            t_natoms = tf.placeholder(shape=[None], dtype=tf.int32)
-            t_penergy = tf.placeholder(shape=[None, 1], dtype=tf.float64)
-            t_pforce = tf.placeholder(shape=[None, None], dtype=tf.float64)
-            t_pvirial = tf.placeholder(shape=[None, 9], dtype=tf.float64)
-            t_patom_energy = tf.placeholder(shape=[None, None], dtype=tf.float64)
-            t_lenergy = tf.placeholder(shape=[None, 1], dtype=tf.float64)
-            t_lforce = tf.placeholder(shape=[None, None], dtype=tf.float64)
-            t_lvirial = tf.placeholder(shape=[None, 9], dtype=tf.float64)
-            t_latom_energy = tf.placeholder(shape=[None, None], dtype=tf.float64)
-            t_atom_pref = tf.placeholder(shape=[None, None], dtype=tf.float64)
-            find_energy = tf.constant(1.0, dtype=tf.float64)
-            find_force = tf.constant(1.0, dtype=tf.float64)
-            find_virial = tf.constant(0.0, dtype=tf.float64)
-            find_atom_energy = tf.constant(0.0, dtype=tf.float64)
-            find_atom_pref = tf.constant(0.0, dtype=tf.float64)
-            model_dict = {
-                "energy": t_penergy,
-                "force": t_pforce,
-                "virial": t_pvirial,
-                "atom_ener": t_patom_energy,
-            }
-            label_dict = {
-                "energy": t_lenergy,
-                "force": t_lforce,
-                "virial": t_lvirial,
-                "atom_ener": t_latom_energy,
-                "atom_pref": t_atom_pref,
-                "find_energy": find_energy,
-                "find_force": find_force,
-                "find_virial": find_virial,
-                "find_atom_ener": find_atom_energy,
-                "find_atom_pref": find_atom_pref,
-            }
-            self.base_loss_sess = base.build(
-                t_cur_lr, t_natoms, model_dict, label_dict, ""
-            )
-        # torch
-        self.feed_dict = {
-            t_cur_lr: self.cur_lr,
-            t_natoms: natoms_tf,
-            t_penergy: p_energy,
-            t_pforce: p_force_merge_tf,
-            t_pvirial: p_virial.reshape(-1, 9),
-            t_patom_energy: atom_energy,
-            t_lenergy: l_energy,
-            t_lforce: l_force_merge_tf,
-            t_lvirial: l_virial.reshape(-1, 9),
-            t_latom_energy: atom_energy,
-            t_atom_pref: atom_pref,
-        }
-        self.model_pred = {
-            "energy": torch.from_numpy(p_energy),
-            "force": torch.from_numpy(p_force_real).reshape(nframes, self.nloc, 3),
-            "force_mag": torch.from_numpy(p_force_mag).reshape(nframes, self.nloc, 3),
-            "mask_mag": torch.from_numpy(np_batch["atype"] == 0).reshape(
-                nframes, self.nloc, 1
-            ),
-        }
-        self.label = {
-            "energy": torch.from_numpy(l_energy),
-            "find_energy": 1.0,
-            "force": torch.from_numpy(l_force_real).reshape(nframes, self.nloc, 3),
-            "find_force": 1.0,
-            "force_mag": torch.from_numpy(l_force_mag).reshape(nframes, self.nloc, 3),
-            "find_force_mag": 1.0,
-        }
-        self.label_absent = {
-            "energy": torch.from_numpy(l_energy),
-            "force": torch.from_numpy(l_force_real).reshape(nframes, self.nloc, 3),
-            "force_mag": torch.from_numpy(l_force_mag).reshape(nframes, self.nloc, 3),
-        }
-        self.natoms = pt_batch["natoms"]
-
-    def tearDown(self) -> None:
-        tf.reset_default_graph()
-        return super().tearDown()
+        # pt
+        self.pt_loss = EnergySpinLoss(
+            self.start_lr,
+            self.start_pref_e,
+            self.limit_pref_e,
+            self.start_pref_fr,
+            self.limit_pref_fr,
+            self.start_pref_fm,
+            self.limit_pref_fm,
+            start_pref_ae=self.start_pref_ae,
+            limit_pref_ae=self.limit_pref_ae,
+        )
+        self.spin = True
+        super().setUp()

     def test_consistency(self):
         with tf.Session(graph=self.g) as sess:
-            base_loss, base_more_loss = sess.run(
-                self.base_loss_sess, feed_dict=self.feed_dict
+            tf_loss, tf_more_loss = sess.run(
+                self.tf_loss_sess, feed_dict=self.feed_dict
             )
-        mine = EnergySpinLoss(
+
+        def fake_model():
+            return self.model_pred
+
+        _, pt_loss, pt_more_loss = self.pt_loss(
+            {},
+            fake_model,
+            self.label,
+            self.nloc_tf,  # use tf natoms pref
+            self.cur_lr,
+        )
+        _, pt_loss_absent, pt_more_loss_absent = self.pt_loss(
+            {},
+            fake_model,
+            self.label_absent,
+            self.nloc_tf,  # use tf natoms pref
+            self.cur_lr,
+        )
+        pt_loss = pt_loss.detach().cpu()
+        pt_loss_absent = pt_loss_absent.detach().cpu()
+        self.assertTrue(np.allclose(tf_loss, pt_loss.numpy()))
+        self.assertTrue(np.allclose(0.0, pt_loss_absent.numpy()))
+        for key in ["ener", "force_r", "force_m", "atom_ener"]:
+            self.assertTrue(
+                np.allclose(
+                    tf_more_loss[f"l2_{key}_loss"], pt_more_loss[f"l2_{key}_loss"]
+                )
+            )
+            self.assertTrue(np.isnan(pt_more_loss_absent[f"l2_{key}_loss"]))
+
+
+class TestEnerSpinLossAecoeff(LossCommonTest):
+    def setUp(self):
+        self.start_lr = 1.1
+        self.start_pref_e = 0.02
+        self.limit_pref_e = 1.0
+        self.start_pref_fr = 1000.0
+        self.limit_pref_fr = 1.0
+        self.start_pref_fm = 1000.0
+        self.limit_pref_fm = 1.0
+        self.cur_lr = 1.2
+        self.use_spin = [1, 0]
+        # tf
+        self.tf_loss = EnerSpinLoss(
             self.start_lr,
             self.start_pref_e,
             self.limit_pref_e,
@@ -375,36 +742,57 @@ def test_consistency(self):
             self.limit_pref_fr,
             self.start_pref_fm,
             self.limit_pref_fm,
+            use_spin=self.use_spin,
+            enable_atom_ener_coeff=True,
         )
+        # pt
+        self.pt_loss = EnergySpinLoss(
+            self.start_lr,
+            self.start_pref_e,
+            self.limit_pref_e,
+            self.start_pref_fr,
+            self.limit_pref_fr,
+            self.start_pref_fm,
+            self.limit_pref_fm,
+            enable_atom_ener_coeff=True,
+        )
+        self.spin = True
+        super().setUp()
+
+    def test_consistency(self):
+        with tf.Session(graph=self.g) as sess:
+            tf_loss, tf_more_loss = sess.run(
+                self.tf_loss_sess, feed_dict=self.feed_dict
+            )

         def fake_model():
             return self.model_pred

-        _, my_loss, my_more_loss = mine(
+        _, pt_loss, pt_more_loss = self.pt_loss(
             {},
             fake_model,
             self.label,
             self.nloc_tf,  # use tf natoms pref
             self.cur_lr,
         )
-        _, my_loss_absent, my_more_loss_absent = mine(
+        _, pt_loss_absent, pt_more_loss_absent = self.pt_loss(
             {},
             fake_model,
             self.label_absent,
             self.nloc_tf,  # use tf natoms pref
             self.cur_lr,
         )
-        my_loss = my_loss.detach().cpu()
-        my_loss_absent = my_loss_absent.detach().cpu()
-        self.assertTrue(np.allclose(base_loss, my_loss.numpy()))
-        self.assertTrue(np.allclose(0.0, my_loss_absent.numpy()))
+        pt_loss = pt_loss.detach().cpu()
+        pt_loss_absent = pt_loss_absent.detach().cpu()
+        self.assertTrue(np.allclose(tf_loss, pt_loss.numpy()))
+        self.assertTrue(np.allclose(0.0, pt_loss_absent.numpy()))
         for key in ["ener", "force_r", "force_m"]:
             self.assertTrue(
                 np.allclose(
-                    base_more_loss[f"l2_{key}_loss"], my_more_loss[f"l2_{key}_loss"]
+                    tf_more_loss[f"l2_{key}_loss"], pt_more_loss[f"l2_{key}_loss"]
                 )
             )
-            self.assertTrue(np.isnan(my_more_loss_absent[f"l2_{key}_loss"]))
+            self.assertTrue(np.isnan(pt_more_loss_absent[f"l2_{key}_loss"]))


 if __name__ == "__main__":