diff --git a/.buildinfo b/.buildinfo index 853cdb2ae..f09e2f817 100644 --- a/.buildinfo +++ b/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: ad184163ed18f9ae867121d9a28f54ab +config: 798fcda09776c00173518fd6e6e6aaa7 tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt b/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt index 49ba55daf..328df84a5 100644 --- a/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt +++ b/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt @@ -1 +1,186 @@ -python: can't open file '/home/runner/work/fairchem/fairchem/src/fairchem/main.py': [Errno 2] No such file or directory +2024-05-10 20:06:03 (INFO): Project root: /home/runner/work/fairchem/fairchem/src/fairchem +/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch/cuda/amp/grad_scaler.py:126: UserWarning: torch.cuda.amp.GradScaler is enabled, but CUDA is not available. Disabling. + warnings.warn( +2024-05-10 20:06:04 (WARNING): Detected old config, converting to new format. Consider updating to avoid potential incompatibilities. +2024-05-10 20:06:04 (INFO): amp: true +cmd: + checkpoint_dir: fine-tuning/checkpoints/2024-05-10-20-05-20-ft-oxides + commit: af82609 + identifier: ft-oxides + logs_dir: fine-tuning/logs/tensorboard/2024-05-10-20-05-20-ft-oxides + print_every: 10 + results_dir: fine-tuning/results/2024-05-10-20-05-20-ft-oxides + seed: 0 + timestamp_id: 2024-05-10-20-05-20-ft-oxides +dataset: + a2g_args: + r_energy: true + r_forces: true + format: ase_db + key_mapping: + force: forces + y: energy + src: train.db +eval_metrics: + metrics: + energy: + - mae + forces: + - forcesx_mae + - forcesy_mae + - forcesz_mae + - mae + - cosine_similarity + - magnitude_error + misc: + - energy_forces_within_threshold +gpus: 0 +logger: tensorboard +loss_fns: +- energy: + coefficient: 1 + fn: mae +- forces: + coefficient: 1 + fn: l2mae +model: gemnet_oc +model_attributes: + activation: silu + atom_edge_interaction: true + atom_interaction: true + cbf: + name: spherical_harmonics + cutoff: 12.0 + cutoff_aeaint: 12.0 + cutoff_aint: 12.0 + cutoff_qint: 12.0 + direct_forces: true + edge_atom_interaction: true + emb_size_aint_in: 64 + emb_size_aint_out: 64 + emb_size_atom: 256 + emb_size_cbf: 16 + emb_size_edge: 512 + emb_size_quad_in: 32 + emb_size_quad_out: 32 + emb_size_rbf: 16 + emb_size_sbf: 32 + emb_size_trip_in: 64 + emb_size_trip_out: 64 + envelope: + exponent: 5 + name: polynomial + extensive: true + forces_coupled: false + max_neighbors: 30 + max_neighbors_aeaint: 20 + max_neighbors_aint: 1000 + max_neighbors_qint: 8 + num_after_skip: 2 + num_atom: 3 + num_atom_emb_layers: 2 + num_before_skip: 2 + num_blocks: 4 + num_concat: 1 + num_global_out_layers: 2 + num_output_afteratom: 3 + num_radial: 128 + num_spherical: 7 + otf_graph: true + output_init: HeOrthogonal + qint_tags: + - 1 + - 2 + quad_interaction: true + rbf: + name: gaussian + regress_forces: true + sbf: + name: legendre_outer + symmetric_edge_symmetrization: false +noddp: false +optim: + batch_size: 4 + clip_grad_norm: 10 + ema_decay: 0.999 + energy_coefficient: 1 + eval_batch_size: 16 + eval_every: 10 + factor: 0.8 + force_coefficient: 1 + load_balancing: atoms + loss_energy: mae + lr_initial: 0.0005 + max_epochs: 1 + mode: min + num_workers: 2 + optimizer: AdamW + optimizer_params: + amsgrad: true + patience: 3 + scheduler: 
ReduceLROnPlateau + weight_decay: 0 +outputs: + energy: + level: system + forces: + eval_on_free_atoms: true + level: atom + train_on_free_atoms: false +slurm: {} +task: + dataset: ase_db +test_dataset: + a2g_args: + r_energy: false + r_forces: false + src: test.db +trainer: ocp +val_dataset: + a2g_args: + r_energy: true + r_forces: true + src: val.db + +2024-05-10 20:06:04 (INFO): Loading dataset: ase_db +2024-05-10 20:06:05 (INFO): rank: 0: Sampler created... +2024-05-10 20:06:05 (INFO): Batch balancing is disabled for single GPU training. +2024-05-10 20:06:05 (INFO): rank: 0: Sampler created... +2024-05-10 20:06:05 (INFO): Batch balancing is disabled for single GPU training. +2024-05-10 20:06:05 (INFO): rank: 0: Sampler created... +2024-05-10 20:06:05 (INFO): Batch balancing is disabled for single GPU training. +2024-05-10 20:06:05 (INFO): Loading model: gemnet_oc +2024-05-10 20:06:05 (WARNING): Unrecognized arguments: ['symmetric_edge_symmetrization'] +2024-05-10 20:06:07 (INFO): Loaded GemNetOC with 38864438 parameters. +2024-05-10 20:06:07 (WARNING): Model gradient logging to tensorboard not yet supported. +2024-05-10 20:06:07 (WARNING): Using `weight_decay` from `optim` instead of `optim.optimizer_params`.Please update your config to use `optim.optimizer_params.weight_decay`.`optim.weight_decay` will soon be deprecated. +2024-05-10 20:06:07 (INFO): Loading checkpoint from: /tmp/ocp_checkpoints/gnoc_oc22_oc20_all_s2ef.pt +2024-05-10 20:06:07 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint. +/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + storage = elem.storage()._new_shared(numel) +/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + storage = elem.storage()._new_shared(numel) +/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch/amp/autocast_mode.py:250: UserWarning: User provided device_type of 'cuda', but CUDA is not available. Disabling + warnings.warn( +2024-05-10 20:06:31 (INFO): energy_forces_within_threshold: 0.00e+00, energy_mae: 6.77e+00, forcesx_mae: 4.11e-02, forcesy_mae: 3.63e-02, forcesz_mae: 5.26e-02, forces_mae: 4.33e-02, forces_cosine_similarity: 8.24e-02, forces_magnitude_error: 7.42e-02, loss: 6.86e+00, lr: 5.00e-04, epoch: 1.69e-01, step: 1.00e+01 +2024-05-10 20:06:31 (INFO): Evaluating on val. 
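As an aside on the dataset block in the config dump above (format: ase_db, src: train.db, with per-row energies and forces read via a2g_args), here is a minimal sketch of how such an ASE database can be inspected. Only the file name train.db comes from the config; everything else is illustrative and assumes energies and forces were stored when the rows were written:

    from ase.db import connect

    db = connect("train.db")
    for row in db.select(limit=3):
        atoms = row.toatoms()  # rebuild an Atoms object from the stored row
        # r_energy / r_forces in the config correspond to these stored properties
        print(len(atoms), row.energy, row.forces.shape)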
+ device 0: 0%| | 0/2 [00:00 + main() + File "/home/runner/work/fairchem/fairchem/src/fairchem/core/_cli.py", line 89, in main + Runner()(config) + File "/home/runner/work/fairchem/fairchem/src/fairchem/core/_cli.py", line 41, in __call__ + self.task.run() + File "/home/runner/work/fairchem/fairchem/src/fairchem/core/tasks/task.py", line 51, in run + self.trainer.train( + File "/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/ocp_trainer.py", line 201, in train + self.update_best( + File "/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/base_trainer.py", line 671, in update_best + "mae" in primary_metric +TypeError: argument of type 'NoneType' is not iterable diff --git a/_downloads/819e10305ddd6839cd7da05935b17060/mass-inference.txt b/_downloads/819e10305ddd6839cd7da05935b17060/mass-inference.txt new file mode 100644 index 000000000..5d0745892 --- /dev/null +++ b/_downloads/819e10305ddd6839cd7da05935b17060/mass-inference.txt @@ -0,0 +1,145 @@ +2024-05-10 20:08:47 (INFO): Project root: /home/runner/work/fairchem/fairchem/src/fairchem +/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch/cuda/amp/grad_scaler.py:126: UserWarning: torch.cuda.amp.GradScaler is enabled, but CUDA is not available. Disabling. + warnings.warn( +2024-05-10 20:08:49 (WARNING): Detected old config, converting to new format. Consider updating to avoid potential incompatibilities. +2024-05-10 20:08:49 (INFO): amp: true +cmd: + checkpoint_dir: ./checkpoints/2024-05-10-20-09-36 + commit: af82609 + identifier: '' + logs_dir: ./logs/tensorboard/2024-05-10-20-09-36 + print_every: 10 + results_dir: ./results/2024-05-10-20-09-36 + seed: 0 + timestamp_id: 2024-05-10-20-09-36 +dataset: + a2g_args: + r_energy: false + r_forces: false + format: ase_db + key_mapping: + force: forces + y: energy + select_args: + selection: natoms>5,xc=PBE + src: data.db +eval_metrics: + metrics: + energy: + - mae + forces: + - forcesx_mae + - forcesy_mae + - forcesz_mae + - mae + - cosine_similarity + - magnitude_error + misc: + - energy_forces_within_threshold +gpus: 0 +logger: tensorboard +loss_fns: +- energy: + coefficient: 1 + fn: mae +- forces: + coefficient: 1 + fn: l2mae +model: gemnet_t +model_attributes: + activation: silu + cbf: + name: spherical_harmonics + cutoff: 6.0 + direct_forces: true + emb_size_atom: 512 + emb_size_bil_trip: 64 + emb_size_cbf: 16 + emb_size_edge: 512 + emb_size_rbf: 16 + emb_size_trip: 64 + envelope: + exponent: 5 + name: polynomial + extensive: true + max_neighbors: 50 + num_after_skip: 2 + num_atom: 3 + num_before_skip: 1 + num_blocks: 3 + num_concat: 1 + num_radial: 128 + num_spherical: 7 + otf_graph: true + output_init: HeOrthogonal + rbf: + name: gaussian + regress_forces: true +noddp: false +optim: + batch_size: 16 + clip_grad_norm: 10 + ema_decay: 0.999 + energy_coefficient: 1 + eval_batch_size: 16 + eval_every: 5000 + force_coefficient: 1 + loss_energy: mae + loss_force: atomwisel2 + lr_gamma: 0.8 + lr_initial: 0.0005 + lr_milestones: + - 64000 + - 96000 + - 128000 + - 160000 + - 192000 + max_epochs: 80 + num_workers: 2 + optimizer: AdamW + optimizer_params: + amsgrad: true + warmup_steps: -1 +outputs: + energy: + level: system + forces: + eval_on_free_atoms: true + level: atom + train_on_free_atoms: false +slurm: {} +task: + dataset: ase_db + prediction_dtype: float32 +test_dataset: + a2g_args: + r_energy: false + r_forces: false + select_args: + selection: natoms>5,xc=PBE + src: data.db +trainer: ocp +val_dataset: null + +2024-05-10 20:08:49 (INFO): 
Loading dataset: ase_db +2024-05-10 20:08:49 (INFO): rank: 0: Sampler created... +2024-05-10 20:08:49 (INFO): Batch balancing is disabled for single GPU training. +2024-05-10 20:08:49 (INFO): rank: 0: Sampler created... +2024-05-10 20:08:49 (INFO): Batch balancing is disabled for single GPU training. +2024-05-10 20:08:49 (INFO): Loading model: gemnet_t +2024-05-10 20:08:51 (INFO): Loaded GemNetT with 31671825 parameters. +2024-05-10 20:08:51 (WARNING): Model gradient logging to tensorboard not yet supported. +2024-05-10 20:08:51 (INFO): Loading checkpoint from: /tmp/ocp_checkpoints/gndt_oc22_all_s2ef.pt +2024-05-10 20:08:51 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint. +2024-05-10 20:08:51 (WARNING): Scale factor comment not found in model +2024-05-10 20:08:51 (INFO): Predicting on test. + device 0: 0%| | 0/3 [00:00 None + + + .. py:method:: checkpoint(*args, **kwargs) + + Resubmits the same callable with the same arguments + + + +.. py:function:: main() + + Run the main fairchem program. + + diff --git a/_sources/autoapi/core/common/data_parallel/index.rst b/_sources/autoapi/core/common/data_parallel/index.rst new file mode 100644 index 000000000..d0d2b201f --- /dev/null +++ b/_sources/autoapi/core/common/data_parallel/index.rst @@ -0,0 +1,169 @@ +:py:mod:`core.common.data_parallel` +=================================== + +.. py:module:: core.common.data_parallel + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.common.data_parallel.OCPCollater + core.common.data_parallel._HasMetadata + core.common.data_parallel.StatefulDistributedSampler + core.common.data_parallel.BalancedBatchSampler + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.common.data_parallel.balanced_partition + + + +.. py:class:: OCPCollater(otf_graph: bool = False) + + + .. py:method:: __call__(data_list: list[torch_geometric.data.Data]) -> torch_geometric.data.Batch + + + +.. py:function:: balanced_partition(sizes: numpy.typing.NDArray[numpy.int_], num_parts: int) + + Greedily partition the given set by always inserting + the largest element into the smallest partition. + + +.. py:class:: _HasMetadata + + + Bases: :py:obj:`Protocol` + + Base class for protocol classes. + + Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing). + + For example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. Protocol classes decorated with + @typing.runtime_checkable act as simple-minded runtime protocols that check + only the presence of given attributes, ignoring their type signatures. + Protocol classes can be generic, they are defined as:: + + class GenProto(Protocol[T]): + def meth(self) -> T: + ... + + .. py:property:: metadata_path + :type: pathlib.Path + + + +.. 
py:class:: StatefulDistributedSampler(dataset, batch_size, **kwargs) + + + Bases: :py:obj:`torch.utils.data.DistributedSampler` + + More fine-grained state DataSampler that uses training iteration and epoch + both for shuffling data. PyTorch DistributedSampler only uses epoch + for the shuffling and starts sampling data from the start. In case of training + on very large data, we train for one epoch only and when we resume training, + we want to resume the data sampler from the training iteration. + + .. py:method:: __iter__() + + + .. py:method:: set_epoch_and_start_iteration(epoch, start_iter) + + + +.. py:class:: BalancedBatchSampler(dataset, batch_size: int, num_replicas: int, rank: int, device: torch.device, mode: str | bool = 'atoms', shuffle: bool = True, drop_last: bool = False, force_balancing: bool = False, throw_on_error: bool = False) + + + Bases: :py:obj:`torch.utils.data.Sampler` + + Base class for all Samplers. + + Every Sampler subclass has to provide an :meth:`__iter__` method, providing a + way to iterate over indices or lists of indices (batches) of dataset elements, and a :meth:`__len__` method + that returns the length of the returned iterators. + + :param data_source: This argument is not used and will be removed in 2.2.0. + You may still have custom implementation that utilizes it. + :type data_source: Dataset + + .. rubric:: Example + + >>> # xdoctest: +SKIP + >>> class AccedingSequenceLengthSampler(Sampler[int]): + >>> def __init__(self, data: List[str]) -> None: + >>> self.data = data + >>> + >>> def __len__(self) -> int: + >>> return len(self.data) + >>> + >>> def __iter__(self) -> Iterator[int]: + >>> sizes = torch.tensor([len(x) for x in self.data]) + >>> yield from torch.argsort(sizes).tolist() + >>> + >>> class AccedingSequenceLengthBatchSampler(Sampler[List[int]]): + >>> def __init__(self, data: List[str], batch_size: int) -> None: + >>> self.data = data + >>> self.batch_size = batch_size + >>> + >>> def __len__(self) -> int: + >>> return (len(self.data) + self.batch_size - 1) // self.batch_size + >>> + >>> def __iter__(self) -> Iterator[List[int]]: + >>> sizes = torch.tensor([len(x) for x in self.data]) + >>> for batch in torch.chunk(torch.argsort(sizes), len(self)): + >>> yield batch.tolist() + + .. note:: The :meth:`__len__` method isn't strictly required by + :class:`~torch.utils.data.DataLoader`, but is expected in any + calculation involving the length of a :class:`~torch.utils.data.DataLoader`. + + .. py:method:: _load_dataset(dataset, mode: Literal[atoms, neighbors]) + + + .. py:method:: __len__() -> int + + + .. py:method:: set_epoch_and_start_iteration(epoch: int, start_iteration: int) -> None + + + .. py:method:: __iter__() + + + diff --git a/_sources/autoapi/core/common/distutils/index.rst b/_sources/autoapi/core/common/distutils/index.rst new file mode 100644 index 000000000..271f2e842 --- /dev/null +++ b/_sources/autoapi/core/common/distutils/index.rst @@ -0,0 +1,88 @@ +:py:mod:`core.common.distutils` +=============================== + +.. py:module:: core.common.distutils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + core.common.distutils.os_environ_get_or_throw + core.common.distutils.setup + core.common.distutils.cleanup + core.common.distutils.initialized + core.common.distutils.get_rank + core.common.distutils.get_world_size + core.common.distutils.is_master + core.common.distutils.synchronize + core.common.distutils.broadcast + core.common.distutils.all_reduce + core.common.distutils.all_gather + core.common.distutils.gather_objects + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.common.distutils.T + + +.. py:data:: T + + + +.. py:function:: os_environ_get_or_throw(x: str) -> str + + +.. py:function:: setup(config) -> None + + +.. py:function:: cleanup() -> None + + +.. py:function:: initialized() -> bool + + +.. py:function:: get_rank() -> int + + +.. py:function:: get_world_size() -> int + + +.. py:function:: is_master() -> bool + + +.. py:function:: synchronize() -> None + + +.. py:function:: broadcast(tensor: torch.Tensor, src, group=dist.group.WORLD, async_op: bool = False) -> None + + +.. py:function:: all_reduce(data, group=dist.group.WORLD, average: bool = False, device=None) -> torch.Tensor + + +.. py:function:: all_gather(data, group=dist.group.WORLD, device=None) -> list[torch.Tensor] + + +.. py:function:: gather_objects(data: T, group: torch.distributed.ProcessGroup = dist.group.WORLD) -> list[T] + + Gather a list of pickleable objects into rank 0 + + diff --git a/_sources/autoapi/core/common/flags/index.rst b/_sources/autoapi/core/common/flags/index.rst new file mode 100644 index 000000000..b0c123919 --- /dev/null +++ b/_sources/autoapi/core/common/flags/index.rst @@ -0,0 +1,49 @@ +:py:mod:`core.common.flags` +=========================== + +.. py:module:: core.common.flags + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.common.flags.Flags + + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.common.flags.flags + + +.. py:class:: Flags + + + .. py:method:: get_parser() -> argparse.ArgumentParser + + + .. py:method:: add_core_args() -> None + + + +.. py:data:: flags + + + diff --git a/_sources/autoapi/core/common/gp_utils/index.rst b/_sources/autoapi/core/common/gp_utils/index.rst new file mode 100644 index 000000000..342636b59 --- /dev/null +++ b/_sources/autoapi/core/common/gp_utils/index.rst @@ -0,0 +1,570 @@ +:py:mod:`core.common.gp_utils` +============================== + +.. py:module:: core.common.gp_utils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.common.gp_utils.CopyToModelParallelRegion + core.common.gp_utils.ReduceFromModelParallelRegion + core.common.gp_utils.ScatterToModelParallelRegion + core.common.gp_utils.GatherFromModelParallelRegion + + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + core.common.gp_utils.ensure_div + core.common.gp_utils.divide_and_check_no_remainder + core.common.gp_utils.setup_gp + core.common.gp_utils.cleanup_gp + core.common.gp_utils.initialized + core.common.gp_utils.get_dp_group + core.common.gp_utils.get_gp_group + core.common.gp_utils.get_dp_rank + core.common.gp_utils.get_gp_rank + core.common.gp_utils.get_dp_world_size + core.common.gp_utils.get_gp_world_size + core.common.gp_utils.pad_tensor + core.common.gp_utils.trim_tensor + core.common.gp_utils._split_tensor + core.common.gp_utils._reduce + core.common.gp_utils._split + core.common.gp_utils._gather + core.common.gp_utils._gather_with_padding + core.common.gp_utils.copy_to_model_parallel_region + core.common.gp_utils.reduce_from_model_parallel_region + core.common.gp_utils.scatter_to_model_parallel_region + core.common.gp_utils.gather_from_model_parallel_region + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.common.gp_utils._GRAPH_PARALLEL_GROUP + core.common.gp_utils._DATA_PARALLEL_GROUP + + +.. py:data:: _GRAPH_PARALLEL_GROUP + + + +.. py:data:: _DATA_PARALLEL_GROUP + + + +.. py:function:: ensure_div(a: int, b: int) -> None + + +.. py:function:: divide_and_check_no_remainder(a: int, b: int) -> int + + +.. py:function:: setup_gp(config) -> None + + +.. py:function:: cleanup_gp() -> None + + +.. py:function:: initialized() -> bool + + +.. py:function:: get_dp_group() + + +.. py:function:: get_gp_group() + + +.. py:function:: get_dp_rank() -> int + + +.. py:function:: get_gp_rank() -> int + + +.. py:function:: get_dp_world_size() -> int + + +.. py:function:: get_gp_world_size() -> int + + +.. py:function:: pad_tensor(tensor: torch.Tensor, dim: int = -1, target_size: int | None = None) -> torch.Tensor + + +.. py:function:: trim_tensor(tensor: torch.Tensor, sizes: torch.Tensor | None = None, dim: int = 0) + + +.. py:function:: _split_tensor(tensor: torch.Tensor, num_parts: int, dim: int = -1, contiguous_chunks: bool = False) + + +.. py:function:: _reduce(ctx: Any, input: torch.Tensor) -> torch.Tensor + + +.. py:function:: _split(input: torch.Tensor, dim: int = -1) -> torch.Tensor + + +.. py:function:: _gather(input: torch.Tensor, dim: int = -1) -> torch.Tensor + + +.. py:function:: _gather_with_padding(input: torch.Tensor, dim: int = -1) -> torch.Tensor + + +.. py:class:: CopyToModelParallelRegion(*args, **kwargs) + + + Bases: :py:obj:`torch.autograd.Function` + + Base class to create custom `autograd.Function`. + + To create a custom `autograd.Function`, subclass this class and implement + the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom + op in the forward pass, call the class method ``apply``. Do not call + :meth:`forward` directly. + + To ensure correctness and best performance, make sure you are calling the + correct methods on ``ctx`` and validating your backward function using + :func:`torch.autograd.gradcheck`. + + See :ref:`extending-autograd` for more details on how to use this class. + + Examples:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> class Exp(Function): + >>> @staticmethod + >>> def forward(ctx, i): + >>> result = i.exp() + >>> ctx.save_for_backward(result) + >>> return result + >>> + >>> @staticmethod + >>> def backward(ctx, grad_output): + >>> result, = ctx.saved_tensors + >>> return grad_output * result + >>> + >>> # Use it by calling the apply method: + >>> # xdoctest: +SKIP + >>> output = Exp.apply(input) + + .. 
py:method:: forward(ctx, input: torch.Tensor) -> torch.Tensor + :staticmethod: + + Define the forward of the custom autograd Function. + + This function is to be overridden by all subclasses. + There are two ways to define forward: + + Usage 1 (Combined forward and ctx):: + + @staticmethod + def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any: + pass + + - It must accept a context ctx as the first argument, followed by any + number of arguments (tensors or other types). + - See :ref:`combining-forward-context` for more details + + Usage 2 (Separate forward and ctx):: + + @staticmethod + def forward(*args: Any, **kwargs: Any) -> Any: + pass + + @staticmethod + def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: + pass + + - The forward no longer accepts a ctx argument. + - Instead, you must also override the :meth:`torch.autograd.Function.setup_context` + staticmethod to handle setting up the ``ctx`` object. + ``output`` is the output of the forward, ``inputs`` are a Tuple of inputs + to the forward. + - See :ref:`extending-autograd` for more details + + The context can be used to store arbitrary data that can be then + retrieved during the backward pass. Tensors should not be stored + directly on `ctx` (though this is not currently enforced for + backward compatibility). Instead, tensors should be saved either with + :func:`ctx.save_for_backward` if they are intended to be used in + ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward` + if they are intended to be used for in ``jvp``. + + + .. py:method:: backward(ctx, grad_output: torch.Tensor) -> torch.Tensor + :staticmethod: + + Define a formula for differentiating the operation with backward mode automatic differentiation. + + This function is to be overridden by all subclasses. + (Defining this function is equivalent to defining the ``vjp`` function.) + + It must accept a context :attr:`ctx` as the first argument, followed by + as many outputs as the :func:`forward` returned (None will be passed in + for non tensor outputs of the forward function), + and it should return as many tensors, as there were inputs to + :func:`forward`. Each argument is the gradient w.r.t the given output, + and each returned value should be the gradient w.r.t. the + corresponding input. If an input is not a Tensor or is a Tensor not + requiring grads, you can just pass None as a gradient for that input. + + The context can be used to retrieve tensors saved during the forward + pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple + of booleans representing whether each input needs gradient. E.g., + :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the + first input to :func:`forward` needs gradient computed w.r.t. the + output. + + + +.. py:class:: ReduceFromModelParallelRegion(*args, **kwargs) + + + Bases: :py:obj:`torch.autograd.Function` + + Base class to create custom `autograd.Function`. + + To create a custom `autograd.Function`, subclass this class and implement + the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom + op in the forward pass, call the class method ``apply``. Do not call + :meth:`forward` directly. + + To ensure correctness and best performance, make sure you are calling the + correct methods on ``ctx`` and validating your backward function using + :func:`torch.autograd.gradcheck`. + + See :ref:`extending-autograd` for more details on how to use this class. 
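   The copy/reduce pair listed in this module follows the standard model-parallel autograd pattern. A hedged sketch of that pattern (illustrative only, not the fairchem implementation; it assumes an already-initialized ``torch.distributed`` process group)::

       import torch
       import torch.distributed as dist

       class _Copy(torch.autograd.Function):
           # identity in forward, all-reduce of the gradient in backward
           @staticmethod
           def forward(ctx, x):
               return x

           @staticmethod
           def backward(ctx, grad_output):
               grad = grad_output.clone()
               dist.all_reduce(grad)
               return grad

       class _Reduce(torch.autograd.Function):
           # all-reduce in forward, identity in backward
           @staticmethod
           def forward(ctx, x):
               x = x.clone()
               dist.all_reduce(x)
               return x

           @staticmethod
           def backward(ctx, grad_output):
               return grad_output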
+ + Examples:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> class Exp(Function): + >>> @staticmethod + >>> def forward(ctx, i): + >>> result = i.exp() + >>> ctx.save_for_backward(result) + >>> return result + >>> + >>> @staticmethod + >>> def backward(ctx, grad_output): + >>> result, = ctx.saved_tensors + >>> return grad_output * result + >>> + >>> # Use it by calling the apply method: + >>> # xdoctest: +SKIP + >>> output = Exp.apply(input) + + .. py:method:: forward(ctx, input: torch.Tensor) -> torch.Tensor + :staticmethod: + + Define the forward of the custom autograd Function. + + This function is to be overridden by all subclasses. + There are two ways to define forward: + + Usage 1 (Combined forward and ctx):: + + @staticmethod + def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any: + pass + + - It must accept a context ctx as the first argument, followed by any + number of arguments (tensors or other types). + - See :ref:`combining-forward-context` for more details + + Usage 2 (Separate forward and ctx):: + + @staticmethod + def forward(*args: Any, **kwargs: Any) -> Any: + pass + + @staticmethod + def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: + pass + + - The forward no longer accepts a ctx argument. + - Instead, you must also override the :meth:`torch.autograd.Function.setup_context` + staticmethod to handle setting up the ``ctx`` object. + ``output`` is the output of the forward, ``inputs`` are a Tuple of inputs + to the forward. + - See :ref:`extending-autograd` for more details + + The context can be used to store arbitrary data that can be then + retrieved during the backward pass. Tensors should not be stored + directly on `ctx` (though this is not currently enforced for + backward compatibility). Instead, tensors should be saved either with + :func:`ctx.save_for_backward` if they are intended to be used in + ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward` + if they are intended to be used for in ``jvp``. + + + .. py:method:: backward(ctx, grad_output: torch.Tensor) -> torch.Tensor + :staticmethod: + + Define a formula for differentiating the operation with backward mode automatic differentiation. + + This function is to be overridden by all subclasses. + (Defining this function is equivalent to defining the ``vjp`` function.) + + It must accept a context :attr:`ctx` as the first argument, followed by + as many outputs as the :func:`forward` returned (None will be passed in + for non tensor outputs of the forward function), + and it should return as many tensors, as there were inputs to + :func:`forward`. Each argument is the gradient w.r.t the given output, + and each returned value should be the gradient w.r.t. the + corresponding input. If an input is not a Tensor or is a Tensor not + requiring grads, you can just pass None as a gradient for that input. + + The context can be used to retrieve tensors saved during the forward + pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple + of booleans representing whether each input needs gradient. E.g., + :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the + first input to :func:`forward` needs gradient computed w.r.t. the + output. + + + +.. py:class:: ScatterToModelParallelRegion(*args, **kwargs) + + + Bases: :py:obj:`torch.autograd.Function` + + Base class to create custom `autograd.Function`. 
+ + To create a custom `autograd.Function`, subclass this class and implement + the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom + op in the forward pass, call the class method ``apply``. Do not call + :meth:`forward` directly. + + To ensure correctness and best performance, make sure you are calling the + correct methods on ``ctx`` and validating your backward function using + :func:`torch.autograd.gradcheck`. + + See :ref:`extending-autograd` for more details on how to use this class. + + Examples:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> class Exp(Function): + >>> @staticmethod + >>> def forward(ctx, i): + >>> result = i.exp() + >>> ctx.save_for_backward(result) + >>> return result + >>> + >>> @staticmethod + >>> def backward(ctx, grad_output): + >>> result, = ctx.saved_tensors + >>> return grad_output * result + >>> + >>> # Use it by calling the apply method: + >>> # xdoctest: +SKIP + >>> output = Exp.apply(input) + + .. py:method:: forward(ctx, input: torch.Tensor, dim: int = -1) -> torch.Tensor + :staticmethod: + + Define the forward of the custom autograd Function. + + This function is to be overridden by all subclasses. + There are two ways to define forward: + + Usage 1 (Combined forward and ctx):: + + @staticmethod + def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any: + pass + + - It must accept a context ctx as the first argument, followed by any + number of arguments (tensors or other types). + - See :ref:`combining-forward-context` for more details + + Usage 2 (Separate forward and ctx):: + + @staticmethod + def forward(*args: Any, **kwargs: Any) -> Any: + pass + + @staticmethod + def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: + pass + + - The forward no longer accepts a ctx argument. + - Instead, you must also override the :meth:`torch.autograd.Function.setup_context` + staticmethod to handle setting up the ``ctx`` object. + ``output`` is the output of the forward, ``inputs`` are a Tuple of inputs + to the forward. + - See :ref:`extending-autograd` for more details + + The context can be used to store arbitrary data that can be then + retrieved during the backward pass. Tensors should not be stored + directly on `ctx` (though this is not currently enforced for + backward compatibility). Instead, tensors should be saved either with + :func:`ctx.save_for_backward` if they are intended to be used in + ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward` + if they are intended to be used for in ``jvp``. + + + .. py:method:: backward(ctx, grad_output: torch.Tensor) + :staticmethod: + + Define a formula for differentiating the operation with backward mode automatic differentiation. + + This function is to be overridden by all subclasses. + (Defining this function is equivalent to defining the ``vjp`` function.) + + It must accept a context :attr:`ctx` as the first argument, followed by + as many outputs as the :func:`forward` returned (None will be passed in + for non tensor outputs of the forward function), + and it should return as many tensors, as there were inputs to + :func:`forward`. Each argument is the gradient w.r.t the given output, + and each returned value should be the gradient w.r.t. the + corresponding input. If an input is not a Tensor or is a Tensor not + requiring grads, you can just pass None as a gradient for that input. + + The context can be used to retrieve tensors saved during the forward + pass. 
It also has an attribute :attr:`ctx.needs_input_grad` as a tuple + of booleans representing whether each input needs gradient. E.g., + :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the + first input to :func:`forward` needs gradient computed w.r.t. the + output. + + + +.. py:class:: GatherFromModelParallelRegion(*args, **kwargs) + + + Bases: :py:obj:`torch.autograd.Function` + + Base class to create custom `autograd.Function`. + + To create a custom `autograd.Function`, subclass this class and implement + the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom + op in the forward pass, call the class method ``apply``. Do not call + :meth:`forward` directly. + + To ensure correctness and best performance, make sure you are calling the + correct methods on ``ctx`` and validating your backward function using + :func:`torch.autograd.gradcheck`. + + See :ref:`extending-autograd` for more details on how to use this class. + + Examples:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> class Exp(Function): + >>> @staticmethod + >>> def forward(ctx, i): + >>> result = i.exp() + >>> ctx.save_for_backward(result) + >>> return result + >>> + >>> @staticmethod + >>> def backward(ctx, grad_output): + >>> result, = ctx.saved_tensors + >>> return grad_output * result + >>> + >>> # Use it by calling the apply method: + >>> # xdoctest: +SKIP + >>> output = Exp.apply(input) + + .. py:method:: forward(ctx, input: torch.Tensor, dim: int = -1) -> torch.Tensor + :staticmethod: + + Define the forward of the custom autograd Function. + + This function is to be overridden by all subclasses. + There are two ways to define forward: + + Usage 1 (Combined forward and ctx):: + + @staticmethod + def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any: + pass + + - It must accept a context ctx as the first argument, followed by any + number of arguments (tensors or other types). + - See :ref:`combining-forward-context` for more details + + Usage 2 (Separate forward and ctx):: + + @staticmethod + def forward(*args: Any, **kwargs: Any) -> Any: + pass + + @staticmethod + def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: + pass + + - The forward no longer accepts a ctx argument. + - Instead, you must also override the :meth:`torch.autograd.Function.setup_context` + staticmethod to handle setting up the ``ctx`` object. + ``output`` is the output of the forward, ``inputs`` are a Tuple of inputs + to the forward. + - See :ref:`extending-autograd` for more details + + The context can be used to store arbitrary data that can be then + retrieved during the backward pass. Tensors should not be stored + directly on `ctx` (though this is not currently enforced for + backward compatibility). Instead, tensors should be saved either with + :func:`ctx.save_for_backward` if they are intended to be used in + ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward` + if they are intended to be used for in ``jvp``. + + + .. py:method:: backward(ctx, grad_output: torch.Tensor) + :staticmethod: + + Define a formula for differentiating the operation with backward mode automatic differentiation. + + This function is to be overridden by all subclasses. + (Defining this function is equivalent to defining the ``vjp`` function.) 
+ + It must accept a context :attr:`ctx` as the first argument, followed by + as many outputs as the :func:`forward` returned (None will be passed in + for non tensor outputs of the forward function), + and it should return as many tensors, as there were inputs to + :func:`forward`. Each argument is the gradient w.r.t the given output, + and each returned value should be the gradient w.r.t. the + corresponding input. If an input is not a Tensor or is a Tensor not + requiring grads, you can just pass None as a gradient for that input. + + The context can be used to retrieve tensors saved during the forward + pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple + of booleans representing whether each input needs gradient. E.g., + :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the + first input to :func:`forward` needs gradient computed w.r.t. the + output. + + + +.. py:function:: copy_to_model_parallel_region(input: torch.Tensor) -> torch.Tensor + + +.. py:function:: reduce_from_model_parallel_region(input: torch.Tensor) -> torch.Tensor + + +.. py:function:: scatter_to_model_parallel_region(input: torch.Tensor, dim: int = -1) -> torch.Tensor + + +.. py:function:: gather_from_model_parallel_region(input: torch.Tensor, dim: int = -1) -> torch.Tensor + + diff --git a/_sources/autoapi/core/common/hpo_utils/index.rst b/_sources/autoapi/core/common/hpo_utils/index.rst new file mode 100644 index 000000000..50b5178b7 --- /dev/null +++ b/_sources/autoapi/core/common/hpo_utils/index.rst @@ -0,0 +1,49 @@ +:py:mod:`core.common.hpo_utils` +=============================== + +.. py:module:: core.common.hpo_utils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.common.hpo_utils.tune_reporter + core.common.hpo_utils.label_metric_dict + + + +.. py:function:: tune_reporter(iters, train_metrics, val_metrics, test_metrics=None, metric_to_opt: str = 'val_loss', min_max: str = 'min') -> None + + Wrapper function for tune.report() + + :param iters: dict with training iteration info (e.g. steps, epochs) + :type iters: dict + :param train_metrics: train metrics dict + :type train_metrics: dict + :param val_metrics: val metrics dict + :type val_metrics: dict + :param test_metrics: test metrics dict, default is None + :type test_metrics: dict, optional + :param metric_to_opt: str for val metric to optimize, default is val_loss + :type metric_to_opt: str, optional + :param min_max: either "min" or "max", determines whether metric_to_opt is to be minimized or maximized, default is min + :type min_max: str, optional + + +.. py:function:: label_metric_dict(metric_dict, split) + + diff --git a/_sources/autoapi/core/common/index.rst b/_sources/autoapi/core/common/index.rst new file mode 100644 index 000000000..412aa24f0 --- /dev/null +++ b/_sources/autoapi/core/common/index.rst @@ -0,0 +1,42 @@ +:py:mod:`core.common` +===================== + +.. py:module:: core.common + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + relaxation/index.rst + + +Submodules +---------- +.. 
toctree:: + :titlesonly: + :maxdepth: 1 + + data_parallel/index.rst + distutils/index.rst + flags/index.rst + gp_utils/index.rst + hpo_utils/index.rst + logger/index.rst + registry/index.rst + transforms/index.rst + tutorial_utils/index.rst + typing/index.rst + utils/index.rst + + diff --git a/_sources/autoapi/core/common/logger/index.rst b/_sources/autoapi/core/common/logger/index.rst new file mode 100644 index 000000000..5beb6e62f --- /dev/null +++ b/_sources/autoapi/core/common/logger/index.rst @@ -0,0 +1,107 @@ +:py:mod:`core.common.logger` +============================ + +.. py:module:: core.common.logger + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.common.logger.Logger + core.common.logger.WandBLogger + core.common.logger.TensorboardLogger + + + + +.. py:class:: Logger(config) + + + Bases: :py:obj:`abc.ABC` + + Generic class to interface with various logging modules, e.g. wandb, + tensorboard, etc. + + .. py:method:: watch(model) + :abstractmethod: + + Monitor parameters and gradients. + + + .. py:method:: log(update_dict, step: int, split: str = '') + + Log some values. + + + .. py:method:: log_plots(plots) -> None + :abstractmethod: + + + .. py:method:: mark_preempting() -> None + :abstractmethod: + + + +.. py:class:: WandBLogger(config) + + + Bases: :py:obj:`Logger` + + Generic class to interface with various logging modules, e.g. wandb, + tensorboard, etc. + + .. py:method:: watch(model) -> None + + Monitor parameters and gradients. + + + .. py:method:: log(update_dict, step: int, split: str = '') -> None + + Log some values. + + + .. py:method:: log_plots(plots, caption: str = '') -> None + + + .. py:method:: mark_preempting() -> None + + + +.. py:class:: TensorboardLogger(config) + + + Bases: :py:obj:`Logger` + + Generic class to interface with various logging modules, e.g. wandb, + tensorboard, etc. + + .. py:method:: watch(model) -> bool + + Monitor parameters and gradients. + + + .. py:method:: log(update_dict, step: int, split: str = '') + + Log some values. + + + .. py:method:: mark_preempting() -> None + + + .. py:method:: log_plots(plots) -> None + + + diff --git a/_sources/autoapi/core/common/registry/index.rst b/_sources/autoapi/core/common/registry/index.rst new file mode 100644 index 000000000..e926f05d1 --- /dev/null +++ b/_sources/autoapi/core/common/registry/index.rst @@ -0,0 +1,244 @@ +:py:mod:`core.common.registry` +============================== + +.. py:module:: core.common.registry + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + # Copyright (c) Meta, Inc. and its affiliates. + # Borrowed from https://github.com/facebookresearch/pythia/blob/master/pythia/common/registry.py. + + Registry is central source of truth. Inspired from Redux's concept of + global store, Registry maintains mappings of various information to unique + keys. Special functions in registry can be used as decorators to register + different kind of classes. 
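   As a hedged sketch of the round trip this registry provides (the decorator and lookup methods are the ones documented below; the model class and its key are made-up placeholders)::

       from fairchem.core.common.registry import registry

       @registry.register_model("my_toy_model")  # hypothetical key
       class MyToyModel:
           ...

       # later, e.g. when a config refers to the model by key:
       cls = registry.get_model_class("my_toy_model")
       model = cls()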
+ + Import the global registry object using + + ``from fairchem.core.common.registry import registry`` + + Various decorators for registry different kind of classes with unique keys + + - Register a model: ``@registry.register_model`` + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.common.registry.Registry + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.common.registry._get_absolute_mapping + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.common.registry.R + core.common.registry.NestedDict + core.common.registry.registry + + +.. py:data:: R + + + +.. py:data:: NestedDict + + + +.. py:function:: _get_absolute_mapping(name: str) + + +.. py:class:: Registry + + + Class for registry object which acts as central source of truth. + + .. py:attribute:: mapping + :type: ClassVar[NestedDict] + + + + .. py:method:: register_task(name: str) + :classmethod: + + Register a new task to registry with key 'name' + :param name: Key with which the task will be registered. + + Usage:: + from fairchem.core.common.registry import registry + from fairchem.core.tasks import BaseTask + @registry.register_task("train") + class TrainTask(BaseTask): + ... + + + .. py:method:: register_dataset(name: str) + :classmethod: + + Register a dataset to registry with key 'name' + + :param name: Key with which the dataset will be registered. + + Usage:: + + from fairchem.core.common.registry import registry + from fairchem.core.datasets import BaseDataset + + @registry.register_dataset("qm9") + class QM9(BaseDataset): + ... + + + .. py:method:: register_model(name: str) + :classmethod: + + Register a model to registry with key 'name' + + :param name: Key with which the model will be registered. + + Usage:: + + from fairchem.core.common.registry import registry + from fairchem.core.modules.layers import CGCNNConv + + @registry.register_model("cgcnn") + class CGCNN(): + ... + + + .. py:method:: register_logger(name: str) + :classmethod: + + Register a logger to registry with key 'name' + + :param name: Key with which the logger will be registered. + + Usage:: + + from fairchem.core.common.registry import registry + + @registry.register_logger("wandb") + class WandBLogger(): + ... + + + .. py:method:: register_trainer(name: str) + :classmethod: + + Register a trainer to registry with key 'name' + + :param name: Key with which the trainer will be registered. + + Usage:: + + from fairchem.core.common.registry import registry + + @registry.register_trainer("active_discovery") + class ActiveDiscoveryTrainer(): + ... + + + .. py:method:: register(name: str, obj) -> None + :classmethod: + + Register an item to registry with key 'name' + + :param name: Key with which the item will be registered. + + Usage:: + + from fairchem.core.common.registry import registry + + registry.register("config", {}) + + + .. py:method:: __import_error(name: str, mapping_name: str) -> RuntimeError + :classmethod: + + + .. py:method:: get_class(name: str, mapping_name: str) + :classmethod: + + + .. py:method:: get_task_class(name: str) + :classmethod: + + + .. py:method:: get_dataset_class(name: str) + :classmethod: + + + .. py:method:: get_model_class(name: str) + :classmethod: + + + .. py:method:: get_logger_class(name: str) + :classmethod: + + + .. py:method:: get_trainer_class(name: str) + :classmethod: + + + .. 
py:method:: get(name: str, default=None, no_warning: bool = False) + :classmethod: + + Get an item from registry with key 'name' + + :param name: Key whose value needs to be retrieved. + :type name: string + :param default: If passed and key is not in registry, default value will + be returned with a warning. Default: None + :param no_warning: If passed as True, warning when key doesn't exist + will not be generated. Useful for cgcnn's + internal operations. Default: False + :type no_warning: bool + + Usage:: + + from fairchem.core.common.registry import registry + + config = registry.get("config") + + + .. py:method:: unregister(name: str) + :classmethod: + + Remove an item from registry with key 'name' + + :param name: Key which needs to be removed. + + Usage:: + + from fairchem.core.common.registry import registry + + config = registry.unregister("config") + + + +.. py:data:: registry + + + diff --git a/_sources/autoapi/core/common/relaxation/ase_utils/index.rst b/_sources/autoapi/core/common/relaxation/ase_utils/index.rst new file mode 100644 index 000000000..31ed7d88a --- /dev/null +++ b/_sources/autoapi/core/common/relaxation/ase_utils/index.rst @@ -0,0 +1,105 @@ +:py:mod:`core.common.relaxation.ase_utils` +========================================== + +.. py:module:: core.common.relaxation.ase_utils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + + Utilities to interface OCP models/trainers with the Atomic Simulation + Environment (ASE) + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.common.relaxation.ase_utils.OCPCalculator + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.common.relaxation.ase_utils.batch_to_atoms + + + +.. py:function:: batch_to_atoms(batch) + + +.. py:class:: OCPCalculator(config_yml: str | None = None, checkpoint_path: str | None = None, model_name: str | None = None, local_cache: str | None = None, trainer: str | None = None, cutoff: int = 6, max_neighbors: int = 50, cpu: bool = True, seed: int | None = None) + + + Bases: :py:obj:`ase.calculators.calculator.Calculator` + + Base-class for all ASE calculators. + + A calculator must raise PropertyNotImplementedError if asked for a + property that it can't calculate. So, if calculation of the + stress tensor has not been implemented, get_stress(atoms) should + raise PropertyNotImplementedError. This can be achieved simply by not + including the string 'stress' in the list implemented_properties + which is a class member. These are the names of the standard + properties: 'energy', 'forces', 'stress', 'dipole', 'charges', + 'magmom' and 'magmoms'. + + .. py:attribute:: implemented_properties + :type: ClassVar[list[str]] + :value: ['energy', 'forces'] + + + + .. py:method:: load_checkpoint(checkpoint_path: str, checkpoint: dict | None = None) -> None + + Load existing trained model + + :param checkpoint_path: string + Path to trained model + + + .. py:method:: calculate(atoms: ase.Atoms, properties, system_changes) -> None + + Do the calculation. + + properties: list of str + List of what needs to be calculated. Can be any combination + of 'energy', 'forces', 'stress', 'dipole', 'charges', 'magmom' + and 'magmoms'. + system_changes: list of str + List of what has changed since last calculation. 
Can be + any combination of these six: 'positions', 'numbers', 'cell', + 'pbc', 'initial_charges' and 'initial_magmoms'. + + Subclasses need to implement this, but can ignore properties + and system_changes if they want. Calculated properties should + be inserted into results dictionary like shown in this dummy + example:: + + self.results = {'energy': 0.0, + 'forces': np.zeros((len(atoms), 3)), + 'stress': np.zeros(6), + 'dipole': np.zeros(3), + 'charges': np.zeros(len(atoms)), + 'magmom': 0.0, + 'magmoms': np.zeros(len(atoms))} + + The subclass implementation should first call this + implementation to set the atoms attribute and create any missing + directories. + + + diff --git a/_sources/autoapi/core/common/relaxation/index.rst b/_sources/autoapi/core/common/relaxation/index.rst new file mode 100644 index 000000000..467dcf222 --- /dev/null +++ b/_sources/autoapi/core/common/relaxation/index.rst @@ -0,0 +1,25 @@ +:py:mod:`core.common.relaxation` +================================ + +.. py:module:: core.common.relaxation + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + optimizers/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + ase_utils/index.rst + ml_relaxation/index.rst + + diff --git a/_sources/autoapi/core/common/relaxation/ml_relaxation/index.rst b/_sources/autoapi/core/common/relaxation/ml_relaxation/index.rst new file mode 100644 index 000000000..bde58704f --- /dev/null +++ b/_sources/autoapi/core/common/relaxation/ml_relaxation/index.rst @@ -0,0 +1,43 @@ +:py:mod:`core.common.relaxation.ml_relaxation` +============================================== + +.. py:module:: core.common.relaxation.ml_relaxation + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.common.relaxation.ml_relaxation.ml_relax + + + +.. py:function:: ml_relax(batch, model, steps: int, fmax: float, relax_opt, save_full_traj, device: str = 'cuda:0', transform=None, early_stop_batch: bool = False) + + Runs ML-based relaxations. + :param batch: object + :param model: object + :param steps: int + Max number of steps in the structure relaxation. + :param fmax: float + Structure relaxation terminates when the max force + of the system is no bigger than fmax. + :param relax_opt: str + Optimizer and corresponding parameters to be used for structure relaxations. + :param save_full_traj: bool + Whether to save out the full ASE trajectory. If False, only save out initial and final frames. + + diff --git a/_sources/autoapi/core/common/relaxation/optimizers/index.rst b/_sources/autoapi/core/common/relaxation/optimizers/index.rst new file mode 100644 index 000000000..2d9ed8efc --- /dev/null +++ b/_sources/autoapi/core/common/relaxation/optimizers/index.rst @@ -0,0 +1,15 @@ +:py:mod:`core.common.relaxation.optimizers` +=========================================== + +.. py:module:: core.common.relaxation.optimizers + + +Submodules +---------- +.. 
toctree:: + :titlesonly: + :maxdepth: 1 + + lbfgs_torch/index.rst + + diff --git a/_sources/autoapi/core/common/relaxation/optimizers/lbfgs_torch/index.rst b/_sources/autoapi/core/common/relaxation/optimizers/lbfgs_torch/index.rst new file mode 100644 index 000000000..c8606dd66 --- /dev/null +++ b/_sources/autoapi/core/common/relaxation/optimizers/lbfgs_torch/index.rst @@ -0,0 +1,60 @@ +:py:mod:`core.common.relaxation.optimizers.lbfgs_torch` +======================================================= + +.. py:module:: core.common.relaxation.optimizers.lbfgs_torch + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.common.relaxation.optimizers.lbfgs_torch.LBFGS + core.common.relaxation.optimizers.lbfgs_torch.TorchCalc + + + + +.. py:class:: LBFGS(batch: torch_geometric.data.Batch, model: TorchCalc, maxstep: float = 0.01, memory: int = 100, damping: float = 0.25, alpha: float = 100.0, force_consistent=None, device: str = 'cuda:0', save_full_traj: bool = True, traj_dir: pathlib.Path | None = None, traj_names=None, early_stop_batch: bool = False) + + + .. py:method:: get_energy_and_forces(apply_constraint: bool = True) + + + .. py:method:: set_positions(update, update_mask) -> None + + + .. py:method:: check_convergence(iteration, forces=None, energy=None) + + + .. py:method:: run(fmax, steps) + + + .. py:method:: step(iteration: int, forces: torch.Tensor | None, update_mask: torch.Tensor) -> None + + + .. py:method:: write(energy, forces, update_mask) -> None + + + +.. py:class:: TorchCalc(model, transform=None) + + + .. py:method:: get_energy_and_forces(atoms, apply_constraint: bool = True) + + + .. py:method:: update_graph(atoms) + + + diff --git a/_sources/autoapi/core/common/transforms/index.rst b/_sources/autoapi/core/common/transforms/index.rst new file mode 100644 index 000000000..432b4868a --- /dev/null +++ b/_sources/autoapi/core/common/transforms/index.rst @@ -0,0 +1,50 @@ +:py:mod:`core.common.transforms` +================================ + +.. py:module:: core.common.transforms + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.common.transforms.RandomRotate + + + + +.. py:class:: RandomRotate(degrees, axes: list[int] | None = None) + + + Rotates node positions around a specific axis by a randomly sampled + factor within a given interval. + + :param degrees: Rotation interval from which the rotation + angle is sampled. If `degrees` is a number instead of a + tuple, the interval is given by :math:`[-\mathrm{degrees}, + \mathrm{degrees}]`. + :type degrees: tuple or float + :param axes: The rotation axes. (default: `[0, 1, 2]`) + :type axes: int, optional + + .. py:method:: __call__(data) + + + .. py:method:: __repr__() -> str + + Return repr(self). + + + diff --git a/_sources/autoapi/core/common/tutorial_utils/index.rst b/_sources/autoapi/core/common/tutorial_utils/index.rst new file mode 100644 index 000000000..8efa71380 --- /dev/null +++ b/_sources/autoapi/core/common/tutorial_utils/index.rst @@ -0,0 +1,65 @@ +:py:mod:`core.common.tutorial_utils` +==================================== + +.. 
py:module:: core.common.tutorial_utils + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.common.tutorial_utils.fairchem_root + core.common.tutorial_utils.fairchem_main + core.common.tutorial_utils.describe_fairchem + core.common.tutorial_utils.train_test_val_split + core.common.tutorial_utils.generate_yml_config + + + +.. py:function:: fairchem_root() + + Return the root directory of the installed fairchem-core package. + + +.. py:function:: fairchem_main() + + Return the path to fairchem main.py + + +.. py:function:: describe_fairchem() + + Print some system information that could be useful in debugging. + + +.. py:function:: train_test_val_split(ase_db, ttv=(0.8, 0.1, 0.1), files=('train.db', 'test.db', 'val.db'), seed=42) + + Split an ase db into train, test and validation dbs. + + ase_db: path to an ase db containing all the data. + ttv: a tuple containing the fraction of train, test and val data. This will be normalized. + files: a tuple of filenames to write the splits into. An exception is raised if these exist. + You should delete them first. + seed: an integer for the random number generator seed + + Returns the absolute path to files. + + +.. py:function:: generate_yml_config(checkpoint_path, yml='run.yml', delete=(), update=()) + + Generate a yml config file from an existing checkpoint file. + + checkpoint_path: string to path of an existing checkpoint + yml: name of file to write to. + pop: list of keys to remove from the config + update: dictionary of key:values to update + + Use a dot notation in update. + + Returns an absolute path to the generated yml file. + + diff --git a/_sources/autoapi/core/common/typing/index.rst b/_sources/autoapi/core/common/typing/index.rst new file mode 100644 index 000000000..1b9a4a5cc --- /dev/null +++ b/_sources/autoapi/core/common/typing/index.rst @@ -0,0 +1,38 @@ +:py:mod:`core.common.typing` +============================ + +.. py:module:: core.common.typing + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.common.typing.assert_is_instance + core.common.typing.none_throws + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.common.typing._T + + +.. py:data:: _T + + + +.. py:function:: assert_is_instance(obj: object, cls: type[_T]) -> _T + + +.. py:function:: none_throws(x: _T | None, msg: str | None = None) -> _T + + diff --git a/_sources/autoapi/core/common/utils/index.rst b/_sources/autoapi/core/common/utils/index.rst new file mode 100644 index 000000000..629dfcda4 --- /dev/null +++ b/_sources/autoapi/core/common/utils/index.rst @@ -0,0 +1,290 @@ +:py:mod:`core.common.utils` +=========================== + +.. py:module:: core.common.utils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.common.utils.UniqueKeyLoader + core.common.utils.Complete + core.common.utils.SeverityLevelBetween + + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + core.common.utils.pyg2_data_transform + core.common.utils.save_checkpoint + core.common.utils.warmup_lr_lambda + core.common.utils.print_cuda_usage + core.common.utils.conditional_grad + core.common.utils.plot_histogram + core.common.utils.collate + core.common.utils.add_edge_distance_to_graph + core.common.utils._import_local_file + core.common.utils.setup_experimental_imports + core.common.utils._get_project_root + core.common.utils.setup_imports + core.common.utils.dict_set_recursively + core.common.utils.parse_value + core.common.utils.create_dict_from_args + core.common.utils.load_config + core.common.utils.build_config + core.common.utils.create_grid + core.common.utils.save_experiment_log + core.common.utils.get_pbc_distances + core.common.utils.radius_graph_pbc + core.common.utils.get_max_neighbors_mask + core.common.utils.get_pruned_edge_idx + core.common.utils.merge_dicts + core.common.utils.setup_logging + core.common.utils.compute_neighbors + core.common.utils.check_traj_files + core.common.utils.new_trainer_context + core.common.utils._resolve_scale_factor_submodule + core.common.utils._report_incompat_keys + core.common.utils.load_state_dict + core.common.utils.scatter_det + core.common.utils.get_commit_hash + core.common.utils.cg_change_mat + core.common.utils.irreps_sum + core.common.utils.update_config + core.common.utils.get_loss_module + + + +.. py:class:: UniqueKeyLoader(stream) + + + Bases: :py:obj:`yaml.SafeLoader` + + .. py:method:: construct_mapping(node, deep=False) + + + +.. py:function:: pyg2_data_transform(data: torch_geometric.data.Data) + + if we're on the new pyg (2.0 or later) and if the Data stored is in older format + we need to convert the data to the new format + + +.. py:function:: save_checkpoint(state, checkpoint_dir: str = 'checkpoints/', checkpoint_file: str = 'checkpoint.pt') -> str + + +.. py:class:: Complete + + + .. py:method:: __call__(data) + + + +.. py:function:: warmup_lr_lambda(current_step: int, optim_config) + + Returns a learning rate multiplier. + Till `warmup_steps`, learning rate linearly increases to `initial_lr`, + and then gets multiplied by `lr_gamma` every time a milestone is crossed. + + +.. py:function:: print_cuda_usage() -> None + + +.. py:function:: conditional_grad(dec) + + Decorator to enable/disable grad depending on whether force/energy predictions are being made + + +.. py:function:: plot_histogram(data, xlabel: str = '', ylabel: str = '', title: str = '') + + +.. py:function:: collate(data_list) + + +.. py:function:: add_edge_distance_to_graph(batch, device='cpu', dmin: float = 0.0, dmax: float = 6.0, num_gaussians: int = 50) + + +.. py:function:: _import_local_file(path: pathlib.Path, *, project_root: pathlib.Path) -> None + + Imports a Python file as a module + + :param path: The path to the file to import + :type path: Path + :param project_root: The root directory of the project (i.e., the "ocp" folder) + :type project_root: Path + + +.. py:function:: setup_experimental_imports(project_root: pathlib.Path) -> None + + Import selected directories of modules from the "experimental" subdirectory. + + If a file named ".include" is present in the "experimental" subdirectory, + this will be read as a list of experimental subdirectories whose module + (including in any subsubdirectories) should be imported. + + :param project_root: The root directory of the project (i.e., the "ocp" folder) + + +.. 
py:function:: _get_project_root() -> pathlib.Path + + Gets the root folder of the project (the "ocp" folder) + :return: The absolute path to the project root. + + +.. py:function:: setup_imports(config: dict | None = None) -> None + + +.. py:function:: dict_set_recursively(dictionary, key_sequence, val) -> None + + +.. py:function:: parse_value(value) + + Parse string as Python literal if possible and fallback to string. + + +.. py:function:: create_dict_from_args(args: list, sep: str = '.') + + Create a (nested) dictionary from console arguments. + Keys in different dictionary levels are separated by sep. + + +.. py:function:: load_config(path: str, previous_includes: list | None = None) + + +.. py:function:: build_config(args, args_override) + + +.. py:function:: create_grid(base_config, sweep_file: str) + + +.. py:function:: save_experiment_log(args, jobs, configs) + + +.. py:function:: get_pbc_distances(pos, edge_index, cell, cell_offsets, neighbors, return_offsets: bool = False, return_distance_vec: bool = False) + + +.. py:function:: radius_graph_pbc(data, radius, max_num_neighbors_threshold, enforce_max_neighbors_strictly: bool = False, pbc=None) + + +.. py:function:: get_max_neighbors_mask(natoms, index, atom_distance, max_num_neighbors_threshold, degeneracy_tolerance: float = 0.01, enforce_max_strictly: bool = False) + + Give a mask that filters out edges so that each atom has at most + `max_num_neighbors_threshold` neighbors. + Assumes that `index` is sorted. + + Enforcing the max strictly can force the arbitrary choice between + degenerate edges. This can lead to undesired behaviors; for + example, bulk formation energies which are not invariant to + unit cell choice. + + A degeneracy tolerance can help prevent sudden changes in edge + existence from small changes in atom position, for example, + rounding errors, slab relaxation, temperature, etc. + + +.. py:function:: get_pruned_edge_idx(edge_index, num_atoms: int, max_neigh: float = 1000000000.0) -> torch.Tensor + + +.. py:function:: merge_dicts(dict1: dict, dict2: dict) + + Recursively merge two dictionaries. + Values in dict2 override values in dict1. If dict1 and dict2 contain a dictionary as a + value, this will call itself recursively to merge these dictionaries. + This does not modify the input dictionaries (creates an internal copy). + Additionally returns a list of detected duplicates. + Adapted from https://github.com/TUM-DAML/seml/blob/master/seml/utils.py + + :param dict1: First dict. + :type dict1: dict + :param dict2: Second dict. Values in dict2 will override values from dict1 in case they share the same key. + :type dict2: dict + + :returns: **return_dict** -- Merged dictionaries. + :rtype: dict + + +.. py:class:: SeverityLevelBetween(min_level: int, max_level: int) + + + Bases: :py:obj:`logging.Filter` + + Filter instances are used to perform arbitrary filtering of LogRecords. + + Loggers and Handlers can optionally use Filter instances to filter + records as desired. The base filter class only allows events which are + below a certain point in the logger hierarchy. For example, a filter + initialized with "A.B" will allow events logged by loggers "A.B", + "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If + initialized with the empty string, all events are passed. + + .. py:method:: filter(record) -> bool + + Determine if the specified record is to be logged. + + Returns True if the record should be logged, or False otherwise. + If deemed appropriate, the record may be modified in-place. 
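+
+   As a rough usage sketch (illustrative only; fairchem's own ``setup_logging``
+   wires this filter up internally and may differ in detail), a severity-band
+   filter like this one can be attached to a standard ``logging`` handler. The
+   import path below assumes the installed package name ``fairchem.core``::
+
+       import logging
+
+       from fairchem.core.common.utils import SeverityLevelBetween
+
+       handler = logging.StreamHandler()
+       # Restrict this handler to records whose level lies between the two bounds.
+       handler.addFilter(SeverityLevelBetween(logging.INFO, logging.WARNING))
+       logging.getLogger().addHandler(handler)
+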
+ + + +.. py:function:: setup_logging() -> None + + +.. py:function:: compute_neighbors(data, edge_index) + + +.. py:function:: check_traj_files(batch, traj_dir) -> bool + + +.. py:function:: new_trainer_context(*, config: dict[str, Any], distributed: bool = False) + + +.. py:function:: _resolve_scale_factor_submodule(model: torch.nn.Module, name: str) + + +.. py:function:: _report_incompat_keys(model: torch.nn.Module, keys: torch.nn.modules.module._IncompatibleKeys, strict: bool = False) -> tuple[list[str], list[str]] + + +.. py:function:: load_state_dict(module: torch.nn.Module, state_dict: collections.abc.Mapping[str, torch.Tensor], strict: bool = True) -> tuple[list[str], list[str]] + + +.. py:function:: scatter_det(*args, **kwargs) + + +.. py:function:: get_commit_hash() + + +.. py:function:: cg_change_mat(ang_mom: int, device: str = 'cpu') -> torch.tensor + + +.. py:function:: irreps_sum(ang_mom: int) -> int + + Returns the sum of the dimensions of the irreps up to the specified angular momentum. + + :param ang_mom: max angular momenttum to sum up dimensions of irreps + + +.. py:function:: update_config(base_config) + + Configs created prior to OCP 2.0 are organized a little different than they + are now. Update old configs to fit the new expected structure. + + +.. py:function:: get_loss_module(loss_name) + + diff --git a/_sources/autoapi/core/datasets/_utils/index.rst b/_sources/autoapi/core/datasets/_utils/index.rst new file mode 100644 index 000000000..d01a3c880 --- /dev/null +++ b/_sources/autoapi/core/datasets/_utils/index.rst @@ -0,0 +1,35 @@ +:py:mod:`core.datasets._utils` +============================== + +.. py:module:: core.datasets._utils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.datasets._utils.rename_data_object_keys + + + +.. py:function:: rename_data_object_keys(data_object: torch_geometric.data.Data, key_mapping: dict[str, str]) -> torch_geometric.data.Data + + Rename data object keys + + :param data_object: data object + :param key_mapping: dictionary specifying keys to rename and new names {prev_key: new_key} + + diff --git a/_sources/autoapi/core/datasets/ase_datasets/index.rst b/_sources/autoapi/core/datasets/ase_datasets/index.rst new file mode 100644 index 000000000..2e7b438a4 --- /dev/null +++ b/_sources/autoapi/core/datasets/ase_datasets/index.rst @@ -0,0 +1,319 @@ +:py:mod:`core.datasets.ase_datasets` +==================================== + +.. py:module:: core.datasets.ase_datasets + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.datasets.ase_datasets.AseAtomsDataset + core.datasets.ase_datasets.AseReadDataset + core.datasets.ase_datasets.AseReadMultiStructureDataset + core.datasets.ase_datasets.AseDBDataset + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.datasets.ase_datasets.apply_one_tags + + + +.. py:function:: apply_one_tags(atoms: ase.Atoms, skip_if_nonzero: bool = True, skip_always: bool = False) + + This function will apply tags of 1 to an ASE atoms object. + It is used as an atoms_transform in the datasets contained in this file. 
+ + Certain models will treat atoms differently depending on their tags. + For example, GemNet-OC by default will only compute triplet and quadruplet interactions + for atoms with non-zero tags. This model throws an error if there are no tagged atoms. + For this reason, the default behavior is to tag atoms in structures with no tags. + + :param skip_if_nonzero: If at least one atom has a nonzero tag, do not tag any atoms + :type skip_if_nonzero: bool + :param skip_always: Do not apply any tags. This arg exists so that this function can be disabled + without needing to pass a callable (which is currently difficult to do with main.py) + :type skip_always: bool + + +.. py:class:: AseAtomsDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) + + + Bases: :py:obj:`torch.utils.data.Dataset`, :py:obj:`abc.ABC` + + This is an abstract Dataset that includes helpful utilities for turning + ASE atoms objects into OCP-usable data objects. This should not be instantiated directly + as get_atoms_object and load_dataset_get_ids are not implemented in this base class. + + Derived classes must add at least two things: + self.get_atoms_object(id): a function that takes an identifier and returns a corresponding atoms object + + self.load_dataset_get_ids(config: dict): This function is responsible for any initialization/loads + of the dataset and importantly must return a list of all possible identifiers that can be passed into + self.get_atoms_object(id) + + Identifiers need not be any particular type. + + .. py:method:: __len__() -> int + + + .. py:method:: __getitem__(idx) + + + .. py:method:: get_atoms(idx: str | int) -> ase.Atoms + :abstractmethod: + + + .. py:method:: _load_dataset_get_ids(config) + :abstractmethod: + + + .. py:method:: get_relaxed_energy(identifier) + :abstractmethod: + + + .. py:method:: close_db() -> None + + + .. py:method:: get_metadata(num_samples: int = 100) -> dict + + + +.. py:class:: AseReadDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) + + + Bases: :py:obj:`AseAtomsDataset` + + This Dataset uses ase.io.read to load data from a directory on disk. + This is intended for small-scale testing and demonstrations of OCP. + Larger datasets are better served by the efficiency of other dataset types + such as LMDB. + + For a full list of ASE-readable filetypes, see + https://wiki.fysik.dtu.dk/ase/ase/io/io.html + + :param config: src (str): The source folder that contains your ASE-readable files + + pattern (str): Filepath matching each file you want to read + ex. "*/POSCAR", "*.cif", "*.xyz" + search recursively with two wildcards: "**/POSCAR" or "**/*.cif" + + a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs() + default options will work for most users + + If you are using this for a training dataset, set + "r_energy":True, "r_forces":True, and/or "r_stress":True as appropriate + In that case, energy/forces must be in the files you read (ex. OUTCAR) + + ase_read_args (dict): Keyword arguments for ase.io.read() + + keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need + to iterate over a dataset many times (e.g. training for many epochs). + Not recommended for large datasets. + + include_relaxed_energy (bool): Include the relaxed energy in the resulting data object. + The relaxed structure is assumed to be the final structure in the file + (e.g. the last frame of a .traj). 
+ + atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable + + transform_args (dict): Additional keyword arguments for the transform callable + + key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used + in the model with the corresponding property as it was named in the dataset. Only need to use if + the name is different. + :type config: dict + :param atoms_transform: Additional preprocessing function applied to the Atoms + object. Useful for applying tags, for example. + :type atoms_transform: callable, optional + + .. py:method:: _load_dataset_get_ids(config) -> list[pathlib.Path] + + + .. py:method:: get_atoms(idx: str | int) -> ase.Atoms + + + .. py:method:: get_relaxed_energy(identifier) -> float + + + +.. py:class:: AseReadMultiStructureDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) + + + Bases: :py:obj:`AseAtomsDataset` + + This Dataset can read multiple structures from each file using ase.io.read. + The disadvantage is that all files must be read at startup. + This is a significant cost for large datasets. + + This is intended for small-scale testing and demonstrations of OCP. + Larger datasets are better served by the efficiency of other dataset types + such as LMDB. + + For a full list of ASE-readable filetypes, see + https://wiki.fysik.dtu.dk/ase/ase/io/io.html + + :param config: src (str): The source folder that contains your ASE-readable files + + pattern (str): Filepath matching each file you want to read + ex. "*.traj", "*.xyz" + search recursively with two wildcards: "**/POSCAR" or "**/*.cif" + + index_file (str): Filepath to an indexing file, which contains each filename + and the number of structures contained in each file. For instance: + + /path/to/relaxation1.traj 200 + /path/to/relaxation2.traj 150 + + This will overrule the src and pattern that you specify! + + a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs() + default options will work for most users + + If you are using this for a training dataset, set + "r_energy":True, "r_forces":True, and/or "r_stress":True as appropriate + In that case, energy/forces must be in the files you read (ex. OUTCAR) + + ase_read_args (dict): Keyword arguments for ase.io.read() + + keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need + to iterate over a dataset many times (e.g. training for many epochs). + Not recommended for large datasets. + + include_relaxed_energy (bool): Include the relaxed energy in the resulting data object. + The relaxed structure is assumed to be the final structure in the file + (e.g. the last frame of a .traj). + + use_tqdm (bool): Use TQDM progress bar when initializing dataset + + atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable + + transform_args (dict): Additional keyword arguments for the transform callable + + key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used + in the model with the corresponding property as it was named in the dataset. Only need to use if + the name is different. + :type config: dict + :param atoms_transform: Additional preprocessing function applied to the Atoms + object. Useful for applying tags, for example. + :type atoms_transform: callable, optional + :param transform: Additional preprocessing function for the Data object + :type transform: callable, optional + + .. 
py:method:: _load_dataset_get_ids(config) -> list[str] + + + .. py:method:: get_atoms(idx: str) -> ase.Atoms + + + .. py:method:: get_metadata(num_samples: int = 100) -> dict + + + .. py:method:: get_relaxed_energy(identifier) -> float + + + +.. py:class:: AseDBDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) + + + Bases: :py:obj:`AseAtomsDataset` + + This Dataset connects to an ASE Database, allowing the storage of atoms objects + with a variety of backends including JSON, SQLite, and database server options. + + For more information, see: + https://databases.fysik.dtu.dk/ase/ase/db/db.html + + :param config: + src (str): Either + - the path an ASE DB, + - the connection address of an ASE DB, + - a folder with multiple ASE DBs, + - a list of folders with ASE DBs + - a glob string to use to find ASE DBs, or + - a list of ASE db paths/addresses. + If a folder, every file will be attempted as an ASE DB, and warnings + are raised for any files that can't connect cleanly + + Note that for large datasets, ID loading can be slow and there can be many + ids, so it's advised to make loading the id list as easy as possible. There is not + an obvious way to get a full list of ids from most ASE dbs besides simply looping + through the entire dataset. See the AseLMDBDataset which was written with this usecase + in mind. + + connect_args (dict): Keyword arguments for ase.db.connect() + + select_args (dict): Keyword arguments for ase.db.select() + You can use this to query/filter your database + + a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs() + default options will work for most users + + If you are using this for a training dataset, set + "r_energy":True, "r_forces":True, and/or "r_stress":True as appropriate + In that case, energy/forces must be in the database + + keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need + to iterate over a dataset many times (e.g. training for many epochs). + Not recommended for large datasets. + + atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable + + transforms (dict[str, dict]): Dictionary specifying data transforms as {transform_function: config} + where config is a dictionary specifying arguments to the transform_function + + key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used + in the model with the corresponding property as it was named in the dataset. Only need to use if + the name is different. + :type config: dict + :param atoms_transform: Additional preprocessing function applied to the Atoms + object. Useful for applying tags, for example. + :type atoms_transform: callable, optional + :param transform: deprecated? + :type transform: callable, optional + + .. py:method:: _load_dataset_get_ids(config: dict) -> list[int] + + + .. py:method:: get_atoms(idx: int) -> ase.Atoms + + Get atoms object corresponding to datapoint idx. Useful to read other properties not in data object. + :param idx: index in dataset + :type idx: int + + :returns: ASE atoms corresponding to datapoint idx + :rtype: atoms + + + .. py:method:: connect_db(address: str | pathlib.Path, connect_args: dict | None = None) -> ase.db.core.Database + :staticmethod: + + + .. py:method:: close_db() -> None + + + .. py:method:: get_metadata(num_samples: int = 100) -> dict + + + .. 
py:method:: get_relaxed_energy(identifier) + :abstractmethod: + + + diff --git a/_sources/autoapi/core/datasets/embeddings/atomic_radii/index.rst b/_sources/autoapi/core/datasets/embeddings/atomic_radii/index.rst new file mode 100644 index 000000000..e16ec27fc --- /dev/null +++ b/_sources/autoapi/core/datasets/embeddings/atomic_radii/index.rst @@ -0,0 +1,20 @@ +:py:mod:`core.datasets.embeddings.atomic_radii` +=============================================== + +.. py:module:: core.datasets.embeddings.atomic_radii + +.. autoapi-nested-parse:: + + Atomic radii in picometers + + NaN stored for unavailable parameters. + + + +Module Contents +--------------- + +.. py:data:: ATOMIC_RADII + + + diff --git a/_sources/autoapi/core/datasets/embeddings/continuous_embeddings/index.rst b/_sources/autoapi/core/datasets/embeddings/continuous_embeddings/index.rst new file mode 100644 index 000000000..09fbe260a --- /dev/null +++ b/_sources/autoapi/core/datasets/embeddings/continuous_embeddings/index.rst @@ -0,0 +1,31 @@ +:py:mod:`core.datasets.embeddings.continuous_embeddings` +======================================================== + +.. py:module:: core.datasets.embeddings.continuous_embeddings + +.. autoapi-nested-parse:: + + CGCNN-like embeddings using continuous values instead of original k-hot. + + Properties: + Group number + Period number + Electronegativity + Covalent radius + Valence electrons + First ionization energy + Electron affinity + Block + Atomic Volume + + NaN stored for unavaialable parameters. + + + +Module Contents +--------------- + +.. py:data:: CONTINUOUS_EMBEDDINGS + + + diff --git a/_sources/autoapi/core/datasets/embeddings/index.rst b/_sources/autoapi/core/datasets/embeddings/index.rst new file mode 100644 index 000000000..60d6b3877 --- /dev/null +++ b/_sources/autoapi/core/datasets/embeddings/index.rst @@ -0,0 +1,37 @@ +:py:mod:`core.datasets.embeddings` +================================== + +.. py:module:: core.datasets.embeddings + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + atomic_radii/index.rst + continuous_embeddings/index.rst + khot_embeddings/index.rst + qmof_khot_embeddings/index.rst + + +Package Contents +---------------- + +.. py:data:: ATOMIC_RADII + + + +.. py:data:: CONTINUOUS_EMBEDDINGS + + + +.. py:data:: KHOT_EMBEDDINGS + + + +.. py:data:: QMOF_KHOT_EMBEDDINGS + + + diff --git a/_sources/autoapi/core/datasets/embeddings/khot_embeddings/index.rst b/_sources/autoapi/core/datasets/embeddings/khot_embeddings/index.rst new file mode 100644 index 000000000..fb5181d46 --- /dev/null +++ b/_sources/autoapi/core/datasets/embeddings/khot_embeddings/index.rst @@ -0,0 +1,24 @@ +:py:mod:`core.datasets.embeddings.khot_embeddings` +================================================== + +.. py:module:: core.datasets.embeddings.khot_embeddings + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + Original CGCNN k-hot elemental embeddings. + + + +Module Contents +--------------- + +.. 
py:data:: KHOT_EMBEDDINGS + + + diff --git a/_sources/autoapi/core/datasets/embeddings/qmof_khot_embeddings/index.rst b/_sources/autoapi/core/datasets/embeddings/qmof_khot_embeddings/index.rst new file mode 100644 index 000000000..0ce017258 --- /dev/null +++ b/_sources/autoapi/core/datasets/embeddings/qmof_khot_embeddings/index.rst @@ -0,0 +1,26 @@ +:py:mod:`core.datasets.embeddings.qmof_khot_embeddings` +======================================================= + +.. py:module:: core.datasets.embeddings.qmof_khot_embeddings + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + k-hot elemental embeddings from QMOF, motivated by the following Github Issue threads: + https://github.com/txie-93/cgcnn/issues/2 + https://github.com/arosen93/QMOF/issues/18 + + + +Module Contents +--------------- + +.. py:data:: QMOF_KHOT_EMBEDDINGS + + + diff --git a/_sources/autoapi/core/datasets/index.rst b/_sources/autoapi/core/datasets/index.rst new file mode 100644 index 000000000..02ebbf156 --- /dev/null +++ b/_sources/autoapi/core/datasets/index.rst @@ -0,0 +1,487 @@ +:py:mod:`core.datasets` +======================= + +.. py:module:: core.datasets + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + embeddings/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + _utils/index.rst + ase_datasets/index.rst + lmdb_database/index.rst + lmdb_dataset/index.rst + oc22_lmdb_dataset/index.rst + target_metadata_guesser/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.datasets.AseDBDataset + core.datasets.AseReadDataset + core.datasets.AseReadMultiStructureDataset + core.datasets.LMDBDatabase + core.datasets.LmdbDataset + core.datasets.SinglePointLmdbDataset + core.datasets.TrajectoryLmdbDataset + core.datasets.OC22LmdbDataset + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.datasets.data_list_collater + + + +.. py:class:: AseDBDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) + + + Bases: :py:obj:`AseAtomsDataset` + + This Dataset connects to an ASE Database, allowing the storage of atoms objects + with a variety of backends including JSON, SQLite, and database server options. + + For more information, see: + https://databases.fysik.dtu.dk/ase/ase/db/db.html + + :param config: + src (str): Either + - the path an ASE DB, + - the connection address of an ASE DB, + - a folder with multiple ASE DBs, + - a list of folders with ASE DBs + - a glob string to use to find ASE DBs, or + - a list of ASE db paths/addresses. + If a folder, every file will be attempted as an ASE DB, and warnings + are raised for any files that can't connect cleanly + + Note that for large datasets, ID loading can be slow and there can be many + ids, so it's advised to make loading the id list as easy as possible. There is not + an obvious way to get a full list of ids from most ASE dbs besides simply looping + through the entire dataset. See the AseLMDBDataset which was written with this usecase + in mind. 
+ + connect_args (dict): Keyword arguments for ase.db.connect() + + select_args (dict): Keyword arguments for ase.db.select() + You can use this to query/filter your database + + a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs() + default options will work for most users + + If you are using this for a training dataset, set + "r_energy":True, "r_forces":True, and/or "r_stress":True as appropriate + In that case, energy/forces must be in the database + + keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need + to iterate over a dataset many times (e.g. training for many epochs). + Not recommended for large datasets. + + atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable + + transforms (dict[str, dict]): Dictionary specifying data transforms as {transform_function: config} + where config is a dictionary specifying arguments to the transform_function + + key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used + in the model with the corresponding property as it was named in the dataset. Only need to use if + the name is different. + :type config: dict + :param atoms_transform: Additional preprocessing function applied to the Atoms + object. Useful for applying tags, for example. + :type atoms_transform: callable, optional + :param transform: deprecated? + :type transform: callable, optional + + .. py:method:: _load_dataset_get_ids(config: dict) -> list[int] + + + .. py:method:: get_atoms(idx: int) -> ase.Atoms + + Get atoms object corresponding to datapoint idx. Useful to read other properties not in data object. + :param idx: index in dataset + :type idx: int + + :returns: ASE atoms corresponding to datapoint idx + :rtype: atoms + + + .. py:method:: connect_db(address: str | pathlib.Path, connect_args: dict | None = None) -> ase.db.core.Database + :staticmethod: + + + .. py:method:: close_db() -> None + + + .. py:method:: get_metadata(num_samples: int = 100) -> dict + + + .. py:method:: get_relaxed_energy(identifier) + :abstractmethod: + + + +.. py:class:: AseReadDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) + + + Bases: :py:obj:`AseAtomsDataset` + + This Dataset uses ase.io.read to load data from a directory on disk. + This is intended for small-scale testing and demonstrations of OCP. + Larger datasets are better served by the efficiency of other dataset types + such as LMDB. + + For a full list of ASE-readable filetypes, see + https://wiki.fysik.dtu.dk/ase/ase/io/io.html + + :param config: src (str): The source folder that contains your ASE-readable files + + pattern (str): Filepath matching each file you want to read + ex. "*/POSCAR", "*.cif", "*.xyz" + search recursively with two wildcards: "**/POSCAR" or "**/*.cif" + + a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs() + default options will work for most users + + If you are using this for a training dataset, set + "r_energy":True, "r_forces":True, and/or "r_stress":True as appropriate + In that case, energy/forces must be in the files you read (ex. OUTCAR) + + ase_read_args (dict): Keyword arguments for ase.io.read() + + keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need + to iterate over a dataset many times (e.g. training for many epochs). + Not recommended for large datasets. 
+ + include_relaxed_energy (bool): Include the relaxed energy in the resulting data object. + The relaxed structure is assumed to be the final structure in the file + (e.g. the last frame of a .traj). + + atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable + + transform_args (dict): Additional keyword arguments for the transform callable + + key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used + in the model with the corresponding property as it was named in the dataset. Only need to use if + the name is different. + :type config: dict + :param atoms_transform: Additional preprocessing function applied to the Atoms + object. Useful for applying tags, for example. + :type atoms_transform: callable, optional + + .. py:method:: _load_dataset_get_ids(config) -> list[pathlib.Path] + + + .. py:method:: get_atoms(idx: str | int) -> ase.Atoms + + + .. py:method:: get_relaxed_energy(identifier) -> float + + + +.. py:class:: AseReadMultiStructureDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) + + + Bases: :py:obj:`AseAtomsDataset` + + This Dataset can read multiple structures from each file using ase.io.read. + The disadvantage is that all files must be read at startup. + This is a significant cost for large datasets. + + This is intended for small-scale testing and demonstrations of OCP. + Larger datasets are better served by the efficiency of other dataset types + such as LMDB. + + For a full list of ASE-readable filetypes, see + https://wiki.fysik.dtu.dk/ase/ase/io/io.html + + :param config: src (str): The source folder that contains your ASE-readable files + + pattern (str): Filepath matching each file you want to read + ex. "*.traj", "*.xyz" + search recursively with two wildcards: "**/POSCAR" or "**/*.cif" + + index_file (str): Filepath to an indexing file, which contains each filename + and the number of structures contained in each file. For instance: + + /path/to/relaxation1.traj 200 + /path/to/relaxation2.traj 150 + + This will overrule the src and pattern that you specify! + + a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs() + default options will work for most users + + If you are using this for a training dataset, set + "r_energy":True, "r_forces":True, and/or "r_stress":True as appropriate + In that case, energy/forces must be in the files you read (ex. OUTCAR) + + ase_read_args (dict): Keyword arguments for ase.io.read() + + keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need + to iterate over a dataset many times (e.g. training for many epochs). + Not recommended for large datasets. + + include_relaxed_energy (bool): Include the relaxed energy in the resulting data object. + The relaxed structure is assumed to be the final structure in the file + (e.g. the last frame of a .traj). + + use_tqdm (bool): Use TQDM progress bar when initializing dataset + + atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable + + transform_args (dict): Additional keyword arguments for the transform callable + + key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used + in the model with the corresponding property as it was named in the dataset. Only need to use if + the name is different. + :type config: dict + :param atoms_transform: Additional preprocessing function applied to the Atoms + object. 
Useful for applying tags, for example. + :type atoms_transform: callable, optional + :param transform: Additional preprocessing function for the Data object + :type transform: callable, optional + + .. py:method:: _load_dataset_get_ids(config) -> list[str] + + + .. py:method:: get_atoms(idx: str) -> ase.Atoms + + + .. py:method:: get_metadata(num_samples: int = 100) -> dict + + + .. py:method:: get_relaxed_energy(identifier) -> float + + + +.. py:class:: LMDBDatabase(filename: str | pathlib.Path | None = None, create_indices: bool = True, use_lock_file: bool = False, serial: bool = False, readonly: bool = False, *args, **kwargs) + + + Bases: :py:obj:`ase.db.core.Database` + + Base class for all databases. + + .. py:property:: metadata + + Load the metadata from the DB if present + + .. py:property:: _nextid + + Get the id of the next row to be written + + .. py:method:: __enter__() -> typing_extensions.Self + + + .. py:method:: __exit__(exc_type, exc_value, tb) -> None + + + .. py:method:: close() -> None + + + .. py:method:: _write(atoms: ase.Atoms | ase.db.row.AtomsRow, key_value_pairs: dict, data: dict | None, idx: int | None = None) -> None + + + .. py:method:: _update(idx: int, key_value_pairs: dict | None = None, data: dict | None = None) + + + .. py:method:: _write_deleted_ids() + + + .. py:method:: delete(ids: list[int]) -> None + + Delete rows. + + + .. py:method:: _get_row(idx: int, include_data: bool = True) + + + .. py:method:: _get_row_by_index(index: int, include_data: bool = True) + + Auxiliary function to get the ith entry, rather than a specific id + + + .. py:method:: _select(keys, cmps: list[tuple[str, str, str]], explain: bool = False, verbosity: int = 0, limit: int | None = None, offset: int = 0, sort: str | None = None, include_data: bool = True, columns: str = 'all') + + + .. py:method:: count(selection=None, **kwargs) -> int + + Count rows. + + See the select() method for the selection syntax. Use db.count() or + len(db) to count all rows. + + + .. py:method:: _load_ids() -> None + + Load ids from the DB + + Since ASE db ids are mostly 1-N integers, but can be missing entries + if ids have been deleted. To save space and operating under the assumption + that there will probably not be many deletions in most OCP datasets, + we just store the deleted ids. + + + +.. py:class:: LmdbDataset(config) + + + Bases: :py:obj:`torch.utils.data.Dataset`\ [\ :py:obj:`T_co`\ ] + + An abstract class representing a :class:`Dataset`. + + All datasets that represent a map from keys to data samples should subclass + it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a + data sample for a given key. Subclasses could also optionally overwrite + :meth:`__len__`, which is expected to return the size of the dataset by many + :class:`~torch.utils.data.Sampler` implementations and the default options + of :class:`~torch.utils.data.DataLoader`. Subclasses could also + optionally implement :meth:`__getitems__`, for speedup batched samples + loading. This method accepts list of indices of samples of batch and returns + list of samples. + + .. note:: + :class:`~torch.utils.data.DataLoader` by default constructs an index + sampler that yields integral indices. To make it work with a map-style + dataset with non-integral indices/keys, a custom sampler must be provided. + + .. py:attribute:: metadata_path + :type: pathlib.Path + + + + .. 
py:attribute:: sharded + :type: bool + + Dataset class to load from LMDB files containing relaxation + trajectories or single point computations. + Useful for Structure to Energy & Force (S2EF), Initial State to + Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks. + The keys in the LMDB must be integers (stored as ascii objects) starting + from 0 through the length of the LMDB. For historical reasons any key named + "length" is ignored since that was used to infer length of many lmdbs in the same + folder, but lmdb lengths are now calculated directly from the number of keys. + :param config: Dataset configuration + :type config: dict + + .. py:method:: __len__() -> int + + + .. py:method:: __getitem__(idx: int) -> T_co + + + .. py:method:: connect_db(lmdb_path: pathlib.Path | None = None) -> lmdb.Environment + + + .. py:method:: close_db() -> None + + + .. py:method:: get_metadata(num_samples: int = 100) + + + +.. py:class:: SinglePointLmdbDataset(config, transform=None) + + + Bases: :py:obj:`LmdbDataset`\ [\ :py:obj:`torch_geometric.data.data.BaseData`\ ] + + An abstract class representing a :class:`Dataset`. + + All datasets that represent a map from keys to data samples should subclass + it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a + data sample for a given key. Subclasses could also optionally overwrite + :meth:`__len__`, which is expected to return the size of the dataset by many + :class:`~torch.utils.data.Sampler` implementations and the default options + of :class:`~torch.utils.data.DataLoader`. Subclasses could also + optionally implement :meth:`__getitems__`, for speedup batched samples + loading. This method accepts list of indices of samples of batch and returns + list of samples. + + .. note:: + :class:`~torch.utils.data.DataLoader` by default constructs an index + sampler that yields integral indices. To make it work with a map-style + dataset with non-integral indices/keys, a custom sampler must be provided. + + +.. py:class:: TrajectoryLmdbDataset(config, transform=None) + + + Bases: :py:obj:`LmdbDataset`\ [\ :py:obj:`torch_geometric.data.data.BaseData`\ ] + + An abstract class representing a :class:`Dataset`. + + All datasets that represent a map from keys to data samples should subclass + it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a + data sample for a given key. Subclasses could also optionally overwrite + :meth:`__len__`, which is expected to return the size of the dataset by many + :class:`~torch.utils.data.Sampler` implementations and the default options + of :class:`~torch.utils.data.DataLoader`. Subclasses could also + optionally implement :meth:`__getitems__`, for speedup batched samples + loading. This method accepts list of indices of samples of batch and returns + list of samples. + + .. note:: + :class:`~torch.utils.data.DataLoader` by default constructs an index + sampler that yields integral indices. To make it work with a map-style + dataset with non-integral indices/keys, a custom sampler must be provided. + + +.. py:function:: data_list_collater(data_list: list[torch_geometric.data.data.BaseData], otf_graph: bool = False) -> torch_geometric.data.data.BaseData + + +.. py:class:: OC22LmdbDataset(config, transform=None) + + + Bases: :py:obj:`torch.utils.data.Dataset` + + Dataset class to load from LMDB files containing relaxation + trajectories or single point computations. 
+ + Useful for Structure to Energy & Force (S2EF), Initial State to + Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks. + + The keys in the LMDB must be integers (stored as ascii objects) starting + from 0 through the length of the LMDB. For historical reasons any key named + "length" is ignored since that was used to infer length of many lmdbs in the same + folder, but lmdb lengths are now calculated directly from the number of keys. + + :param config: Dataset configuration + :type config: dict + :param transform: Data transform function. + (default: :obj:`None`) + :type transform: callable, optional + + .. py:method:: __len__() -> int + + + .. py:method:: __getitem__(idx) + + + .. py:method:: connect_db(lmdb_path=None) + + + .. py:method:: close_db() -> None + + + diff --git a/_sources/autoapi/core/datasets/lmdb_database/index.rst b/_sources/autoapi/core/datasets/lmdb_database/index.rst new file mode 100644 index 000000000..141122c97 --- /dev/null +++ b/_sources/autoapi/core/datasets/lmdb_database/index.rst @@ -0,0 +1,111 @@ +:py:mod:`core.datasets.lmdb_database` +===================================== + +.. py:module:: core.datasets.lmdb_database + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is modified from the ASE db json backend + and is thus licensed under the corresponding LGPL2.1 license + + The ASE notice for the LGPL2.1 license is available here: + https://gitlab.com/ase/ase/-/blob/master/LICENSE + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.datasets.lmdb_database.LMDBDatabase + + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.datasets.lmdb_database.RESERVED_KEYS + + +.. py:data:: RESERVED_KEYS + :value: ['nextid', 'metadata', 'deleted_ids'] + + + +.. py:class:: LMDBDatabase(filename: str | pathlib.Path | None = None, create_indices: bool = True, use_lock_file: bool = False, serial: bool = False, readonly: bool = False, *args, **kwargs) + + + Bases: :py:obj:`ase.db.core.Database` + + Base class for all databases. + + .. py:property:: metadata + + Load the metadata from the DB if present + + .. py:property:: _nextid + + Get the id of the next row to be written + + .. py:method:: __enter__() -> typing_extensions.Self + + + .. py:method:: __exit__(exc_type, exc_value, tb) -> None + + + .. py:method:: close() -> None + + + .. py:method:: _write(atoms: ase.Atoms | ase.db.row.AtomsRow, key_value_pairs: dict, data: dict | None, idx: int | None = None) -> None + + + .. py:method:: _update(idx: int, key_value_pairs: dict | None = None, data: dict | None = None) + + + .. py:method:: _write_deleted_ids() + + + .. py:method:: delete(ids: list[int]) -> None + + Delete rows. + + + .. py:method:: _get_row(idx: int, include_data: bool = True) + + + .. py:method:: _get_row_by_index(index: int, include_data: bool = True) + + Auxiliary function to get the ith entry, rather than a specific id + + + .. py:method:: _select(keys, cmps: list[tuple[str, str, str]], explain: bool = False, verbosity: int = 0, limit: int | None = None, offset: int = 0, sort: str | None = None, include_data: bool = True, columns: str = 'all') + + + .. py:method:: count(selection=None, **kwargs) -> int + + Count rows. + + See the select() method for the selection syntax. Use db.count() or + len(db) to count all rows. + + + .. py:method:: _load_ids() -> None + + Load ids from the DB + + Since ASE db ids are mostly 1-N integers, but can be missing entries + if ids have been deleted. 
To save space and operating under the assumption + that there will probably not be many deletions in most OCP datasets, + we just store the deleted ids. + + + diff --git a/_sources/autoapi/core/datasets/lmdb_dataset/index.rst b/_sources/autoapi/core/datasets/lmdb_dataset/index.rst new file mode 100644 index 000000000..eeb091a11 --- /dev/null +++ b/_sources/autoapi/core/datasets/lmdb_dataset/index.rst @@ -0,0 +1,154 @@ +:py:mod:`core.datasets.lmdb_dataset` +==================================== + +.. py:module:: core.datasets.lmdb_dataset + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.datasets.lmdb_dataset.LmdbDataset + core.datasets.lmdb_dataset.SinglePointLmdbDataset + core.datasets.lmdb_dataset.TrajectoryLmdbDataset + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.datasets.lmdb_dataset.data_list_collater + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.datasets.lmdb_dataset.T_co + + +.. py:data:: T_co + + + +.. py:class:: LmdbDataset(config) + + + Bases: :py:obj:`torch.utils.data.Dataset`\ [\ :py:obj:`T_co`\ ] + + An abstract class representing a :class:`Dataset`. + + All datasets that represent a map from keys to data samples should subclass + it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a + data sample for a given key. Subclasses could also optionally overwrite + :meth:`__len__`, which is expected to return the size of the dataset by many + :class:`~torch.utils.data.Sampler` implementations and the default options + of :class:`~torch.utils.data.DataLoader`. Subclasses could also + optionally implement :meth:`__getitems__`, for speedup batched samples + loading. This method accepts list of indices of samples of batch and returns + list of samples. + + .. note:: + :class:`~torch.utils.data.DataLoader` by default constructs an index + sampler that yields integral indices. To make it work with a map-style + dataset with non-integral indices/keys, a custom sampler must be provided. + + .. py:attribute:: metadata_path + :type: pathlib.Path + + + + .. py:attribute:: sharded + :type: bool + + Dataset class to load from LMDB files containing relaxation + trajectories or single point computations. + Useful for Structure to Energy & Force (S2EF), Initial State to + Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks. + The keys in the LMDB must be integers (stored as ascii objects) starting + from 0 through the length of the LMDB. For historical reasons any key named + "length" is ignored since that was used to infer length of many lmdbs in the same + folder, but lmdb lengths are now calculated directly from the number of keys. + :param config: Dataset configuration + :type config: dict + + .. py:method:: __len__() -> int + + + .. py:method:: __getitem__(idx: int) -> T_co + + + .. py:method:: connect_db(lmdb_path: pathlib.Path | None = None) -> lmdb.Environment + + + .. py:method:: close_db() -> None + + + .. py:method:: get_metadata(num_samples: int = 100) + + + +.. py:class:: SinglePointLmdbDataset(config, transform=None) + + + Bases: :py:obj:`LmdbDataset`\ [\ :py:obj:`torch_geometric.data.data.BaseData`\ ] + + An abstract class representing a :class:`Dataset`. + + All datasets that represent a map from keys to data samples should subclass + it. 
All subclasses should overwrite :meth:`__getitem__`, supporting fetching a + data sample for a given key. Subclasses could also optionally overwrite + :meth:`__len__`, which is expected to return the size of the dataset by many + :class:`~torch.utils.data.Sampler` implementations and the default options + of :class:`~torch.utils.data.DataLoader`. Subclasses could also + optionally implement :meth:`__getitems__`, for speedup batched samples + loading. This method accepts list of indices of samples of batch and returns + list of samples. + + .. note:: + :class:`~torch.utils.data.DataLoader` by default constructs an index + sampler that yields integral indices. To make it work with a map-style + dataset with non-integral indices/keys, a custom sampler must be provided. + + +.. py:class:: TrajectoryLmdbDataset(config, transform=None) + + + Bases: :py:obj:`LmdbDataset`\ [\ :py:obj:`torch_geometric.data.data.BaseData`\ ] + + An abstract class representing a :class:`Dataset`. + + All datasets that represent a map from keys to data samples should subclass + it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a + data sample for a given key. Subclasses could also optionally overwrite + :meth:`__len__`, which is expected to return the size of the dataset by many + :class:`~torch.utils.data.Sampler` implementations and the default options + of :class:`~torch.utils.data.DataLoader`. Subclasses could also + optionally implement :meth:`__getitems__`, for speedup batched samples + loading. This method accepts list of indices of samples of batch and returns + list of samples. + + .. note:: + :class:`~torch.utils.data.DataLoader` by default constructs an index + sampler that yields integral indices. To make it work with a map-style + dataset with non-integral indices/keys, a custom sampler must be provided. + + +.. py:function:: data_list_collater(data_list: list[torch_geometric.data.data.BaseData], otf_graph: bool = False) -> torch_geometric.data.data.BaseData + + diff --git a/_sources/autoapi/core/datasets/oc22_lmdb_dataset/index.rst b/_sources/autoapi/core/datasets/oc22_lmdb_dataset/index.rst new file mode 100644 index 000000000..fb3293db7 --- /dev/null +++ b/_sources/autoapi/core/datasets/oc22_lmdb_dataset/index.rst @@ -0,0 +1,62 @@ +:py:mod:`core.datasets.oc22_lmdb_dataset` +========================================= + +.. py:module:: core.datasets.oc22_lmdb_dataset + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.datasets.oc22_lmdb_dataset.OC22LmdbDataset + + + + +.. py:class:: OC22LmdbDataset(config, transform=None) + + + Bases: :py:obj:`torch.utils.data.Dataset` + + Dataset class to load from LMDB files containing relaxation + trajectories or single point computations. + + Useful for Structure to Energy & Force (S2EF), Initial State to + Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks. + + The keys in the LMDB must be integers (stored as ascii objects) starting + from 0 through the length of the LMDB. For historical reasons any key named + "length" is ignored since that was used to infer length of many lmdbs in the same + folder, but lmdb lengths are now calculated directly from the number of keys. + + :param config: Dataset configuration + :type config: dict + :param transform: Data transform function. 
+ (default: :obj:`None`) + :type transform: callable, optional + + .. py:method:: __len__() -> int + + + .. py:method:: __getitem__(idx) + + + .. py:method:: connect_db(lmdb_path=None) + + + .. py:method:: close_db() -> None + + + diff --git a/_sources/autoapi/core/datasets/target_metadata_guesser/index.rst b/_sources/autoapi/core/datasets/target_metadata_guesser/index.rst new file mode 100644 index 000000000..388fee8f7 --- /dev/null +++ b/_sources/autoapi/core/datasets/target_metadata_guesser/index.rst @@ -0,0 +1,42 @@ +:py:mod:`core.datasets.target_metadata_guesser` +=============================================== + +.. py:module:: core.datasets.target_metadata_guesser + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.datasets.target_metadata_guesser.uniform_atoms_lengths + core.datasets.target_metadata_guesser.target_constant_shape + core.datasets.target_metadata_guesser.target_per_atom + core.datasets.target_metadata_guesser.target_extensive + core.datasets.target_metadata_guesser.guess_target_metadata + core.datasets.target_metadata_guesser.guess_property_metadata + + + +.. py:function:: uniform_atoms_lengths(atoms_lens) -> bool + + +.. py:function:: target_constant_shape(atoms_lens, target_samples) -> bool + + +.. py:function:: target_per_atom(atoms_lens, target_samples) -> bool + + +.. py:function:: target_extensive(atoms_lens, target_samples, threshold: float = 0.2) + + +.. py:function:: guess_target_metadata(atoms_len, target_samples) + + +.. py:function:: guess_property_metadata(atoms_list) + + diff --git a/_sources/autoapi/core/index.rst b/_sources/autoapi/core/index.rst new file mode 100644 index 000000000..deca3e7d9 --- /dev/null +++ b/_sources/autoapi/core/index.rst @@ -0,0 +1,47 @@ +:py:mod:`core` +============== + +.. py:module:: core + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + common/index.rst + datasets/index.rst + models/index.rst + modules/index.rst + preprocessing/index.rst + scripts/index.rst + tasks/index.rst + tests/index.rst + trainers/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + _cli/index.rst + + +Package Contents +---------------- + +.. py:data:: __version__ + + + diff --git a/_sources/autoapi/core/models/base/index.rst b/_sources/autoapi/core/models/base/index.rst new file mode 100644 index 000000000..649adc9ff --- /dev/null +++ b/_sources/autoapi/core/models/base/index.rst @@ -0,0 +1,80 @@ +:py:mod:`core.models.base` +========================== + +.. py:module:: core.models.base + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.base.BaseModel + + + + +.. py:class:: BaseModel(num_atoms=None, bond_feat_dim=None, num_targets=None) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. 
You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:property:: num_params + :type: int + + + .. py:method:: forward(data) + :abstractmethod: + + + .. py:method:: generate_graph(data, cutoff=None, max_neighbors=None, use_pbc=None, otf_graph=None, enforce_max_neighbors_strictly=None) + + + .. py:method:: no_weight_decay() -> list + + Returns a list of parameters with no weight decay. + + + diff --git a/_sources/autoapi/core/models/dimenet_plus_plus/index.rst b/_sources/autoapi/core/models/dimenet_plus_plus/index.rst new file mode 100644 index 000000000..d01d3f104 --- /dev/null +++ b/_sources/autoapi/core/models/dimenet_plus_plus/index.rst @@ -0,0 +1,256 @@ +:py:mod:`core.models.dimenet_plus_plus` +======================================= + +.. py:module:: core.models.dimenet_plus_plus + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + --- + + This code borrows heavily from the DimeNet implementation as part of + pytorch-geometric: https://github.com/rusty1s/pytorch_geometric. License: + + --- + + Copyright (c) 2020 Matthias Fey + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.dimenet_plus_plus.InteractionPPBlock + core.models.dimenet_plus_plus.OutputPPBlock + core.models.dimenet_plus_plus.DimeNetPlusPlus + core.models.dimenet_plus_plus.DimeNetPlusPlusWrap + + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.models.dimenet_plus_plus.sym + + +.. py:data:: sym + + + +.. 
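The DimeNet++ classes below are built on :py:obj:`fairchem.core.models.base.BaseModel`
(documented above), which mainly contributes graph construction via :py:meth:`generate_graph`
plus bookkeeping such as ``num_params`` and ``no_weight_decay``. As rough orientation only, a
hypothetical subclass could look like the sketch below; the class name, the unpacking of the
``generate_graph`` return value, the batch attributes (``atomic_numbers``, ``batch``,
``natoms``) and the dictionary output format are assumptions for illustration, not fairchem
API guarantees::

    import torch
    from fairchem.core.models.base import BaseModel

    class ToyEnergyModel(BaseModel):
        # Hypothetical example, not part of fairchem.
        def __init__(self, num_atoms=None, bond_feat_dim=None, num_targets=1, hidden=64):
            super().__init__(num_atoms, bond_feat_dim, num_targets)
            self.embedding = torch.nn.Embedding(100, hidden)
            self.energy_head = torch.nn.Linear(hidden, 1)

        def forward(self, data):
            # BaseModel.generate_graph builds the (possibly periodic) radius graph;
            # the exact contents of the returned tuple are assumed here.
            edge_index, edge_dist, *_ = self.generate_graph(data)
            h = self.embedding(data.atomic_numbers.long())
            # Sum per-atom contributions into one energy per structure.
            energy = torch.zeros(len(data.natoms), 1, device=h.device)
            energy.index_add_(0, data.batch, self.energy_head(h))
            return {"energy": energy}

..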
py:class:: InteractionPPBlock(hidden_channels: int, int_emb_size: int, basis_emb_size: int, num_spherical: int, num_radial: int, num_before_skip: int, num_after_skip: int, act='silu') + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: reset_parameters() -> None + + + .. py:method:: forward(x, rbf, sbf, idx_kj, idx_ji) + + + +.. py:class:: OutputPPBlock(num_radial: int, hidden_channels: int, out_emb_channels: int, out_channels: int, num_layers: int, act: str = 'silu') + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: reset_parameters() -> None + + + .. py:method:: forward(x, rbf, i, num_nodes: int | None = None) + + + +.. py:class:: DimeNetPlusPlus(hidden_channels: int, out_channels: int, num_blocks: int, int_emb_size: int, basis_emb_size: int, out_emb_channels: int, num_spherical: int, num_radial: int, cutoff: float = 5.0, envelope_exponent: int = 5, num_before_skip: int = 1, num_after_skip: int = 2, num_output_layers: int = 3, act: str = 'silu') + + + Bases: :py:obj:`torch.nn.Module` + + DimeNet++ implementation based on https://github.com/klicperajo/dimenet. + + :param hidden_channels: Hidden embedding size. + :type hidden_channels: int + :param out_channels: Size of each output sample. + :type out_channels: int + :param num_blocks: Number of building blocks. + :type num_blocks: int + :param int_emb_size: Embedding size used for interaction triplets + :type int_emb_size: int + :param basis_emb_size: Embedding size used in the basis transformation + :type basis_emb_size: int + :param out_emb_channels: Embedding size used for atoms in the output block + :type out_emb_channels: int + :param num_spherical: Number of spherical harmonics. 
+ :type num_spherical: int
+ :param num_radial: Number of radial basis functions.
+ :type num_radial: int
+ :param cutoff: (float, optional): Cutoff distance for interatomic
+ interactions. (default: :obj:`5.0`)
+ :param envelope_exponent: Shape of the smooth cutoff.
+ (default: :obj:`5`)
+ :type envelope_exponent: int, optional
+ :param num_before_skip: (int, optional): Number of residual layers in the
+ interaction blocks before the skip connection. (default: :obj:`1`)
+ :param num_after_skip: (int, optional): Number of residual layers in the
+ interaction blocks after the skip connection. (default: :obj:`2`)
+ :param num_output_layers: (int, optional): Number of linear layers for the
+ output blocks. (default: :obj:`3`)
+ :param act: (function, optional): The activation function.
+ (default: :obj:`silu`)
+
+ .. py:attribute:: url
+ :value: 'https://github.com/klicperajo/dimenet/raw/master/pretrained'
+
+
+
+ .. py:method:: reset_parameters() -> None
+
+
+ .. py:method:: triplets(edge_index, cell_offsets, num_nodes: int)
+
+
+ .. py:method:: forward(z, pos, batch=None)
+ :abstractmethod:
+
+
+
+.. py:class:: DimeNetPlusPlusWrap(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, hidden_channels: int = 128, num_blocks: int = 4, int_emb_size: int = 64, basis_emb_size: int = 8, out_emb_channels: int = 256, num_spherical: int = 7, num_radial: int = 6, otf_graph: bool = False, cutoff: float = 10.0, envelope_exponent: int = 5, num_before_skip: int = 1, num_after_skip: int = 2, num_output_layers: int = 3)
+
+
+ Bases: :py:obj:`DimeNetPlusPlus`, :py:obj:`fairchem.core.models.base.BaseModel`
+
+ DimeNet++ implementation based on https://github.com/klicperajo/dimenet.
+
+ :param hidden_channels: Hidden embedding size.
+ :type hidden_channels: int
+ :param out_channels: Size of each output sample.
+ :type out_channels: int
+ :param num_blocks: Number of building blocks.
+ :type num_blocks: int
+ :param int_emb_size: Embedding size used for interaction triplets
+ :type int_emb_size: int
+ :param basis_emb_size: Embedding size used in the basis transformation
+ :type basis_emb_size: int
+ :param out_emb_channels: Embedding size used for atoms in the output block
+ :type out_emb_channels: int
+ :param num_spherical: Number of spherical harmonics.
+ :type num_spherical: int
+ :param num_radial: Number of radial basis functions.
+ :type num_radial: int
+ :param cutoff: (float, optional): Cutoff distance for interatomic
+ interactions. (default: :obj:`5.0`)
+ :param envelope_exponent: Shape of the smooth cutoff.
+ (default: :obj:`5`)
+ :type envelope_exponent: int, optional
+ :param num_before_skip: (int, optional): Number of residual layers in the
+ interaction blocks before the skip connection. (default: :obj:`1`)
+ :param num_after_skip: (int, optional): Number of residual layers in the
+ interaction blocks after the skip connection. (default: :obj:`2`)
+ :param num_output_layers: (int, optional): Number of linear layers for the
+ output blocks. (default: :obj:`3`)
+ :param act: (function, optional): The activation function.
+ (default: :obj:`silu`)
+
+ .. py:property:: num_params
+ :type: int
+
+
+ .. py:method:: _forward(data)
+
+
+ ..
py:method:: forward(data) + + + diff --git a/_sources/autoapi/core/models/equiformer_v2/activation/index.rst b/_sources/autoapi/core/models/equiformer_v2/activation/index.rst new file mode 100644 index 000000000..89e859a6b --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/activation/index.rst @@ -0,0 +1,388 @@ +:py:mod:`core.models.equiformer_v2.activation` +============================================== + +.. py:module:: core.models.equiformer_v2.activation + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.activation.ScaledSiLU + core.models.equiformer_v2.activation.ScaledSwiGLU + core.models.equiformer_v2.activation.SwiGLU + core.models.equiformer_v2.activation.SmoothLeakyReLU + core.models.equiformer_v2.activation.ScaledSmoothLeakyReLU + core.models.equiformer_v2.activation.ScaledSigmoid + core.models.equiformer_v2.activation.GateActivation + core.models.equiformer_v2.activation.S2Activation + core.models.equiformer_v2.activation.SeparableS2Activation + + + + +.. py:class:: ScaledSiLU(inplace: bool = False) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(inputs) + + + .. py:method:: extra_repr() + + Set the extra representation of the module. + + To print customized extra information, you should re-implement + this method in your own modules. Both single-line and multi-line + strings are acceptable. + + + +.. py:class:: ScaledSwiGLU(in_channels: int, out_channels: int, bias: bool = True) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(inputs) + + + +.. 
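A pattern shared by the ``Scaled*`` activations in this module is to apply the base
nonlinearity and then multiply by a fixed constant chosen so that, for roughly
standard-normal inputs, the output variance stays close to one. A minimal sketch of that
idea (the class name and the numeric factor below are illustrative assumptions, not the
exact values used by :py:obj:`ScaledSiLU`)::

    import torch

    class UnitVarianceSiLU(torch.nn.Module):
        """Illustrative only: SiLU rescaled toward unit output variance."""

        def __init__(self, scale: float = 1.0 / 0.6):
            super().__init__()
            # SiLU of a standard-normal input has a standard deviation of roughly 0.6,
            # so multiplying by ~1/0.6 brings it back to about 1. The constant used by
            # the library may differ.
            self.scale = scale
            self.act = torch.nn.SiLU()

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.act(x) * self.scale

    x = torch.randn(4096, 128)
    print(UnitVarianceSiLU()(x).std())  # close to 1.0 for standard-normal input

..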
py:class:: SwiGLU(in_channels: int, out_channels: int, bias: bool = True) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(inputs) + + + +.. py:class:: SmoothLeakyReLU(negative_slope: float = 0.2) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x) + + + .. py:method:: extra_repr() + + Set the extra representation of the module. + + To print customized extra information, you should re-implement + this method in your own modules. Both single-line and multi-line + strings are acceptable. + + + +.. py:class:: ScaledSmoothLeakyReLU + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x) + + + .. py:method:: extra_repr() + + Set the extra representation of the module. 
+ + To print customized extra information, you should re-implement + this method in your own modules. Both single-line and multi-line + strings are acceptable. + + + +.. py:class:: ScaledSigmoid + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x: torch.Tensor) -> torch.Tensor + + + +.. py:class:: GateActivation(lmax: int, mmax: int, num_channels: int) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(gating_scalars, input_tensors) + + `gating_scalars`: shape [N, lmax * num_channels] + `input_tensors`: shape [N, (lmax + 1) ** 2, num_channels] + + + +.. py:class:: S2Activation(lmax: int, mmax: int) + + + Bases: :py:obj:`torch.nn.Module` + + Assume we only have one resolution + + .. py:method:: forward(inputs, SO3_grid) + + + +.. py:class:: SeparableS2Activation(lmax: int, mmax: int) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. 
+ + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(input_scalars, input_tensors, SO3_grid) + + + diff --git a/_sources/autoapi/core/models/equiformer_v2/drop/index.rst b/_sources/autoapi/core/models/equiformer_v2/drop/index.rst new file mode 100644 index 000000000..d753703b5 --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/drop/index.rst @@ -0,0 +1,225 @@ +:py:mod:`core.models.equiformer_v2.drop` +======================================== + +.. py:module:: core.models.equiformer_v2.drop + +.. autoapi-nested-parse:: + + Add `extra_repr` into DropPath implemented by timm + for displaying more info. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.drop.DropPath + core.models.equiformer_v2.drop.GraphDropPath + core.models.equiformer_v2.drop.EquivariantDropout + core.models.equiformer_v2.drop.EquivariantScalarsDropout + core.models.equiformer_v2.drop.EquivariantDropoutArraySphericalHarmonics + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.drop.drop_path + + + +.. py:function:: drop_path(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor + + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. + + +.. py:class:: DropPath(drop_prob: float) + + + Bases: :py:obj:`torch.nn.Module` + + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + .. py:method:: forward(x: torch.Tensor) -> torch.Tensor + + + .. py:method:: extra_repr() -> str + + Set the extra representation of the module. + + To print customized extra information, you should re-implement + this method in your own modules. Both single-line and multi-line + strings are acceptable. + + + +.. py:class:: GraphDropPath(drop_prob: float) + + + Bases: :py:obj:`torch.nn.Module` + + Consider batch for graph data when dropping paths. + + .. py:method:: forward(x: torch.Tensor, batch) -> torch.Tensor + + + .. py:method:: extra_repr() -> str + + Set the extra representation of the module. + + To print customized extra information, you should re-implement + this method in your own modules. Both single-line and multi-line + strings are acceptable. + + + +.. py:class:: EquivariantDropout(irreps, drop_prob: float) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. 
note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x: torch.Tensor) -> torch.Tensor + + + +.. py:class:: EquivariantScalarsDropout(irreps, drop_prob: float) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x: torch.Tensor) -> torch.Tensor + + + .. py:method:: extra_repr() -> str + + Set the extra representation of the module. + + To print customized extra information, you should re-implement + this method in your own modules. Both single-line and multi-line + strings are acceptable. + + + +.. py:class:: EquivariantDropoutArraySphericalHarmonics(drop_prob: float, drop_graph: bool = False) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x: torch.Tensor, batch=None) -> torch.Tensor + + + .. py:method:: extra_repr() -> str + + Set the extra representation of the module. + + To print customized extra information, you should re-implement + this method in your own modules. Both single-line and multi-line + strings are acceptable. + + + diff --git a/_sources/autoapi/core/models/equiformer_v2/edge_rot_mat/index.rst b/_sources/autoapi/core/models/equiformer_v2/edge_rot_mat/index.rst new file mode 100644 index 000000000..364942232 --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/edge_rot_mat/index.rst @@ -0,0 +1,22 @@ +:py:mod:`core.models.equiformer_v2.edge_rot_mat` +================================================ + +.. py:module:: core.models.equiformer_v2.edge_rot_mat + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + core.models.equiformer_v2.edge_rot_mat.init_edge_rot_mat + + + +.. py:function:: init_edge_rot_mat(edge_distance_vec) + + diff --git a/_sources/autoapi/core/models/equiformer_v2/equiformer_v2_oc20/index.rst b/_sources/autoapi/core/models/equiformer_v2/equiformer_v2_oc20/index.rst new file mode 100644 index 000000000..c39765686 --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/equiformer_v2_oc20/index.rst @@ -0,0 +1,152 @@ +:py:mod:`core.models.equiformer_v2.equiformer_v2_oc20` +====================================================== + +.. py:module:: core.models.equiformer_v2.equiformer_v2_oc20 + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.equiformer_v2_oc20.EquiformerV2_OC20 + + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.equiformer_v2_oc20._AVG_NUM_NODES + core.models.equiformer_v2.equiformer_v2_oc20._AVG_DEGREE + + +.. py:data:: _AVG_NUM_NODES + :value: 77.81317 + + + +.. py:data:: _AVG_DEGREE + :value: 23.395238876342773 + + + +.. py:class:: EquiformerV2_OC20(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = True, max_neighbors: int = 500, max_radius: float = 5.0, max_num_elements: int = 90, num_layers: int = 12, sphere_channels: int = 128, attn_hidden_channels: int = 128, num_heads: int = 8, attn_alpha_channels: int = 32, attn_value_channels: int = 16, ffn_hidden_channels: int = 512, norm_type: str = 'rms_norm_sh', lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, grid_resolution: int | None = None, num_sphere_samples: int = 128, edge_channels: int = 128, use_atom_edge_embedding: bool = True, share_atom_edge_embedding: bool = False, use_m_share_rad: bool = False, distance_function: str = 'gaussian', num_distance_basis: int = 512, attn_activation: str = 'scaled_silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, ffn_activation: str = 'scaled_silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True, alpha_drop: float = 0.1, drop_path_rate: float = 0.05, proj_drop: float = 0.0, weight_init: str = 'normal', enforce_max_neighbors_strictly: bool = True, avg_num_nodes: float | None = None, avg_degree: float | None = None, use_energy_lin_ref: bool | None = False, load_energy_lin_ref: bool | None = False) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation + + :param use_pbc: Use periodic boundary conditions + :type use_pbc: bool + :param regress_forces: Compute forces + :type regress_forces: bool + :param otf_graph: Compute graph On The Fly (OTF) + :type otf_graph: bool + :param max_neighbors: Maximum number of neighbors per atom + :type max_neighbors: int + :param max_radius: Maximum distance between nieghboring atoms in Angstroms + :type max_radius: float + :param max_num_elements: Maximum atomic number + :type max_num_elements: int + :param num_layers: Number of layers in the GNN + :type num_layers: int + :param sphere_channels: Number of spherical channels (one set per resolution) + :type sphere_channels: int + :param attn_hidden_channels: Number of hidden channels used during SO(2) graph attention + :type attn_hidden_channels: int + :param num_heads: Number of attention heads + :type num_heads: int + :param attn_alpha_head: Number of channels for alpha vector in each attention head + :type 
attn_alpha_head: int + :param attn_value_head: Number of channels for value vector in each attention head + :type attn_value_head: int + :param ffn_hidden_channels: Number of hidden channels used during feedforward network + :type ffn_hidden_channels: int + :param norm_type: Type of normalization layer (['layer_norm', 'layer_norm_sh', 'rms_norm_sh']) + :type norm_type: str + :param lmax_list: List of maximum degree of the spherical harmonics (1 to 10) + :type lmax_list: int + :param mmax_list: List of maximum order of the spherical harmonics (0 to lmax) + :type mmax_list: int + :param grid_resolution: Resolution of SO3_Grid + :type grid_resolution: int + :param num_sphere_samples: Number of samples used to approximate the integration of the sphere in the output blocks + :type num_sphere_samples: int + :param edge_channels: Number of channels for the edge invariant features + :type edge_channels: int + :param use_atom_edge_embedding: Whether to use atomic embedding along with relative distance for edge scalar features + :type use_atom_edge_embedding: bool + :param share_atom_edge_embedding: Whether to share `atom_edge_embedding` across all blocks + :type share_atom_edge_embedding: bool + :param use_m_share_rad: Whether all m components within a type-L vector of one channel share radial function weights + :type use_m_share_rad: bool + :param distance_function: Basis function used for distances + :type distance_function: "gaussian", "sigmoid", "linearsigmoid", "silu" + :param attn_activation: Type of activation function for SO(2) graph attention + :type attn_activation: str + :param use_s2_act_attn: Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer + :type use_s2_act_attn: bool + :param use_attn_renorm: Whether to re-normalize attention weights + :type use_attn_renorm: bool + :param ffn_activation: Type of activation function for feedforward network + :type ffn_activation: str + :param use_gate_act: If `True`, use gate activation. Otherwise, use S2 activation + :type use_gate_act: bool + :param use_grid_mlp: If `True`, use projecting to grids and performing MLPs for FFNs. + :type use_grid_mlp: bool + :param use_sep_s2_act: If `True`, use separable S2 activation when `use_gate_act` is False. + :type use_sep_s2_act: bool + :param alpha_drop: Dropout rate for attention weights + :type alpha_drop: float + :param drop_path_rate: Drop path rate + :type drop_path_rate: float + :param proj_drop: Dropout rate for outputs of attention and FFN in Transformer blocks + :type proj_drop: float + :param weight_init: ['normal', 'uniform'] initialization of weights of linear layers except those in radial functions + :type weight_init: str + :param enforce_max_neighbors_strictly: When edges are subselected based on the `max_neighbors` arg, arbitrarily select amongst equidistant / degenerate edges to have exactly the correct number. + :type enforce_max_neighbors_strictly: bool + :param avg_num_nodes: Average number of nodes per graph + :type avg_num_nodes: float + :param avg_degree: Average degree of nodes in the graph + :type avg_degree: float + :param use_energy_lin_ref: Whether to add the per-atom energy references during prediction. + During training and validation, this should be kept `False` since we use the `lin_ref` parameter in the OC22 dataloader to subtract the per-atom linear references from the energy targets. + During prediction (where we don't have energy targets), this can be set to `True` to add the per-atom linear references to the predicted energies. 
+ :type use_energy_lin_ref: bool + :param load_energy_lin_ref: Whether to add nn.Parameters for the per-element energy references. + This additional flag is there to ensure compatibility when strict-loading checkpoints, since the `use_energy_lin_ref` flag can be either True or False even if the model is trained with linear references. + You can't have use_energy_lin_ref = True and load_energy_lin_ref = False, since the model will not have the parameters for the linear references. All other combinations are fine. + :type load_energy_lin_ref: bool + + .. py:property:: num_params + + + .. py:method:: forward(data) + + + .. py:method:: _init_edge_rot_mat(data, edge_index, edge_distance_vec) + + + .. py:method:: _init_weights(m) + + + .. py:method:: _uniform_init_rad_func_linear_weights(m) + + + .. py:method:: _uniform_init_linear_weights(m) + + + .. py:method:: no_weight_decay() -> set + + Returns a list of parameters with no weight decay. + + + diff --git a/_sources/autoapi/core/models/equiformer_v2/gaussian_rbf/index.rst b/_sources/autoapi/core/models/equiformer_v2/gaussian_rbf/index.rst new file mode 100644 index 000000000..c4d81a499 --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/gaussian_rbf/index.rst @@ -0,0 +1,79 @@ +:py:mod:`core.models.equiformer_v2.gaussian_rbf` +================================================ + +.. py:module:: core.models.equiformer_v2.gaussian_rbf + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.gaussian_rbf.GaussianRadialBasisLayer + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.gaussian_rbf.gaussian + + + +.. py:function:: gaussian(x: torch.Tensor, mean, std) -> torch.Tensor + + +.. py:class:: GaussianRadialBasisLayer(num_basis: int, cutoff: float) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(dist: torch.Tensor, node_atom=None, edge_src=None, edge_dst=None) + + + .. py:method:: extra_repr() + + Set the extra representation of the module. + + To print customized extra information, you should re-implement + this method in your own modules. Both single-line and multi-line + strings are acceptable. + + + diff --git a/_sources/autoapi/core/models/equiformer_v2/index.rst b/_sources/autoapi/core/models/equiformer_v2/index.rst new file mode 100644 index 000000000..f845eaf72 --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/index.rst @@ -0,0 +1,163 @@ +:py:mod:`core.models.equiformer_v2` +=================================== + +.. py:module:: core.models.equiformer_v2 + + +Subpackages +----------- +.. 
toctree:: + :titlesonly: + :maxdepth: 3 + + trainers/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + activation/index.rst + drop/index.rst + edge_rot_mat/index.rst + equiformer_v2_oc20/index.rst + gaussian_rbf/index.rst + input_block/index.rst + layer_norm/index.rst + module_list/index.rst + radial_function/index.rst + so2_ops/index.rst + so3/index.rst + transformer_block/index.rst + wigner/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.EquiformerV2 + + + + +.. py:class:: EquiformerV2(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = True, max_neighbors: int = 500, max_radius: float = 5.0, max_num_elements: int = 90, num_layers: int = 12, sphere_channels: int = 128, attn_hidden_channels: int = 128, num_heads: int = 8, attn_alpha_channels: int = 32, attn_value_channels: int = 16, ffn_hidden_channels: int = 512, norm_type: str = 'rms_norm_sh', lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, grid_resolution: int | None = None, num_sphere_samples: int = 128, edge_channels: int = 128, use_atom_edge_embedding: bool = True, share_atom_edge_embedding: bool = False, use_m_share_rad: bool = False, distance_function: str = 'gaussian', num_distance_basis: int = 512, attn_activation: str = 'scaled_silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, ffn_activation: str = 'scaled_silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True, alpha_drop: float = 0.1, drop_path_rate: float = 0.05, proj_drop: float = 0.0, weight_init: str = 'normal', enforce_max_neighbors_strictly: bool = True, avg_num_nodes: float | None = None, avg_degree: float | None = None, use_energy_lin_ref: bool | None = False, load_energy_lin_ref: bool | None = False) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation + + :param use_pbc: Use periodic boundary conditions + :type use_pbc: bool + :param regress_forces: Compute forces + :type regress_forces: bool + :param otf_graph: Compute graph On The Fly (OTF) + :type otf_graph: bool + :param max_neighbors: Maximum number of neighbors per atom + :type max_neighbors: int + :param max_radius: Maximum distance between nieghboring atoms in Angstroms + :type max_radius: float + :param max_num_elements: Maximum atomic number + :type max_num_elements: int + :param num_layers: Number of layers in the GNN + :type num_layers: int + :param sphere_channels: Number of spherical channels (one set per resolution) + :type sphere_channels: int + :param attn_hidden_channels: Number of hidden channels used during SO(2) graph attention + :type attn_hidden_channels: int + :param num_heads: Number of attention heads + :type num_heads: int + :param attn_alpha_head: Number of channels for alpha vector in each attention head + :type attn_alpha_head: int + :param attn_value_head: Number of channels for value vector in each attention head + :type attn_value_head: int + :param ffn_hidden_channels: Number of hidden channels used during feedforward network + :type ffn_hidden_channels: int + :param norm_type: Type of normalization layer (['layer_norm', 'layer_norm_sh', 'rms_norm_sh']) + :type norm_type: str + :param lmax_list: List of maximum degree of the spherical harmonics (1 to 10) + :type lmax_list: int + :param mmax_list: List of 
maximum order of the spherical harmonics (0 to lmax) + :type mmax_list: int + :param grid_resolution: Resolution of SO3_Grid + :type grid_resolution: int + :param num_sphere_samples: Number of samples used to approximate the integration of the sphere in the output blocks + :type num_sphere_samples: int + :param edge_channels: Number of channels for the edge invariant features + :type edge_channels: int + :param use_atom_edge_embedding: Whether to use atomic embedding along with relative distance for edge scalar features + :type use_atom_edge_embedding: bool + :param share_atom_edge_embedding: Whether to share `atom_edge_embedding` across all blocks + :type share_atom_edge_embedding: bool + :param use_m_share_rad: Whether all m components within a type-L vector of one channel share radial function weights + :type use_m_share_rad: bool + :param distance_function: Basis function used for distances + :type distance_function: "gaussian", "sigmoid", "linearsigmoid", "silu" + :param attn_activation: Type of activation function for SO(2) graph attention + :type attn_activation: str + :param use_s2_act_attn: Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer + :type use_s2_act_attn: bool + :param use_attn_renorm: Whether to re-normalize attention weights + :type use_attn_renorm: bool + :param ffn_activation: Type of activation function for feedforward network + :type ffn_activation: str + :param use_gate_act: If `True`, use gate activation. Otherwise, use S2 activation + :type use_gate_act: bool + :param use_grid_mlp: If `True`, use projecting to grids and performing MLPs for FFNs. + :type use_grid_mlp: bool + :param use_sep_s2_act: If `True`, use separable S2 activation when `use_gate_act` is False. + :type use_sep_s2_act: bool + :param alpha_drop: Dropout rate for attention weights + :type alpha_drop: float + :param drop_path_rate: Drop path rate + :type drop_path_rate: float + :param proj_drop: Dropout rate for outputs of attention and FFN in Transformer blocks + :type proj_drop: float + :param weight_init: ['normal', 'uniform'] initialization of weights of linear layers except those in radial functions + :type weight_init: str + :param enforce_max_neighbors_strictly: When edges are subselected based on the `max_neighbors` arg, arbitrarily select amongst equidistant / degenerate edges to have exactly the correct number. + :type enforce_max_neighbors_strictly: bool + :param avg_num_nodes: Average number of nodes per graph + :type avg_num_nodes: float + :param avg_degree: Average degree of nodes in the graph + :type avg_degree: float + :param use_energy_lin_ref: Whether to add the per-atom energy references during prediction. + During training and validation, this should be kept `False` since we use the `lin_ref` parameter in the OC22 dataloader to subtract the per-atom linear references from the energy targets. + During prediction (where we don't have energy targets), this can be set to `True` to add the per-atom linear references to the predicted energies. + :type use_energy_lin_ref: bool + :param load_energy_lin_ref: Whether to add nn.Parameters for the per-element energy references. + This additional flag is there to ensure compatibility when strict-loading checkpoints, since the `use_energy_lin_ref` flag can be either True or False even if the model is trained with linear references. + You can't have use_energy_lin_ref = True and load_energy_lin_ref = False, since the model will not have the parameters for the linear references. 
All other combinations are fine. + :type load_energy_lin_ref: bool + + .. py:property:: num_params + + + .. py:method:: forward(data) + + + .. py:method:: _init_edge_rot_mat(data, edge_index, edge_distance_vec) + + + .. py:method:: _init_weights(m) + + + .. py:method:: _uniform_init_rad_func_linear_weights(m) + + + .. py:method:: _uniform_init_linear_weights(m) + + + .. py:method:: no_weight_decay() -> set + + Returns a list of parameters with no weight decay. + + + diff --git a/_sources/autoapi/core/models/equiformer_v2/input_block/index.rst b/_sources/autoapi/core/models/equiformer_v2/input_block/index.rst new file mode 100644 index 000000000..b760c5c9f --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/input_block/index.rst @@ -0,0 +1,44 @@ +:py:mod:`core.models.equiformer_v2.input_block` +=============================================== + +.. py:module:: core.models.equiformer_v2.input_block + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.input_block.EdgeDegreeEmbedding + + + + +.. py:class:: EdgeDegreeEmbedding(sphere_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_rotation, mappingReduced, max_num_elements: int, edge_channels_list, use_atom_edge_embedding: bool, rescale_factor) + + + Bases: :py:obj:`torch.nn.Module` + + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param lmax_list (list: int): List of degrees (l) for each resolution + :param mmax_list (list: int): List of orders (m) for each resolution + :param SO3_rotation (list: SO3_Rotation): Class to calculate Wigner-D matrices and rotate embeddings + :param mappingReduced: Class to convert l and m indices once node embedding is rotated + :type mappingReduced: CoefficientMappingModule + :param max_num_elements: Maximum number of atomic numbers + :type max_num_elements: int + :param edge_channels_list (list: int): List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. + The last one will be used as hidden size when `use_atom_edge_embedding` is `True`. + :param use_atom_edge_embedding: Whether to use atomic embedding along with relative distance for edge scalar features + :type use_atom_edge_embedding: bool + :param rescale_factor: Rescale the sum aggregation + :type rescale_factor: float + + .. py:method:: forward(atomic_numbers, edge_distance, edge_index) + + + diff --git a/_sources/autoapi/core/models/equiformer_v2/layer_norm/index.rst b/_sources/autoapi/core/models/equiformer_v2/layer_norm/index.rst new file mode 100644 index 000000000..20f0bf232 --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/layer_norm/index.rst @@ -0,0 +1,168 @@ +:py:mod:`core.models.equiformer_v2.layer_norm` +============================================== + +.. py:module:: core.models.equiformer_v2.layer_norm + +.. autoapi-nested-parse:: + + 1. Normalize features of shape (N, sphere_basis, C), + with sphere_basis = (lmax + 1) ** 2. + + 2. The difference from `layer_norm.py` is that all type-L vectors have + the same number of channels and input features are of shape (N, sphere_basis, C). + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. 
autoapisummary:: + + core.models.equiformer_v2.layer_norm.EquivariantLayerNormArray + core.models.equiformer_v2.layer_norm.EquivariantLayerNormArraySphericalHarmonics + core.models.equiformer_v2.layer_norm.EquivariantRMSNormArraySphericalHarmonics + core.models.equiformer_v2.layer_norm.EquivariantRMSNormArraySphericalHarmonicsV2 + core.models.equiformer_v2.layer_norm.EquivariantDegreeLayerScale + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.layer_norm.get_normalization_layer + core.models.equiformer_v2.layer_norm.get_l_to_all_m_expand_index + + + +.. py:function:: get_normalization_layer(norm_type: str, lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component') + + +.. py:function:: get_l_to_all_m_expand_index(lmax: int) + + +.. py:class:: EquivariantLayerNormArray(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component') + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: __repr__() -> str + + Return repr(self). + + + .. py:method:: forward(node_input) + + Assume input is of shape [N, sphere_basis, C] + + + +.. py:class:: EquivariantLayerNormArraySphericalHarmonics(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component', std_balance_degrees: bool = True) + + + Bases: :py:obj:`torch.nn.Module` + + 1. Normalize over L = 0. + 2. Normalize across all m components from degrees L > 0. + 3. Do not normalize separately for different L (L > 0). + + .. py:method:: __repr__() -> str + + Return repr(self). + + + .. py:method:: forward(node_input) + + Assume input is of shape [N, sphere_basis, C] + + + +.. py:class:: EquivariantRMSNormArraySphericalHarmonics(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component') + + + Bases: :py:obj:`torch.nn.Module` + + 1. Normalize across all m components from degrees L >= 0. + + .. py:method:: __repr__() -> str + + Return repr(self). + + + .. py:method:: forward(node_input) + + Assume input is of shape [N, sphere_basis, C] + + + +.. py:class:: EquivariantRMSNormArraySphericalHarmonicsV2(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component', centering: bool = True, std_balance_degrees: bool = True) + + + Bases: :py:obj:`torch.nn.Module` + + 1. Normalize across all m components from degrees L >= 0. + 2. Expand weights and multiply with normalized feature to prevent slicing and concatenation. + + .. py:method:: __repr__() -> str + + Return repr(self). + + + .. 
py:method:: forward(node_input)
+
+ Assume input is of shape [N, sphere_basis, C]
+
+
+
+.. py:class:: EquivariantDegreeLayerScale(lmax: int, num_channels: int, scale_factor: float = 2.0)
+
+
+ Bases: :py:obj:`torch.nn.Module`
+
+ 1. Similar to Layer Scale used in CaiT (Going Deeper With Image Transformers (ICCV'21)), we scale the output of both attention and FFN.
+ 2. For degree L > 0, we scale the output down by the square root of 2 * L, which emulates halving the number of channels when using higher L.
+
+ .. py:method:: __repr__() -> str
+
+ Return repr(self).
+
+
+ .. py:method:: forward(node_input)
+
+
diff --git a/_sources/autoapi/core/models/equiformer_v2/module_list/index.rst b/_sources/autoapi/core/models/equiformer_v2/module_list/index.rst
new file mode 100644
index 000000000..bd57f9e94
--- /dev/null
+++ b/_sources/autoapi/core/models/equiformer_v2/module_list/index.rst
@@ -0,0 +1,52 @@
+:py:mod:`core.models.equiformer_v2.module_list`
+===============================================
+
+.. py:module:: core.models.equiformer_v2.module_list
+
+
+Module Contents
+---------------
+
+Classes
+~~~~~~~
+
+.. autoapisummary::
+
+ core.models.equiformer_v2.module_list.ModuleListInfo
+
+
+
+
+.. py:class:: ModuleListInfo(info_str, modules=None)
+
+
+ Bases: :py:obj:`torch.nn.ModuleList`
+
+ Holds submodules in a list.
+
+ :class:`~torch.nn.ModuleList` can be indexed like a regular Python list, but
+ modules it contains are properly registered, and will be visible by all
+ :class:`~torch.nn.Module` methods.
+
+ :param modules: an iterable of modules to add
+ :type modules: iterable, optional
+
+ Example::
+
+ class MyModule(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
+
+ def forward(self, x):
+ # ModuleList can act as an iterable, or be indexed using ints
+ for i, l in enumerate(self.linears):
+ x = self.linears[i // 2](x) + l(x)
+ return x
+
+ .. py:method:: __repr__() -> str
+
+ Return a custom repr for ModuleList that compresses repeated module representations.
+
+
diff --git a/_sources/autoapi/core/models/equiformer_v2/radial_function/index.rst b/_sources/autoapi/core/models/equiformer_v2/radial_function/index.rst
new file mode 100644
index 000000000..383894fc3
--- /dev/null
+++ b/_sources/autoapi/core/models/equiformer_v2/radial_function/index.rst
@@ -0,0 +1,30 @@
+:py:mod:`core.models.equiformer_v2.radial_function`
+===================================================
+
+.. py:module:: core.models.equiformer_v2.radial_function
+
+
+Module Contents
+---------------
+
+Classes
+~~~~~~~
+
+.. autoapisummary::
+
+ core.models.equiformer_v2.radial_function.RadialFunction
+
+
+
+
+.. py:class:: RadialFunction(channels_list)
+
+
+ Bases: :py:obj:`torch.nn.Module`
+
+ Construct a radial function (linear layers + layer normalization + SiLU) given a list of channels.
+
+ .. py:method:: forward(inputs)
+
+
diff --git a/_sources/autoapi/core/models/equiformer_v2/so2_ops/index.rst b/_sources/autoapi/core/models/equiformer_v2/so2_ops/index.rst
new file mode 100644
index 000000000..44119da12
--- /dev/null
+++ b/_sources/autoapi/core/models/equiformer_v2/so2_ops/index.rst
@@ -0,0 +1,89 @@
+:py:mod:`core.models.equiformer_v2.so2_ops`
+===========================================
+
+.. py:module:: core.models.equiformer_v2.so2_ops
+
+
+Module Contents
+---------------
+
+Classes
+~~~~~~~
+
+..
autoapisummary:: + + core.models.equiformer_v2.so2_ops.SO2_m_Convolution + core.models.equiformer_v2.so2_ops.SO2_Convolution + core.models.equiformer_v2.so2_ops.SO2_Linear + + + + +.. py:class:: SO2_m_Convolution(m: int, sphere_channels: int, m_output_channels: int, lmax_list: list[int], mmax_list: list[int]) + + + Bases: :py:obj:`torch.nn.Module` + + SO(2) Conv: Perform an SO(2) convolution on features corresponding to +- m + + :param m: Order of the spherical harmonic coefficients + :type m: int + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param m_output_channels: Number of output channels used during the SO(2) conv + :type m_output_channels: int + :param lmax_list (list: int): List of degrees (l) for each resolution + :param mmax_list (list: int): List of orders (m) for each resolution + + .. py:method:: forward(x_m) + + + +.. py:class:: SO2_Convolution(sphere_channels: int, m_output_channels: int, lmax_list: list[int], mmax_list: list[int], mappingReduced, internal_weights: bool = True, edge_channels_list: list[int] | None = None, extra_m0_output_channels: int | None = None) + + + Bases: :py:obj:`torch.nn.Module` + + SO(2) Block: Perform SO(2) convolutions for all m (orders) + + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param m_output_channels: Number of output channels used during the SO(2) conv + :type m_output_channels: int + :param lmax_list (list: int): List of degrees (l) for each resolution + :param mmax_list (list: int): List of orders (m) for each resolution + :param mappingReduced: Used to extract a subset of m components + :type mappingReduced: CoefficientMappingModule + :param internal_weights: If True, not using radial function to multiply inputs features + :type internal_weights: bool + :param edge_channels_list (list: int): List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. + :param extra_m0_output_channels: If not None, return `out_embedding` (SO3_Embedding) and `extra_m0_features` (Tensor). + :type extra_m0_output_channels: int + + .. py:method:: forward(x, x_edge) + + + +.. py:class:: SO2_Linear(sphere_channels: int, m_output_channels: int, lmax_list: list[int], mmax_list: list[int], mappingReduced, internal_weights: bool = False, edge_channels_list: list[int] | None = None) + + + Bases: :py:obj:`torch.nn.Module` + + SO(2) Linear: Perform SO(2) linear for all m (orders). + + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param m_output_channels: Number of output channels used during the SO(2) conv + :type m_output_channels: int + :param lmax_list (list: int): List of degrees (l) for each resolution + :param mmax_list (list: int): List of orders (m) for each resolution + :param mappingReduced: Used to extract a subset of m components + :type mappingReduced: CoefficientMappingModule + :param internal_weights: If True, not using radial function to multiply inputs features + :type internal_weights: bool + :param edge_channels_list (list: int): List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. + + .. 
py:method:: forward(x, x_edge) + + + diff --git a/_sources/autoapi/core/models/equiformer_v2/so3/index.rst b/_sources/autoapi/core/models/equiformer_v2/so3/index.rst new file mode 100644 index 000000000..54d511980 --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/so3/index.rst @@ -0,0 +1,257 @@ +:py:mod:`core.models.equiformer_v2.so3` +======================================= + +.. py:module:: core.models.equiformer_v2.so3 + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + .. todo:: + + 1. Simplify the case when `num_resolutions` == 1. + 2. Remove indexing when the shape is the same. + 3. Move some functions outside classes and to separate files. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.so3.CoefficientMappingModule + core.models.equiformer_v2.so3.SO3_Embedding + core.models.equiformer_v2.so3.SO3_Rotation + core.models.equiformer_v2.so3.SO3_Grid + core.models.equiformer_v2.so3.SO3_Linear + core.models.equiformer_v2.so3.SO3_LinearV2 + + + + +.. py:class:: CoefficientMappingModule(lmax_list: list[int], mmax_list: list[int]) + + + Bases: :py:obj:`torch.nn.Module` + + Helper module for coefficients used to reshape lval <--> m and to get coefficients of specific degree or order + + :param lmax_list (list: int): List of maximum degree of the spherical harmonics + :param mmax_list (list: int): List of maximum order of the spherical harmonics + + .. py:method:: complex_idx(m: int, lmax: int, m_complex, l_harmonic) + + Add `m_complex` and `l_harmonic` to the input arguments + since we cannot use `self.m_complex`. + + + .. py:method:: coefficient_idx(lmax: int, mmax: int) + + + .. py:method:: get_rotate_inv_rescale(lmax: int, mmax: int) + + + .. py:method:: __repr__() -> str + + Return repr(self). + + + +.. py:class:: SO3_Embedding(length: int, lmax_list: list[int], num_channels: int, device: torch.device, dtype: torch.dtype) + + + Helper functions for performing operations on irreps embedding + + :param length: Batch size + :type length: int + :param lmax_list (list: int): List of maximum degree of the spherical harmonics + :param num_channels: Number of channels + :type num_channels: int + :param device: Device of the output + :param dtype: type of the output tensors + + .. py:method:: clone() -> SO3_Embedding + + + .. py:method:: set_embedding(embedding) -> None + + + .. py:method:: set_lmax_mmax(lmax_list: list[int], mmax_list: list[int]) -> None + + + .. py:method:: _expand_edge(edge_index: torch.Tensor) -> None + + + .. py:method:: expand_edge(edge_index: torch.Tensor) + + + .. py:method:: _reduce_edge(edge_index: torch.Tensor, num_nodes: int) + + + .. py:method:: _m_primary(mapping) + + + .. py:method:: _l_primary(mapping) + + + .. py:method:: _rotate(SO3_rotation, lmax_list: list[int], mmax_list: list[int]) + + + .. py:method:: _rotate_inv(SO3_rotation, mappingReduced) + + + .. py:method:: _grid_act(SO3_grid, act, mappingReduced) + + + .. py:method:: to_grid(SO3_grid, lmax=-1) + + + .. py:method:: _from_grid(x_grid, SO3_grid, lmax: int = -1) + + + +.. py:class:: SO3_Rotation(lmax: int) + + + Bases: :py:obj:`torch.nn.Module` + + Helper functions for Wigner-D rotations + + :param lmax_list (list: int): List of maximum degree of the spherical harmonics + + .. py:method:: set_wigner(rot_mat3x3) + + + .. 
py:method:: rotate(embedding, out_lmax: int, out_mmax: int) + + + .. py:method:: rotate_inv(embedding, in_lmax: int, in_mmax: int) + + + .. py:method:: RotationToWignerDMatrix(edge_rot_mat, start_lmax: int, end_lmax: int) -> torch.Tensor + + + +.. py:class:: SO3_Grid(lmax: int, mmax: int, normalization: str = 'integral', resolution: int | None = None) + + + Bases: :py:obj:`torch.nn.Module` + + Helper functions for grid representation of the irreps + + :param lmax: Maximum degree of the spherical harmonics + :type lmax: int + :param mmax: Maximum order of the spherical harmonics + :type mmax: int + + .. py:method:: get_to_grid_mat(device) + + + .. py:method:: get_from_grid_mat(device) + + + .. py:method:: to_grid(embedding, lmax: int, mmax: int) + + + .. py:method:: from_grid(grid, lmax: int, mmax: int) + + + +.. py:class:: SO3_Linear(in_features: int, out_features: int, lmax: int, bias: bool = True) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(input_embedding, output_scale=None) + + + .. py:method:: __repr__() -> str + + Return repr(self). + + + +.. py:class:: SO3_LinearV2(in_features: int, out_features: int, lmax: int, bias: bool = True) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(input_embedding) + + + .. py:method:: __repr__() -> str + + Return repr(self). 
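The classes above operate on spherical-harmonic embeddings laid out as ``[N, (lmax + 1)**2, C]``, with the ``2l + 1`` coefficients of degree ``l`` stored contiguously. As a rough illustration of why per-degree weights keep such a layer equivariant, here is a minimal sketch of a degree-wise linear map in the spirit of ``SO3_Linear`` (an illustrative reimplementation, not the fairchem code; the shapes and the bias-on-``l = 0`` convention are assumptions for this sketch)::

    import torch
    import torch.nn as nn

    class DegreeWiseLinear(nn.Module):
        """One weight matrix per degree l; bias only on the invariant l = 0 part."""

        def __init__(self, in_features: int, out_features: int, lmax: int) -> None:
            super().__init__()
            self.lmax = lmax
            self.weight = nn.Parameter(
                torch.randn(lmax + 1, in_features, out_features) / in_features**0.5
            )
            self.bias = nn.Parameter(torch.zeros(out_features))

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # x: [N, (lmax + 1)**2, in_features]
            blocks = []
            for l in range(self.lmax + 1):
                start, end = l * l, (l + 1) * (l + 1)  # the 2l + 1 coefficients of degree l
                block = torch.einsum("nmc,cd->nmd", x[:, start:end, :], self.weight[l])
                if l == 0:
                    block = block + self.bias          # a bias on l > 0 would break equivariance
                blocks.append(block)
            return torch.cat(blocks, dim=1)

    x = torch.randn(4, (2 + 1) ** 2, 8)         # 4 nodes, lmax = 2, 8 channels
    out = DegreeWiseLinear(8, 16, lmax=2)(x)    # -> [4, 9, 16]

Mixing channels within each degree (but never across degrees or orders) is what lets the layer commute with Wigner-D rotations of the embedding.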
+ + + diff --git a/_sources/autoapi/core/models/equiformer_v2/trainers/energy_trainer/index.rst b/_sources/autoapi/core/models/equiformer_v2/trainers/energy_trainer/index.rst new file mode 100644 index 000000000..c22178614 --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/trainers/energy_trainer/index.rst @@ -0,0 +1,88 @@ +:py:mod:`core.models.equiformer_v2.trainers.energy_trainer` +=========================================================== + +.. py:module:: core.models.equiformer_v2.trainers.energy_trainer + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.trainers.energy_trainer.EquiformerV2EnergyTrainer + + + + +.. py:class:: EquiformerV2EnergyTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp') + + + Bases: :py:obj:`fairchem.core.trainers.OCPTrainer` + + Trainer class for the Structure to Energy & Force (S2EF) and Initial State to + Relaxed State (IS2RS) tasks. + + .. note:: + + Examples of configurations for task, model, dataset and optimizer + can be found in `configs/ocp_s2ef `_ + and `configs/ocp_is2rs `_. + + :param task: Task configuration. + :type task: dict + :param model: Model configuration. + :type model: dict + :param outputs: Output property configuration. + :type outputs: dict + :param dataset: Dataset configuration. The dataset needs to be a SinglePointLMDB dataset. + :type dataset: dict + :param optimizer: Optimizer configuration. + :type optimizer: dict + :param loss_fns: Loss function configuration. + :type loss_fns: dict + :param eval_metrics: Evaluation metrics configuration. + :type eval_metrics: dict + :param identifier: Experiment identifier that is appended to log directory. + :type identifier: str + :param run_dir: Path to the run directory where logs are to be saved. + (default: :obj:`None`) + :type run_dir: str, optional + :param is_debug: Run in debug mode. + (default: :obj:`False`) + :type is_debug: bool, optional + :param print_every: Frequency of printing logs. + (default: :obj:`100`) + :type print_every: int, optional + :param seed: Random number seed. + (default: :obj:`None`) + :type seed: int, optional + :param logger: Type of logger to be used. + (default: :obj:`wandb`) + :type logger: str, optional + :param local_rank: Local rank of the process, only applicable for distributed training. + (default: :obj:`0`) + :type local_rank: int, optional + :param amp: Run using automatic mixed precision. + (default: :obj:`False`) + :type amp: bool, optional + :param slurm: Slurm configuration. Currently just for keeping track. + (default: :obj:`{}`) + :type slurm: dict + :param noddp: Run model without DDP. + :type noddp: bool, optional + + .. 
py:method:: load_extras() + + + diff --git a/_sources/autoapi/core/models/equiformer_v2/trainers/forces_trainer/index.rst b/_sources/autoapi/core/models/equiformer_v2/trainers/forces_trainer/index.rst new file mode 100644 index 000000000..756a4dbab --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/trainers/forces_trainer/index.rst @@ -0,0 +1,88 @@ +:py:mod:`core.models.equiformer_v2.trainers.forces_trainer` +=========================================================== + +.. py:module:: core.models.equiformer_v2.trainers.forces_trainer + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.trainers.forces_trainer.EquiformerV2ForcesTrainer + + + + +.. py:class:: EquiformerV2ForcesTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp') + + + Bases: :py:obj:`fairchem.core.trainers.OCPTrainer` + + Trainer class for the Structure to Energy & Force (S2EF) and Initial State to + Relaxed State (IS2RS) tasks. + + .. note:: + + Examples of configurations for task, model, dataset and optimizer + can be found in `configs/ocp_s2ef `_ + and `configs/ocp_is2rs `_. + + :param task: Task configuration. + :type task: dict + :param model: Model configuration. + :type model: dict + :param outputs: Output property configuration. + :type outputs: dict + :param dataset: Dataset configuration. The dataset needs to be a SinglePointLMDB dataset. + :type dataset: dict + :param optimizer: Optimizer configuration. + :type optimizer: dict + :param loss_fns: Loss function configuration. + :type loss_fns: dict + :param eval_metrics: Evaluation metrics configuration. + :type eval_metrics: dict + :param identifier: Experiment identifier that is appended to log directory. + :type identifier: str + :param run_dir: Path to the run directory where logs are to be saved. + (default: :obj:`None`) + :type run_dir: str, optional + :param is_debug: Run in debug mode. + (default: :obj:`False`) + :type is_debug: bool, optional + :param print_every: Frequency of printing logs. + (default: :obj:`100`) + :type print_every: int, optional + :param seed: Random number seed. + (default: :obj:`None`) + :type seed: int, optional + :param logger: Type of logger to be used. + (default: :obj:`wandb`) + :type logger: str, optional + :param local_rank: Local rank of the process, only applicable for distributed training. + (default: :obj:`0`) + :type local_rank: int, optional + :param amp: Run using automatic mixed precision. + (default: :obj:`False`) + :type amp: bool, optional + :param slurm: Slurm configuration. Currently just for keeping track. + (default: :obj:`{}`) + :type slurm: dict + :param noddp: Run model without DDP. + :type noddp: bool, optional + + .. py:method:: load_extras() -> None + + + diff --git a/_sources/autoapi/core/models/equiformer_v2/trainers/index.rst b/_sources/autoapi/core/models/equiformer_v2/trainers/index.rst new file mode 100644 index 000000000..563f298e7 --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/trainers/index.rst @@ -0,0 +1,17 @@ +:py:mod:`core.models.equiformer_v2.trainers` +============================================ + +.. 
py:module:: core.models.equiformer_v2.trainers + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + energy_trainer/index.rst + forces_trainer/index.rst + lr_scheduler/index.rst + + diff --git a/_sources/autoapi/core/models/equiformer_v2/trainers/lr_scheduler/index.rst b/_sources/autoapi/core/models/equiformer_v2/trainers/lr_scheduler/index.rst new file mode 100644 index 000000000..03782ace4 --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/trainers/lr_scheduler/index.rst @@ -0,0 +1,97 @@ +:py:mod:`core.models.equiformer_v2.trainers.lr_scheduler` +========================================================= + +.. py:module:: core.models.equiformer_v2.trainers.lr_scheduler + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.trainers.lr_scheduler.CosineLRLambda + core.models.equiformer_v2.trainers.lr_scheduler.MultistepLRLambda + core.models.equiformer_v2.trainers.lr_scheduler.LRScheduler + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.trainers.lr_scheduler.multiply + core.models.equiformer_v2.trainers.lr_scheduler.cosine_lr_lambda + core.models.equiformer_v2.trainers.lr_scheduler.multistep_lr_lambda + + + +.. py:function:: multiply(obj, num) + + +.. py:function:: cosine_lr_lambda(current_step: int, scheduler_params) + + +.. py:class:: CosineLRLambda(scheduler_params) + + + .. py:method:: __call__(current_step: int) + + + +.. py:function:: multistep_lr_lambda(current_step: int, scheduler_params) -> float + + +.. py:class:: MultistepLRLambda(scheduler_params) + + + .. py:method:: __call__(current_step: int) -> float + + + +.. py:class:: LRScheduler(optimizer, config) + + + .. rubric:: Notes + + 1. scheduler.step() is called for every step for OC20 training. + 2. We use "scheduler_params" in .yml to specify scheduler parameters. + 3. For cosine learning rate, we use LambdaLR with lambda function being cosine: + scheduler: LambdaLR + scheduler_params: + lambda_type: cosine + ... + 4. Following 3., if `cosine` is used, `scheduler_params` in .yml looks like: + scheduler: LambdaLR + scheduler_params: + lambda_type: cosine + warmup_epochs: ... + warmup_factor: ... + lr_min_factor: ... + 5. Following 3., if `multistep` is used, `scheduler_params` in .yml looks like: + scheduler: LambdaLR + scheduler_params: + lambda_type: multistep + warmup_epochs: ... + warmup_factor: ... + decay_epochs: ... (list) + decay_rate: ... + + :param optimizer: torch optim object + :type optimizer: obj + :param config: Optim dict from the input config + :type config: dict + + .. py:method:: step(metrics=None, epoch=None) + + + .. py:method:: filter_kwargs(config) + + + .. py:method:: get_lr() -> float | None + + + diff --git a/_sources/autoapi/core/models/equiformer_v2/transformer_block/index.rst b/_sources/autoapi/core/models/equiformer_v2/transformer_block/index.rst new file mode 100644 index 000000000..68b72fb1e --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/transformer_block/index.rst @@ -0,0 +1,165 @@ +:py:mod:`core.models.equiformer_v2.transformer_block` +===================================================== + +.. py:module:: core.models.equiformer_v2.transformer_block + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.transformer_block.SO2EquivariantGraphAttention + core.models.equiformer_v2.transformer_block.FeedForwardNetwork + core.models.equiformer_v2.transformer_block.TransBlockV2 + + + + +.. 
py:class:: SO2EquivariantGraphAttention(sphere_channels: int, hidden_channels: int, num_heads: int, attn_alpha_channels: int, attn_value_channels: int, output_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_rotation, mappingReduced, SO3_grid, max_num_elements: int, edge_channels_list, use_atom_edge_embedding: bool = True, use_m_share_rad: bool = False, activation='scaled_silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, use_gate_act: bool = False, use_sep_s2_act: bool = True, alpha_drop: float = 0.0) + + + Bases: :py:obj:`torch.nn.Module` + + SO2EquivariantGraphAttention: Perform MLP attention + non-linear message passing + SO(2) Convolution with radial function -> S2 Activation -> SO(2) Convolution -> attention weights and non-linear messages + attention weights * non-linear messages -> Linear + + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param hidden_channels: Number of hidden channels used during the SO(2) conv + :type hidden_channels: int + :param num_heads: Number of attention heads + :type num_heads: int + :param attn_alpha_head: Number of channels for alpha vector in each attention head + :type attn_alpha_head: int + :param attn_value_head: Number of channels for value vector in each attention head + :type attn_value_head: int + :param output_channels: Number of output channels + :type output_channels: int + :param lmax_list (list: int): List of degrees (l) for each resolution + :param mmax_list (list: int): List of orders (m) for each resolution + :param SO3_rotation (list: SO3_Rotation): Class to calculate Wigner-D matrices and rotate embeddings + :param mappingReduced: Class to convert l and m indices once node embedding is rotated + :type mappingReduced: CoefficientMappingModule + :param SO3_grid: Class used to convert from grid the spherical harmonic representations + :type SO3_grid: SO3_grid + :param max_num_elements: Maximum number of atomic numbers + :type max_num_elements: int + :param edge_channels_list (list: int): List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. + The last one will be used as hidden size when `use_atom_edge_embedding` is `True`. + :param use_atom_edge_embedding: Whether to use atomic embedding along with relative distance for edge scalar features + :type use_atom_edge_embedding: bool + :param use_m_share_rad: Whether all m components within a type-L vector of one channel share radial function weights + :type use_m_share_rad: bool + :param activation: Type of activation function + :type activation: str + :param use_s2_act_attn: Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer + :type use_s2_act_attn: bool + :param use_attn_renorm: Whether to re-normalize attention weights + :type use_attn_renorm: bool + :param use_gate_act: If `True`, use gate activation. Otherwise, use S2 activation. + :type use_gate_act: bool + :param use_sep_s2_act: If `True`, use separable S2 activation when `use_gate_act` is False. + :type use_sep_s2_act: bool + :param alpha_drop: Dropout rate for attention weights + :type alpha_drop: float + + .. py:method:: forward(x: torch.Tensor, atomic_numbers, edge_distance: torch.Tensor, edge_index) + + + +.. 
py:class:: FeedForwardNetwork(sphere_channels: int, hidden_channels: int, output_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_grid, activation: str = 'scaled_silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True) + + + Bases: :py:obj:`torch.nn.Module` + + FeedForwardNetwork: Perform feedforward network with S2 activation or gate activation + + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param hidden_channels: Number of hidden channels used during feedforward network + :type hidden_channels: int + :param output_channels: Number of output channels + :type output_channels: int + :param lmax_list (list: int): List of degrees (l) for each resolution + :param mmax_list (list: int): List of orders (m) for each resolution + :param SO3_grid: Class used to convert from grid the spherical harmonic representations + :type SO3_grid: SO3_grid + :param activation: Type of activation function + :type activation: str + :param use_gate_act: If `True`, use gate activation. Otherwise, use S2 activation + :type use_gate_act: bool + :param use_grid_mlp: If `True`, use projecting to grids and performing MLPs. + :type use_grid_mlp: bool + :param use_sep_s2_act: If `True`, use separable grid MLP when `use_grid_mlp` is True. + :type use_sep_s2_act: bool + + .. py:method:: forward(input_embedding) + + + +.. py:class:: TransBlockV2(sphere_channels: int, attn_hidden_channels: int, num_heads: int, attn_alpha_channels: int, attn_value_channels: int, ffn_hidden_channels: int, output_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_rotation, mappingReduced, SO3_grid, max_num_elements: int, edge_channels_list: list[int], use_atom_edge_embedding: bool = True, use_m_share_rad: bool = False, attn_activation: str = 'silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, ffn_activation: str = 'silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True, norm_type: str = 'rms_norm_sh', alpha_drop: float = 0.0, drop_path_rate: float = 0.0, proj_drop: float = 0.0) + + + Bases: :py:obj:`torch.nn.Module` + + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param attn_hidden_channels: Number of hidden channels used during SO(2) graph attention + :type attn_hidden_channels: int + :param num_heads: Number of attention heads + :type num_heads: int + :param attn_alpha_head: Number of channels for alpha vector in each attention head + :type attn_alpha_head: int + :param attn_value_head: Number of channels for value vector in each attention head + :type attn_value_head: int + :param ffn_hidden_channels: Number of hidden channels used during feedforward network + :type ffn_hidden_channels: int + :param output_channels: Number of output channels + :type output_channels: int + :param lmax_list (list: int): List of degrees (l) for each resolution + :param mmax_list (list: int): List of orders (m) for each resolution + :param SO3_rotation (list: SO3_Rotation): Class to calculate Wigner-D matrices and rotate embeddings + :param mappingReduced: Class to convert l and m indices once node embedding is rotated + :type mappingReduced: CoefficientMappingModule + :param SO3_grid: Class used to convert from grid the spherical harmonic representations + :type SO3_grid: SO3_grid + :param max_num_elements: Maximum number of atomic numbers + :type max_num_elements: int + :param edge_channels_list (list: int): List of sizes of invariant edge embedding. 
For example, [input_channels, hidden_channels, hidden_channels]. + The last one will be used as hidden size when `use_atom_edge_embedding` is `True`. + :param use_atom_edge_embedding: Whether to use atomic embedding along with relative distance for edge scalar features + :type use_atom_edge_embedding: bool + :param use_m_share_rad: Whether all m components within a type-L vector of one channel share radial function weights + :type use_m_share_rad: bool + :param attn_activation: Type of activation function for SO(2) graph attention + :type attn_activation: str + :param use_s2_act_attn: Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer + :type use_s2_act_attn: bool + :param use_attn_renorm: Whether to re-normalize attention weights + :type use_attn_renorm: bool + :param ffn_activation: Type of activation function for feedforward network + :type ffn_activation: str + :param use_gate_act: If `True`, use gate activation. Otherwise, use S2 activation + :type use_gate_act: bool + :param use_grid_mlp: If `True`, use projecting to grids and performing MLPs for FFN. + :type use_grid_mlp: bool + :param use_sep_s2_act: If `True`, use separable S2 activation when `use_gate_act` is False. + :type use_sep_s2_act: bool + :param norm_type: Type of normalization layer (['layer_norm', 'layer_norm_sh']) + :type norm_type: str + :param alpha_drop: Dropout rate for attention weights + :type alpha_drop: float + :param drop_path_rate: Drop path rate + :type drop_path_rate: float + :param proj_drop: Dropout rate for outputs of attention and FFN + :type proj_drop: float + + .. py:method:: forward(x, atomic_numbers, edge_distance, edge_index, batch) + + + diff --git a/_sources/autoapi/core/models/equiformer_v2/wigner/index.rst b/_sources/autoapi/core/models/equiformer_v2/wigner/index.rst new file mode 100644 index 000000000..a064782f8 --- /dev/null +++ b/_sources/autoapi/core/models/equiformer_v2/wigner/index.rst @@ -0,0 +1,38 @@ +:py:mod:`core.models.equiformer_v2.wigner` +========================================== + +.. py:module:: core.models.equiformer_v2.wigner + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.wigner.wigner_D + core.models.equiformer_v2.wigner._z_rot_mat + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.models.equiformer_v2.wigner._Jd + + +.. py:data:: _Jd + + + +.. py:function:: wigner_D(lv: int, alpha: torch.Tensor, beta: torch.Tensor, gamma: torch.Tensor) -> torch.Tensor + + +.. py:function:: _z_rot_mat(angle: torch.Tensor, lv: int) -> torch.Tensor + + diff --git a/_sources/autoapi/core/models/escn/escn/index.rst b/_sources/autoapi/core/models/escn/escn/index.rst new file mode 100644 index 000000000..d60d22a36 --- /dev/null +++ b/_sources/autoapi/core/models/escn/escn/index.rst @@ -0,0 +1,251 @@ +:py:mod:`core.models.escn.escn` +=============================== + +.. py:module:: core.models.escn.escn + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.escn.escn.eSCN + core.models.escn.escn.LayerBlock + core.models.escn.escn.MessageBlock + core.models.escn.escn.SO2Block + core.models.escn.escn.SO2Conv + core.models.escn.escn.EdgeBlock + core.models.escn.escn.EnergyBlock + core.models.escn.escn.ForceBlock + + + + +.. 
py:class:: eSCN(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_neighbors: int = 40, cutoff: float = 8.0, max_num_elements: int = 90, num_layers: int = 8, lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, sphere_channels: int = 128, hidden_channels: int = 256, edge_channels: int = 128, use_grid: bool = True, num_sphere_samples: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + Equivariant Spherical Channel Network + Paper: Reducing SO(3) Convolutions to SO(2) for Efficient Equivariant GNNs + + + :param use_pbc: Use periodic boundary conditions + :type use_pbc: bool + :param regress_forces: Compute forces + :type regress_forces: bool + :param otf_graph: Compute graph On The Fly (OTF) + :type otf_graph: bool + :param max_neighbors: Maximum number of neighbors per atom + :type max_neighbors: int + :param cutoff: Maximum distance between neighboring atoms in Angstroms + :type cutoff: float + :param max_num_elements: Maximum atomic number + :type max_num_elements: int + :param num_layers: Number of layers in the GNN + :type num_layers: int + :param lmax_list: List of maximum degree of the spherical harmonics (1 to 10) + :type lmax_list: int + :param mmax_list: List of maximum order of the spherical harmonics (0 to lmax) + :type mmax_list: int + :param sphere_channels: Number of spherical channels (one set per resolution) + :type sphere_channels: int + :param hidden_channels: Number of hidden units in message passing + :type hidden_channels: int + :param num_sphere_samples: Number of samples used to approximate the integration of the sphere in the output blocks + :type num_sphere_samples: int + :param edge_channels: Number of channels for the edge invariant features + :type edge_channels: int + :param distance_function: Basis function used for distances + :type distance_function: "gaussian", "sigmoid", "linearsigmoid", "silu" + :param basis_width_scalar: Width of distance basis function + :type basis_width_scalar: float + :param distance_resolution: Distance between distance basis functions in Angstroms + :type distance_resolution: float + :param show_timing_info: Show timing and memory info + :type show_timing_info: bool + + .. py:property:: num_params + :type: int + + + .. py:method:: forward(data) + + + .. py:method:: _init_edge_rot_mat(data, edge_index, edge_distance_vec) + + + +.. 
py:class:: LayerBlock(layer_idx: int, sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], distance_expansion, max_num_elements: int, SO3_grid: fairchem.core.models.escn.so3.SO3_Grid, act) + + + Bases: :py:obj:`torch.nn.Module` + + Layer block: Perform one layer (message passing and aggregation) of the GNN + + :param layer_idx: Layer number + :type layer_idx: int + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param hidden_channels: Number of hidden channels used during the SO(2) conv + :type hidden_channels: int + :param edge_channels: Size of invariant edge embedding + :type edge_channels: int + :param lmax_list (list: int): List of degrees (l) for each resolution + :param mmax_list (list: int): List of orders (m) for each resolution + :param distance_expansion: Function used to compute distance embedding + :type distance_expansion: func + :param max_num_elements: Maximum number of atomic numbers + :type max_num_elements: int + :param SO3_grid: Class used to convert from grid the spherical harmonic representations + :type SO3_grid: SO3_grid + :param act: Non-linear activation function + :type act: function + + .. py:method:: forward(x, atomic_numbers, edge_distance, edge_index, SO3_edge_rot, mappingReduced) + + + +.. py:class:: MessageBlock(layer_idx: int, sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], distance_expansion, max_num_elements: int, SO3_grid: fairchem.core.models.escn.so3.SO3_Grid, act) + + + Bases: :py:obj:`torch.nn.Module` + + Message block: Perform message passing + + :param layer_idx: Layer number + :type layer_idx: int + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param hidden_channels: Number of hidden channels used during the SO(2) conv + :type hidden_channels: int + :param edge_channels: Size of invariant edge embedding + :type edge_channels: int + :param lmax_list (list: int): List of degrees (l) for each resolution + :param mmax_list (list: int): List of orders (m) for each resolution + :param distance_expansion: Function used to compute distance embedding + :type distance_expansion: func + :param max_num_elements: Maximum number of atomic numbers + :type max_num_elements: int + :param SO3_grid: Class used to convert from grid the spherical harmonic representations + :type SO3_grid: SO3_grid + :param act: Non-linear activation function + :type act: function + + .. py:method:: forward(x, atomic_numbers, edge_distance, edge_index, SO3_edge_rot, mappingReduced) + + + +.. py:class:: SO2Block(sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], act) + + + Bases: :py:obj:`torch.nn.Module` + + SO(2) Block: Perform SO(2) convolutions for all m (orders) + + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param hidden_channels: Number of hidden channels used during the SO(2) conv + :type hidden_channels: int + :param edge_channels: Size of invariant edge embedding + :type edge_channels: int + :param lmax_list (list: int): List of degrees (l) for each resolution + :param mmax_list (list: int): List of orders (m) for each resolution + :param act: Non-linear activation function + :type act: function + + .. py:method:: forward(x, x_edge, mappingReduced) + + + +.. 
py:class:: SO2Conv(m: int, sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], act) + + + Bases: :py:obj:`torch.nn.Module` + + SO(2) Conv: Perform an SO(2) convolution + + :param m: Order of the spherical harmonic coefficients + :type m: int + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param hidden_channels: Number of hidden channels used during the SO(2) conv + :type hidden_channels: int + :param edge_channels: Size of invariant edge embedding + :type edge_channels: int + :param lmax_list (list: int): List of degrees (l) for each resolution + :param mmax_list (list: int): List of orders (m) for each resolution + :param act: Non-linear activation function + :type act: function + + .. py:method:: forward(x_m, x_edge) -> torch.Tensor + + + +.. py:class:: EdgeBlock(edge_channels, distance_expansion, max_num_elements, act) + + + Bases: :py:obj:`torch.nn.Module` + + Edge Block: Compute invariant edge representation from edge distances and atomic numbers + + :param edge_channels: Size of invariant edge embedding + :type edge_channels: int + :param distance_expansion: Function used to compute distance embedding + :type distance_expansion: func + :param max_num_elements: Maximum number of atomic numbers + :type max_num_elements: int + :param act: Non-linear activation function + :type act: function + + .. py:method:: forward(edge_distance, source_element, target_element) + + + +.. py:class:: EnergyBlock(num_channels: int, num_sphere_samples: int, act) + + + Bases: :py:obj:`torch.nn.Module` + + Energy Block: Output block computing the energy + + :param num_channels: Number of channels + :type num_channels: int + :param num_sphere_samples: Number of samples used to approximate the integral on the sphere + :type num_sphere_samples: int + :param act: Non-linear activation function + :type act: function + + .. py:method:: forward(x_pt) -> torch.Tensor + + + +.. py:class:: ForceBlock(num_channels: int, num_sphere_samples: int, act) + + + Bases: :py:obj:`torch.nn.Module` + + Force Block: Output block computing the per atom forces + + :param num_channels: Number of channels + :type num_channels: int + :param num_sphere_samples: Number of samples used to approximate the integral on the sphere + :type num_sphere_samples: int + :param act: Non-linear activation function + :type act: function + + .. py:method:: forward(x_pt, sphere_points) -> torch.Tensor + + + diff --git a/_sources/autoapi/core/models/escn/index.rst b/_sources/autoapi/core/models/escn/index.rst new file mode 100644 index 000000000..1a056d30e --- /dev/null +++ b/_sources/autoapi/core/models/escn/index.rst @@ -0,0 +1,84 @@ +:py:mod:`core.models.escn` +========================== + +.. py:module:: core.models.escn + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + escn/index.rst + so3/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.escn.eSCN + + + + +.. 
py:class:: eSCN(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_neighbors: int = 40, cutoff: float = 8.0, max_num_elements: int = 90, num_layers: int = 8, lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, sphere_channels: int = 128, hidden_channels: int = 256, edge_channels: int = 128, use_grid: bool = True, num_sphere_samples: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + Equivariant Spherical Channel Network + Paper: Reducing SO(3) Convolutions to SO(2) for Efficient Equivariant GNNs + + + :param use_pbc: Use periodic boundary conditions + :type use_pbc: bool + :param regress_forces: Compute forces + :type regress_forces: bool + :param otf_graph: Compute graph On The Fly (OTF) + :type otf_graph: bool + :param max_neighbors: Maximum number of neighbors per atom + :type max_neighbors: int + :param cutoff: Maximum distance between neighboring atoms in Angstroms + :type cutoff: float + :param max_num_elements: Maximum atomic number + :type max_num_elements: int + :param num_layers: Number of layers in the GNN + :type num_layers: int + :param lmax_list: List of maximum degree of the spherical harmonics (1 to 10) + :type lmax_list: int + :param mmax_list: List of maximum order of the spherical harmonics (0 to lmax) + :type mmax_list: int + :param sphere_channels: Number of spherical channels (one set per resolution) + :type sphere_channels: int + :param hidden_channels: Number of hidden units in message passing + :type hidden_channels: int + :param num_sphere_samples: Number of samples used to approximate the integration of the sphere in the output blocks + :type num_sphere_samples: int + :param edge_channels: Number of channels for the edge invariant features + :type edge_channels: int + :param distance_function: Basis function used for distances + :type distance_function: "gaussian", "sigmoid", "linearsigmoid", "silu" + :param basis_width_scalar: Width of distance basis function + :type basis_width_scalar: float + :param distance_resolution: Distance between distance basis functions in Angstroms + :type distance_resolution: float + :param show_timing_info: Show timing and memory info + :type show_timing_info: bool + + .. py:property:: num_params + :type: int + + + .. py:method:: forward(data) + + + .. py:method:: _init_edge_rot_mat(data, edge_index, edge_distance_vec) + + + diff --git a/_sources/autoapi/core/models/escn/so3/index.rst b/_sources/autoapi/core/models/escn/so3/index.rst new file mode 100644 index 000000000..472a6c422 --- /dev/null +++ b/_sources/autoapi/core/models/escn/so3/index.rst @@ -0,0 +1,171 @@ +:py:mod:`core.models.escn.so3` +============================== + +.. py:module:: core.models.escn.so3 + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.escn.so3.CoefficientMapping + core.models.escn.so3.SO3_Embedding + core.models.escn.so3.SO3_Rotation + core.models.escn.so3.SO3_Grid + + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.models.escn.so3._Jd + + +.. py:data:: _Jd + + + +.. 
py:class:: CoefficientMapping(lmax_list: list[int], mmax_list: list[int], device) + + + Helper functions for coefficients used to reshape l<-->m and to get coefficients of specific degree or order + + :param lmax_list (list: int): List of maximum degree of the spherical harmonics + :param mmax_list (list: int): List of maximum order of the spherical harmonics + :param device: Device of the output + + .. py:method:: complex_idx(m, lmax: int = -1) + + + .. py:method:: coefficient_idx(lmax: int, mmax: int) -> torch.Tensor + + + +.. py:class:: SO3_Embedding(length: int, lmax_list: list[int], num_channels: int, device: torch.device, dtype: torch.dtype) + + + Bases: :py:obj:`torch.nn.Module` + + Helper functions for irreps embedding + + :param length: Batch size + :type length: int + :param lmax_list (list: int): List of maximum degree of the spherical harmonics + :param num_channels: Number of channels + :type num_channels: int + :param device: Device of the output + :param dtype: type of the output tensors + + .. py:method:: clone() -> SO3_Embedding + + + .. py:method:: set_embedding(embedding) -> None + + + .. py:method:: set_lmax_mmax(lmax_list, mmax_list) -> None + + + .. py:method:: _expand_edge(edge_index) -> None + + + .. py:method:: expand_edge(edge_index) -> SO3_Embedding + + + .. py:method:: _reduce_edge(edge_index, num_nodes: int) -> None + + + .. py:method:: _m_primary(mapping) -> None + + + .. py:method:: _l_primary(mapping) -> None + + + .. py:method:: _rotate(SO3_rotation, lmax_list, mmax_list) -> None + + + .. py:method:: _rotate_inv(SO3_rotation, mappingReduced) -> None + + + .. py:method:: _grid_act(SO3_grid, act, mappingReduced) -> None + + + .. py:method:: to_grid(SO3_grid, lmax: int = -1) -> torch.Tensor + + + .. py:method:: _from_grid(x_grid, SO3_grid, lmax: int = -1) -> None + + + +.. py:class:: SO3_Rotation(rot_mat3x3: torch.Tensor, lmax: list[int]) + + + Bases: :py:obj:`torch.nn.Module` + + Helper functions for Wigner-D rotations + + :param rot_mat3x3: Rotation matrix + :type rot_mat3x3: tensor + :param lmax_list (list: int): List of maximum degree of the spherical harmonics + + .. py:method:: set_lmax(lmax) -> None + + + .. py:method:: rotate(embedding, out_lmax, out_mmax) -> torch.Tensor + + + .. py:method:: rotate_inv(embedding, in_lmax, in_mmax) -> torch.Tensor + + + .. py:method:: RotationToWignerDMatrix(edge_rot_mat: torch.Tensor, start_lmax: int, end_lmax: int) -> torch.Tensor + + + .. py:method:: wigner_D(lval, alpha, beta, gamma) + + + .. py:method:: _z_rot_mat(angle: torch.Tensor, lv: int) -> torch.Tensor + + + +.. py:class:: SO3_Grid(lmax: int, mmax: int) + + + Bases: :py:obj:`torch.nn.Module` + + Helper functions for grid representation of the irreps + + :param lmax: Maximum degree of the spherical harmonics + :type lmax: int + :param mmax: Maximum order of the spherical harmonics + :type mmax: int + + .. py:method:: _initialize(device: torch.device) -> None + + + .. py:method:: get_to_grid_mat(device: torch.device) + + + .. py:method:: get_from_grid_mat(device: torch.device) + + + .. py:method:: to_grid(embedding: torch.Tensor, lmax: int, mmax: int) -> torch.Tensor + + + .. 
py:method:: from_grid(grid: torch.Tensor, lmax: int, mmax: int) -> torch.Tensor + + + diff --git a/_sources/autoapi/core/models/gemnet/gemnet/index.rst b/_sources/autoapi/core/models/gemnet/gemnet/index.rst new file mode 100644 index 000000000..77ef7f910 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/gemnet/index.rst @@ -0,0 +1,127 @@ +:py:mod:`core.models.gemnet.gemnet` +=================================== + +.. py:module:: core.models.gemnet.gemnet + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet.gemnet.GemNetT + + + + +.. py:class:: GemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', num_elements: int = 83, scale_file: str | None = None) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + GemNet-T, triplets-only variant of GemNet + + :param num_atoms (int): + :type num_atoms (int): Unused argument + :param bond_feat_dim (int): + :type bond_feat_dim (int): Unused argument + :param num_targets: Number of prediction targets. + :type num_targets: int + :param num_spherical: Controls maximum frequency. + :type num_spherical: int + :param num_radial: Controls maximum frequency. + :type num_radial: int + :param num_blocks: Number of building blocks to be stacked. + :type num_blocks: int + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_trip: (Down-projected) Embedding size in the triplet message passing block. + :type emb_size_trip: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). + :type emb_size_cbf: int + :param emb_size_bil_trip: Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer. + :type emb_size_bil_trip: int + :param num_before_skip: Number of residual blocks before the first skip connection. + :type num_before_skip: int + :param num_after_skip: Number of residual blocks after the first skip connection. + :type num_after_skip: int + :param num_concat: Number of residual blocks after the concatenation. + :type num_concat: int + :param num_atom: Number of residual blocks in the atom embedding blocks. + :type num_atom: int + :param regress_forces: Whether to predict forces. Default: True + :type regress_forces: bool + :param direct_forces: If True predict forces based on aggregation of interatomic directions. + If False predict forces based on negative gradient of energy potential. + :type direct_forces: bool + :param cutoff: Embedding cutoff for interatomic directions in Angstrom. 
+ :type cutoff: float + :param rbf: Name and hyperparameters of the radial basis function. + :type rbf: dict + :param envelope: Name and hyperparameters of the envelope function. + :type envelope: dict + :param cbf: Name and hyperparameters of the cosine basis function. + :type cbf: dict + :param extensive: Whether the output should be extensive (proportional to the number of atoms) + :type extensive: bool + :param output_init: Initialization method for the final dense layer. + :type output_init: str + :param activation: Name of the activation function. + :type activation: str + :param scale_file: Path to the json file containing the scaling factors. + :type scale_file: str + + .. py:property:: num_params + + + .. py:method:: get_triplets(edge_index, num_atoms) + + Get all b->a for each edge c->a. + It is possible that b=c, as long as the edges are distinct. + + :returns: * **id3_ba** (*torch.Tensor, shape (num_triplets,)*) -- Indices of input edge b->a of each triplet b->a<-c + * **id3_ca** (*torch.Tensor, shape (num_triplets,)*) -- Indices of output edge c->a of each triplet b->a<-c + * **id3_ragged_idx** (*torch.Tensor, shape (num_triplets,)*) -- Indices enumerating the copies of id3_ca for creating a padded matrix + + + .. py:method:: select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, inverse_neg) -> torch.Tensor + + + .. py:method:: reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector) + + Reorder edges to make finding counter-directional edges easier. + + Some edges are only present in one direction in the data, + since every atom has a maximum number of neighbors. Since we only use i->j + edges here, we lose some j->i edges and add others by + making it symmetric. + We could fix this by merging edge_index with its counter-edges, + including the cell_offsets, and then running torch.unique. + But this does not seem worth it. + + + .. py:method:: select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None) + + + .. py:method:: generate_interaction_graph(data) + + + .. py:method:: forward(data) + + + diff --git a/_sources/autoapi/core/models/gemnet/index.rst b/_sources/autoapi/core/models/gemnet/index.rst new file mode 100644 index 000000000..cba947b66 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/index.rst @@ -0,0 +1,139 @@ +:py:mod:`core.models.gemnet` +============================ + +.. py:module:: core.models.gemnet + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + layers/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + gemnet/index.rst + initializers/index.rst + utils/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet.GemNetT + + + + +.. 
py:class:: GemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', num_elements: int = 83, scale_file: str | None = None) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + GemNet-T, triplets-only variant of GemNet + + :param num_atoms (int): + :type num_atoms (int): Unused argument + :param bond_feat_dim (int): + :type bond_feat_dim (int): Unused argument + :param num_targets: Number of prediction targets. + :type num_targets: int + :param num_spherical: Controls maximum frequency. + :type num_spherical: int + :param num_radial: Controls maximum frequency. + :type num_radial: int + :param num_blocks: Number of building blocks to be stacked. + :type num_blocks: int + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_trip: (Down-projected) Embedding size in the triplet message passing block. + :type emb_size_trip: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). + :type emb_size_cbf: int + :param emb_size_bil_trip: Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer. + :type emb_size_bil_trip: int + :param num_before_skip: Number of residual blocks before the first skip connection. + :type num_before_skip: int + :param num_after_skip: Number of residual blocks after the first skip connection. + :type num_after_skip: int + :param num_concat: Number of residual blocks after the concatenation. + :type num_concat: int + :param num_atom: Number of residual blocks in the atom embedding blocks. + :type num_atom: int + :param regress_forces: Whether to predict forces. Default: True + :type regress_forces: bool + :param direct_forces: If True predict forces based on aggregation of interatomic directions. + If False predict forces based on negative gradient of energy potential. + :type direct_forces: bool + :param cutoff: Embedding cutoff for interatomic directions in Angstrom. + :type cutoff: float + :param rbf: Name and hyperparameters of the radial basis function. + :type rbf: dict + :param envelope: Name and hyperparameters of the envelope function. + :type envelope: dict + :param cbf: Name and hyperparameters of the cosine basis function. + :type cbf: dict + :param extensive: Whether the output should be extensive (proportional to the number of atoms) + :type extensive: bool + :param output_init: Initialization method for the final dense layer. + :type output_init: str + :param activation: Name of the activation function. + :type activation: str + :param scale_file: Path to the json file containing the scaling factors. + :type scale_file: str + + .. py:property:: num_params + + + .. py:method:: get_triplets(edge_index, num_atoms) + + Get all b->a for each edge c->a. 
+ It is possible that b=c, as long as the edges are distinct. + + :returns: * **id3_ba** (*torch.Tensor, shape (num_triplets,)*) -- Indices of input edge b->a of each triplet b->a<-c + * **id3_ca** (*torch.Tensor, shape (num_triplets,)*) -- Indices of output edge c->a of each triplet b->a<-c + * **id3_ragged_idx** (*torch.Tensor, shape (num_triplets,)*) -- Indices enumerating the copies of id3_ca for creating a padded matrix + + + .. py:method:: select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, inverse_neg) -> torch.Tensor + + + .. py:method:: reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector) + + Reorder edges to make finding counter-directional edges easier. + + Some edges are only present in one direction in the data, + since every atom has a maximum number of neighbors. Since we only use i->j + edges here, we lose some j->i edges and add others by + making it symmetric. + We could fix this by merging edge_index with its counter-edges, + including the cell_offsets, and then running torch.unique. + But this does not seem worth it. + + + .. py:method:: select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None) + + + .. py:method:: generate_interaction_graph(data) + + + .. py:method:: forward(data) + + + diff --git a/_sources/autoapi/core/models/gemnet/initializers/index.rst b/_sources/autoapi/core/models/gemnet/initializers/index.rst new file mode 100644 index 000000000..0dc720337 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/initializers/index.rst @@ -0,0 +1,43 @@ +:py:mod:`core.models.gemnet.initializers` +========================================= + +.. py:module:: core.models.gemnet.initializers + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet.initializers._standardize + core.models.gemnet.initializers.he_orthogonal_init + + + +.. py:function:: _standardize(kernel) + + Makes sure that N*Var(W) = 1 and E[W] = 0 + + +.. py:function:: he_orthogonal_init(tensor: torch.Tensor) -> torch.Tensor + + Generate a weight matrix with variance according to He (Kaiming) initialization. + Based on a random (semi-)orthogonal matrix neural networks + are expected to learn better when features are decorrelated + (stated by eg. "Reducing overfitting in deep networks by decorrelating representations", + "Dropout: a simple way to prevent neural networks from overfitting", + "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks") + + diff --git a/_sources/autoapi/core/models/gemnet/layers/atom_update_block/index.rst b/_sources/autoapi/core/models/gemnet/layers/atom_update_block/index.rst new file mode 100644 index 000000000..c7a1ae3b6 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/layers/atom_update_block/index.rst @@ -0,0 +1,88 @@ +:py:mod:`core.models.gemnet.layers.atom_update_block` +===================================================== + +.. py:module:: core.models.gemnet.layers.atom_update_block + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. 
autoapisummary:: + + core.models.gemnet.layers.atom_update_block.AtomUpdateBlock + core.models.gemnet.layers.atom_update_block.OutputBlock + + + + +.. py:class:: AtomUpdateBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, activation=None, name: str = 'atom_update') + + + Bases: :py:obj:`torch.nn.Module` + + Aggregate the message embeddings of the atoms + + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_atom: Embedding size of the edges. + :type emb_size_atom: int + :param nHidden: Number of residual blocks. + :type nHidden: int + :param activation: Name of the activation function to use in the dense layers. + :type activation: callable/str + + .. py:method:: get_mlp(units_in, units, nHidden, activation) + + + .. py:method:: forward(h, m, rbf, id_j) + + :returns: **h** -- Atom embedding. + :rtype: torch.Tensor, shape=(nAtoms, emb_size_atom) + + + +.. py:class:: OutputBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, num_targets: int, activation=None, direct_forces: bool = True, output_init: str = 'HeOrthogonal', name: str = 'output', **kwargs) + + + Bases: :py:obj:`AtomUpdateBlock` + + Combines the atom update block and subsequent final dense layer. + + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_atom: Embedding size of the edges. + :type emb_size_atom: int + :param nHidden: Number of residual blocks. + :type nHidden: int + :param num_targets: Number of targets. + :type num_targets: int + :param activation: Name of the activation function to use in the dense layers except for the final dense layer. + :type activation: str + :param direct_forces: If true directly predict forces without taking the gradient of the energy potential. + :type direct_forces: bool + :param output_init: Kernel initializer of the final dense layer. + :type output_init: int + + .. py:method:: reset_parameters() -> None + + + .. py:method:: forward(h, m, rbf, id_j) + + :returns: * **(E, F)** (*tuple*) + * **- E** (*torch.Tensor, shape=(nAtoms, num_targets)*) + * **- F** (*torch.Tensor, shape=(nEdges, num_targets)*) + * *Energy and force prediction* + + + diff --git a/_sources/autoapi/core/models/gemnet/layers/base_layers/index.rst b/_sources/autoapi/core/models/gemnet/layers/base_layers/index.rst new file mode 100644 index 000000000..00868610d --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/layers/base_layers/index.rst @@ -0,0 +1,149 @@ +:py:mod:`core.models.gemnet.layers.base_layers` +=============================================== + +.. py:module:: core.models.gemnet.layers.base_layers + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet.layers.base_layers.Dense + core.models.gemnet.layers.base_layers.ScaledSiLU + core.models.gemnet.layers.base_layers.SiQU + core.models.gemnet.layers.base_layers.ResidualLayer + + + + +.. py:class:: Dense(in_features, out_features, bias: bool = False, activation=None) + + + Bases: :py:obj:`torch.nn.Module` + + Combines dense layer with scaling for swish activation. + + :param units: Output embedding size. + :type units: int + :param activation: Name of the activation function to use. + :type activation: str + :param bias: True if use bias. + :type bias: bool + + .. 
py:method:: reset_parameters(initializer=he_orthogonal_init) -> None + + + .. py:method:: forward(x) + + + +.. py:class:: ScaledSiLU + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x) + + + +.. py:class:: SiQU + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x) + + + +.. py:class:: ResidualLayer(units: int, nLayers: int = 2, layer=Dense, **layer_kwargs) + + + Bases: :py:obj:`torch.nn.Module` + + Residual block with output scaled by 1/sqrt(2). + + :param units: Output embedding size. + :type units: int + :param nLayers: Number of dense layers. + :type nLayers: int + :param layer_kwargs: Keyword arguments for initializing the layers. + :type layer_kwargs: str + + .. py:method:: forward(input) + + + diff --git a/_sources/autoapi/core/models/gemnet/layers/basis_utils/index.rst b/_sources/autoapi/core/models/gemnet/layers/basis_utils/index.rst new file mode 100644 index 000000000..217afd5a0 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/layers/basis_utils/index.rst @@ -0,0 +1,115 @@ +:py:mod:`core.models.gemnet.layers.basis_utils` +=============================================== + +.. py:module:: core.models.gemnet.layers.basis_utils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. 
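The ``ResidualLayer`` documented above reduces to a small pattern: apply ``nLayers`` dense layers, add the input back through a skip connection, and scale the sum by 1/sqrt(2) so its variance stays roughly constant. A minimal sketch, where plain ``torch.nn.Linear`` stands in for ``Dense`` purely for brevity:

.. code-block:: python

   import math

   import torch


   class ResidualLayerSketch(torch.nn.Module):
       def __init__(self, units: int, n_layers: int = 2):
           super().__init__()
           # n_layers dense layers of constant width, as in the docstring.
           self.mlp = torch.nn.Sequential(
               *[torch.nn.Linear(units, units, bias=False) for _ in range(n_layers)]
           )
           self.inv_sqrt_2 = 1.0 / math.sqrt(2.0)

       def forward(self, x: torch.Tensor) -> torch.Tensor:
           # Skip connection, then rescale the sum by 1/sqrt(2).
           return (x + self.mlp(x)) * self.inv_sqrt_2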
autoapisummary:: + + core.models.gemnet.layers.basis_utils.Jn + core.models.gemnet.layers.basis_utils.Jn_zeros + core.models.gemnet.layers.basis_utils.spherical_bessel_formulas + core.models.gemnet.layers.basis_utils.bessel_basis + core.models.gemnet.layers.basis_utils.sph_harm_prefactor + core.models.gemnet.layers.basis_utils.associated_legendre_polynomials + core.models.gemnet.layers.basis_utils.real_sph_harm + + + +.. py:function:: Jn(r: float, n: int) + + numerical spherical bessel functions of order n + + +.. py:function:: Jn_zeros(n: int, k: int) + + Compute the first k zeros of the spherical bessel functions up to order n (excluded) + + +.. py:function:: spherical_bessel_formulas(n: int) + + Computes the sympy formulas for the spherical bessel functions up to order n (excluded) + + +.. py:function:: bessel_basis(n: int, k: int) + + Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to + order n (excluded) and maximum frequency k (excluded). + + :returns: + + list + Bessel basis formulas taking in a single argument x. + Has length n where each element has length k. -> In total n*k many. + :rtype: bess_basis + + +.. py:function:: sph_harm_prefactor(l_degree: int, m_order: int) + + Computes the constant pre-factor for the spherical harmonic of degree l and order m. + + :param l_degree: Degree of the spherical harmonic. l >= 0 + :type l_degree: int + :param m_order: Order of the spherical harmonic. -l <= m <= l + :type m_order: int + + :returns: **factor** + :rtype: float + + +.. py:function:: associated_legendre_polynomials(L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True) + + Computes string formulas of the associated legendre polynomials up to degree L (excluded). + + :param L_maxdegree: Degree up to which to calculate the associated legendre polynomials (degree L is excluded). + :type L_maxdegree: int + :param zero_m_only: If True only calculate the polynomials for the polynomials where m=0. + :type zero_m_only: bool + :param pos_m_only: If True only calculate the polynomials for the polynomials where m>=0. Overwritten by zero_m_only. + :type pos_m_only: bool + + :returns: **polynomials** -- Contains the sympy functions of the polynomials (in total L many if zero_m_only is True else L^2 many). + :rtype: list + + +.. py:function:: real_sph_harm(L_maxdegree: int, use_theta: bool, use_phi: bool = True, zero_m_only: bool = True) + + Computes formula strings of the the real part of the spherical harmonics up to degree L (excluded). + Variables are either spherical coordinates phi and theta (or cartesian coordinates x,y,z) on the UNIT SPHERE. + + :param L_maxdegree: Degree up to which to calculate the spherical harmonics (degree L is excluded). + :type L_maxdegree: int + :param use_theta: + - True: Expects the input of the formula strings to contain theta. + - False: Expects the input of the formula strings to contain z. + :type use_theta: bool + :param use_phi: + - True: Expects the input of the formula strings to contain phi. + - False: Expects the input of the formula strings to contain x and y. + Does nothing if zero_m_only is True + :type use_phi: bool + :param zero_m_only: If True only calculate the harmonics where m=0. + :type zero_m_only: bool + + :returns: **Y_lm_real** -- Computes formula strings of the the real part of the spherical harmonics up + to degree L (where degree L is not excluded). + In total L^2 many sph harm exist up to degree L (excluded). 
However, if zero_m_only only is True then + the total count is reduced to be only L many. + :rtype: list + + diff --git a/_sources/autoapi/core/models/gemnet/layers/efficient/index.rst b/_sources/autoapi/core/models/gemnet/layers/efficient/index.rst new file mode 100644 index 000000000..cc8f297eb --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/layers/efficient/index.rst @@ -0,0 +1,85 @@ +:py:mod:`core.models.gemnet.layers.efficient` +============================================= + +.. py:module:: core.models.gemnet.layers.efficient + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet.layers.efficient.EfficientInteractionDownProjection + core.models.gemnet.layers.efficient.EfficientInteractionBilinear + + + + +.. py:class:: EfficientInteractionDownProjection(num_spherical: int, num_radial: int, emb_size_interm: int) + + + Bases: :py:obj:`torch.nn.Module` + + Down projection in the efficient reformulation. + + :param emb_size_interm: Intermediate embedding size (down-projection size). + :type emb_size_interm: int + :param kernel_initializer: Initializer of the weight matrix. + :type kernel_initializer: callable + + .. py:method:: reset_parameters() -> None + + + .. py:method:: forward(rbf, sph, id_ca, id_ragged_idx) + + :param rbf: + :type rbf: torch.Tensor, shape=(1, nEdges, num_radial) + :param sph: + :type sph: torch.Tensor, shape=(nEdges, Kmax, num_spherical) + :param id_ca: + :param id_ragged_idx: + + :returns: * **rbf_W1** (*torch.Tensor, shape=(nEdges, emb_size_interm, num_spherical)*) + * **sph** (*torch.Tensor, shape=(nEdges, Kmax, num_spherical)*) -- Kmax = maximum number of neighbors of the edges + + + +.. py:class:: EfficientInteractionBilinear(emb_size: int, emb_size_interm: int, units_out: int) + + + Bases: :py:obj:`torch.nn.Module` + + Efficient reformulation of the bilinear layer and subsequent summation. + + :param units_out: Embedding output size of the bilinear layer. + :type units_out: int + :param kernel_initializer: Initializer of the weight matrix. + :type kernel_initializer: callable + + .. py:method:: reset_parameters() -> None + + + .. py:method:: forward(basis, m, id_reduce, id_ragged_idx) -> torch.Tensor + + :param basis: + :param m: + :type m: quadruplets: m = m_db , triplets: m = m_ba + :param id_reduce: + :param id_ragged_idx: + + :returns: **m_ca** -- Edge embeddings. + :rtype: torch.Tensor, shape=(nEdges, units_out) + + + diff --git a/_sources/autoapi/core/models/gemnet/layers/embedding_block/index.rst b/_sources/autoapi/core/models/gemnet/layers/embedding_block/index.rst new file mode 100644 index 000000000..72d634204 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/layers/embedding_block/index.rst @@ -0,0 +1,70 @@ +:py:mod:`core.models.gemnet.layers.embedding_block` +=================================================== + +.. py:module:: core.models.gemnet.layers.embedding_block + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet.layers.embedding_block.AtomEmbedding + core.models.gemnet.layers.embedding_block.EdgeEmbedding + + + + +.. 
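Returning to the efficient interaction layers documented above: the "efficient reformulation" avoids materializing the full outer product of radial and angular bases for every neighbor by splitting the bilinear layer into two smaller contractions, one for the down-projection and one for the neighbor summation. The sketch below illustrates the idea with made-up shapes; the weight layouts and the exact contraction order used by the library are assumptions.

.. code-block:: python

   import torch

   # Hypothetical sizes, for illustration only.
   n_edges, num_radial, num_spherical = 10, 8, 7
   k_max, emb_size_interm, emb_size, units_out = 5, 16, 32, 64

   rbf = torch.randn(n_edges, num_radial)             # radial basis per edge
   sph = torch.randn(n_edges, k_max, num_spherical)   # angular basis per neighbor
   m = torch.randn(n_edges, k_max, emb_size)          # padded neighbor messages

   # Down projection (EfficientInteractionDownProjection): fold the radial
   # basis into an intermediate size before touching the neighbor dimension.
   w1 = torch.randn(num_radial, emb_size_interm, num_spherical)
   rbf_w1 = torch.einsum("er,ris->eis", rbf, w1)      # (nEdges, emb_size_interm, num_spherical)

   # Bilinear layer (EfficientInteractionBilinear): sum over the Kmax
   # neighbors first, then contract with the down-projected basis.
   sum_k = torch.einsum("eks,ekh->esh", sph, m)       # (nEdges, num_spherical, emb_size)
   w2 = torch.randn(emb_size_interm, emb_size, units_out)
   m_ca = torch.einsum("eis,esh,iho->eo", rbf_w1, sum_k, w2)
   print(m_ca.shape)                                  # torch.Size([10, 64])

The padded/ragged bookkeeping (``id_ca``, ``id_ragged_idx``) that scatters each edge's neighbors into the ``Kmax`` slots is omitted from this sketch.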
py:class:: AtomEmbedding(emb_size, num_elements: int) + + + Bases: :py:obj:`torch.nn.Module` + + Initial atom embeddings based on the atom type + + :param emb_size: Atom embeddings size + :type emb_size: int + + .. py:method:: forward(Z) + + :returns: **h** -- Atom embeddings. + :rtype: torch.Tensor, shape=(nAtoms, emb_size) + + + +.. py:class:: EdgeEmbedding(atom_features, edge_features, out_features, activation=None) + + + Bases: :py:obj:`torch.nn.Module` + + Edge embedding based on the concatenation of atom embeddings and subsequent dense layer. + + :param emb_size: Embedding size after the dense layer. + :type emb_size: int + :param activation: Activation function used in the dense layer. + :type activation: str + + .. py:method:: forward(h, m_rbf, idx_s, idx_t) + + :param h: + :param m_rbf: in embedding block: m_rbf = rbf ; In interaction block: m_rbf = m_st + :type m_rbf: shape (nEdges, nFeatures) + :param idx_s: + :param idx_t: + + :returns: **m_st** -- Edge embeddings. + :rtype: torch.Tensor, shape=(nEdges, emb_size) + + + diff --git a/_sources/autoapi/core/models/gemnet/layers/index.rst b/_sources/autoapi/core/models/gemnet/layers/index.rst new file mode 100644 index 000000000..f9b4995a6 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/layers/index.rst @@ -0,0 +1,22 @@ +:py:mod:`core.models.gemnet.layers` +=================================== + +.. py:module:: core.models.gemnet.layers + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + atom_update_block/index.rst + base_layers/index.rst + basis_utils/index.rst + efficient/index.rst + embedding_block/index.rst + interaction_block/index.rst + radial_basis/index.rst + spherical_basis/index.rst + + diff --git a/_sources/autoapi/core/models/gemnet/layers/interaction_block/index.rst b/_sources/autoapi/core/models/gemnet/layers/interaction_block/index.rst new file mode 100644 index 000000000..a72d83836 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/layers/interaction_block/index.rst @@ -0,0 +1,92 @@ +:py:mod:`core.models.gemnet.layers.interaction_block` +===================================================== + +.. py:module:: core.models.gemnet.layers.interaction_block + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet.layers.interaction_block.InteractionBlockTripletsOnly + core.models.gemnet.layers.interaction_block.TripletInteraction + + + + +.. py:class:: InteractionBlockTripletsOnly(emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, activation: str | None = None, name: str = 'Interaction') + + + Bases: :py:obj:`torch.nn.Module` + + Interaction block for GemNet-T/dT. + + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_trip: (Down-projected) Embedding size in the triplet message passing block. + :type emb_size_trip: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). 
+ :type emb_size_cbf: int + :param emb_size_bil_trip: Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer. + :type emb_size_bil_trip: int + :param num_before_skip: Number of residual blocks before the first skip connection. + :type num_before_skip: int + :param num_after_skip: Number of residual blocks after the first skip connection. + :type num_after_skip: int + :param num_concat: Number of residual blocks after the concatenation. + :type num_concat: int + :param num_atom: Number of residual blocks in the atom embedding blocks. + :type num_atom: int + :param activation: Name of the activation function to use in the dense layers except for the final dense layer. + :type activation: str + + .. py:method:: forward(h: torch.Tensor, m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca, rbf_h, idx_s, idx_t) + + :returns: * **h** (*torch.Tensor, shape=(nEdges, emb_size_atom)*) -- Atom embeddings. + * **m** (*torch.Tensor, shape=(nEdges, emb_size_edge)*) -- Edge embeddings (c->a). + + + +.. py:class:: TripletInteraction(emb_size_edge: int, emb_size_trip: int, emb_size_bilinear: int, emb_size_rbf: int, emb_size_cbf: int, activation: str | None = None, name: str = 'TripletInteraction', **kwargs) + + + Bases: :py:obj:`torch.nn.Module` + + Triplet-based message passing block. + + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_trip: (Down-projected) Embedding size of the edge embeddings after the hadamard product with rbf. + :type emb_size_trip: int + :param emb_size_bilinear: Embedding size of the edge embeddings after the bilinear layer. + :type emb_size_bilinear: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). + :type emb_size_cbf: int + :param activation: Name of the activation function to use in the dense layers except for the final dense layer. + :type activation: str + + .. py:method:: forward(m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca) + + :returns: **m** -- Edge embeddings (c->a). + :rtype: torch.Tensor, shape=(nEdges, emb_size_edge) + + + diff --git a/_sources/autoapi/core/models/gemnet/layers/radial_basis/index.rst b/_sources/autoapi/core/models/gemnet/layers/radial_basis/index.rst new file mode 100644 index 000000000..b2e787fff --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/layers/radial_basis/index.rst @@ -0,0 +1,114 @@ +:py:mod:`core.models.gemnet.layers.radial_basis` +================================================ + +.. py:module:: core.models.gemnet.layers.radial_basis + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet.layers.radial_basis.PolynomialEnvelope + core.models.gemnet.layers.radial_basis.ExponentialEnvelope + core.models.gemnet.layers.radial_basis.SphericalBesselBasis + core.models.gemnet.layers.radial_basis.BernsteinBasis + core.models.gemnet.layers.radial_basis.RadialBasis + + + + +.. py:class:: PolynomialEnvelope(exponent: int) + + + Bases: :py:obj:`torch.nn.Module` + + Polynomial envelope function that ensures a smooth cutoff. + + :param exponent: Exponent of the envelope function. + :type exponent: int + + .. 
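The ``PolynomialEnvelope`` above is the standard DimeNet-style smooth cutoff: a polynomial in the scaled distance that equals 1 at d = 0, reaches 0 at d = 1, and has vanishing low-order derivatives at the cutoff. Whether this implementation uses exactly the coefficients below is an assumption; the functional form, however, is the usual one.

.. code-block:: python

   import torch


   class PolynomialEnvelopeSketch(torch.nn.Module):
       def __init__(self, exponent: int):
           super().__init__()
           p = exponent + 1
           self.p = p
           # The three coefficients sum to -1, so the envelope is exactly 0 at d = 1.
           self.a = -(p + 1) * (p + 2) / 2
           self.b = p * (p + 2)
           self.c = -p * (p + 1) / 2

       def forward(self, d_scaled: torch.Tensor) -> torch.Tensor:
           env = (
               1
               + self.a * d_scaled**self.p
               + self.b * d_scaled ** (self.p + 1)
               + self.c * d_scaled ** (self.p + 2)
           )
           # Outside the cutoff (d_scaled >= 1) the envelope is zero.
           return torch.where(d_scaled < 1, env, torch.zeros_like(d_scaled))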
py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor + + + +.. py:class:: ExponentialEnvelope + + + Bases: :py:obj:`torch.nn.Module` + + Exponential envelope function that ensures a smooth cutoff, + as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. + SpookyNet: Learning Force Fields with Electronic Degrees of Freedom + and Nonlocal Effects + + .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor + + + +.. py:class:: SphericalBesselBasis(num_radial: int, cutoff: float) + + + Bases: :py:obj:`torch.nn.Module` + + 1D spherical Bessel basis + + :param num_radial: Controls maximum frequency. + :type num_radial: int + :param cutoff: Cutoff distance in Angstrom. + :type cutoff: float + + .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor + + + +.. py:class:: BernsteinBasis(num_radial: int, pregamma_initial: float = 0.45264) + + + Bases: :py:obj:`torch.nn.Module` + + Bernstein polynomial basis, + as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. + SpookyNet: Learning Force Fields with Electronic Degrees of Freedom + and Nonlocal Effects + + :param num_radial: Controls maximum frequency. + :type num_radial: int + :param pregamma_initial: Initial value of exponential coefficient gamma. + Default: gamma = 0.5 * a_0**-1 = 0.94486, + inverse softplus -> pregamma = log e**gamma - 1 = 0.45264 + :type pregamma_initial: float + + .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor + + + +.. py:class:: RadialBasis(num_radial: int, cutoff: float, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None) + + + Bases: :py:obj:`torch.nn.Module` + + :param num_radial: Controls maximum frequency. + :type num_radial: int + :param cutoff: Cutoff distance in Angstrom. + :type cutoff: float + :param rbf: Basis function and its hyperparameters. + :type rbf: dict = {"name": "gaussian"} + :param envelope: Envelope function and its hyperparameters. + :type envelope: dict = {"name": "polynomial", "exponent": 5} + + .. py:method:: forward(d) + + + diff --git a/_sources/autoapi/core/models/gemnet/layers/spherical_basis/index.rst b/_sources/autoapi/core/models/gemnet/layers/spherical_basis/index.rst new file mode 100644 index 000000000..1e8da62c6 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/layers/spherical_basis/index.rst @@ -0,0 +1,47 @@ +:py:mod:`core.models.gemnet.layers.spherical_basis` +=================================================== + +.. py:module:: core.models.gemnet.layers.spherical_basis + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet.layers.spherical_basis.CircularBasisLayer + + + + +.. py:class:: CircularBasisLayer(num_spherical: int, radial_basis: core.models.gemnet.layers.radial_basis.RadialBasis, cbf, efficient: bool = False) + + + Bases: :py:obj:`torch.nn.Module` + + 2D Fourier Bessel Basis + + :param num_spherical: Controls maximum frequency. + :type num_spherical: int + :param radial_basis: Radial basis functions + :type radial_basis: RadialBasis + :param cbf: Name and hyperparameters of the cosine basis function + :type cbf: dict + :param efficient: Whether to use the "efficient" summation order + :type efficient: bool + + .. 
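The ``RadialBasis`` wrapper documented above is configured through two small dicts selecting the basis function and the envelope. A usage sketch under assumptions: the import path is inferred from the module path shown here, and the output shape is what the surrounding docs suggest rather than something verified.

.. code-block:: python

   import torch

   # Assumed import path, mirroring core.models.gemnet.layers.radial_basis.
   from fairchem.core.models.gemnet.layers.radial_basis import RadialBasis

   radial_basis = RadialBasis(
       num_radial=128,
       cutoff=6.0,
       rbf={"name": "gaussian"},
       envelope={"name": "polynomial", "exponent": 5},
   )

   d = torch.rand(32) * 6.0       # interatomic distances in Angstrom
   rbf_values = radial_basis(d)   # expected shape: (32, 128)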
py:method:: forward(D_ca, cosφ_cab, id3_ca) + + + diff --git a/_sources/autoapi/core/models/gemnet/utils/index.rst b/_sources/autoapi/core/models/gemnet/utils/index.rst new file mode 100644 index 000000000..9ef15740a --- /dev/null +++ b/_sources/autoapi/core/models/gemnet/utils/index.rst @@ -0,0 +1,123 @@ +:py:mod:`core.models.gemnet.utils` +================================== + +.. py:module:: core.models.gemnet.utils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet.utils.read_json + core.models.gemnet.utils.update_json + core.models.gemnet.utils.write_json + core.models.gemnet.utils.read_value_json + core.models.gemnet.utils.ragged_range + core.models.gemnet.utils.repeat_blocks + core.models.gemnet.utils.calculate_interatomic_vectors + core.models.gemnet.utils.inner_product_normalized + core.models.gemnet.utils.mask_neighbors + + + +.. py:function:: read_json(path: str) + + +.. py:function:: update_json(path: str, data) -> None + + +.. py:function:: write_json(path: str, data) -> None + + +.. py:function:: read_value_json(path: str, key: str) + + +.. py:function:: ragged_range(sizes: torch.Tensor) -> torch.Tensor + + Multiple concatenated ranges. + + .. rubric:: Examples + + sizes = [1 4 2 3] + Return: [0 0 1 2 3 0 1 0 1 2] + + +.. py:function:: repeat_blocks(sizes: torch.Tensor, repeats: int | torch.Tensor, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) -> torch.Tensor + + Repeat blocks of indices. + Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements + + continuous_indexing: Whether to keep increasing the index after each block + start_idx: Starting index + block_inc: Number to increment by after each block, + either global or per block. Shape: len(sizes) - 1 + repeat_inc: Number to increment by after each repetition, + either global or per block + + .. rubric:: Examples + + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False + Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True + Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; + repeat_inc = 4 + Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; + start_idx = 5 + Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; + block_inc = 1 + Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7] + sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True + Return: [0 1 2 0 1 2 3 4 3 4 3 4] + sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True + Return: [0 1 0 1 5 6 5 6] + + +.. py:function:: calculate_interatomic_vectors(R: torch.Tensor, id_s: torch.Tensor, id_t: torch.Tensor, offsets_st: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor] + + Calculate the vectors connecting the given atom pairs, + considering offsets from periodic boundary conditions (PBC). + + :param R: Atom positions. + :type R: Tensor, shape = (nAtoms, 3) + :param id_s: Indices of the source atom of the edges. + :type id_s: Tensor, shape = (nEdges,) + :param id_t: Indices of the target atom of the edges. 
+ :type id_t: Tensor, shape = (nEdges,) + :param offsets_st: PBC offsets of the edges. + Subtract this from the correct direction. + :type offsets_st: Tensor, shape = (nEdges,) + + :returns: **(D_st, V_st)** -- + + D_st: Tensor, shape = (nEdges,) + Distance from atom t to s. + V_st: Tensor, shape = (nEdges,) + Unit direction from atom t to s. + :rtype: tuple + + +.. py:function:: inner_product_normalized(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor + + Calculate the inner product between the given normalized vectors, + giving a result between -1 and 1. + + +.. py:function:: mask_neighbors(neighbors: torch.Tensor, edge_mask: torch.Tensor) -> torch.Tensor + + diff --git a/_sources/autoapi/core/models/gemnet_gp/gemnet/index.rst b/_sources/autoapi/core/models/gemnet_gp/gemnet/index.rst new file mode 100644 index 000000000..4152b8351 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/gemnet/index.rst @@ -0,0 +1,127 @@ +:py:mod:`core.models.gemnet_gp.gemnet` +====================================== + +.. py:module:: core.models.gemnet_gp.gemnet + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_gp.gemnet.GraphParallelGemNetT + + + + +.. py:class:: GraphParallelGemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', scale_num_blocks: bool = False, scatter_atoms: bool = True, scale_file: str | None = None) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + GemNet-T, triplets-only variant of GemNet + + :param num_atoms (int): + :type num_atoms (int): Unused argument + :param bond_feat_dim (int): + :type bond_feat_dim (int): Unused argument + :param num_targets: Number of prediction targets. + :type num_targets: int + :param num_spherical: Controls maximum frequency. + :type num_spherical: int + :param num_radial: Controls maximum frequency. + :type num_radial: int + :param num_blocks: Number of building blocks to be stacked. + :type num_blocks: int + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_trip: (Down-projected) Embedding size in the triplet message passing block. + :type emb_size_trip: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). + :type emb_size_cbf: int + :param emb_size_bil_trip: Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer. + :type emb_size_bil_trip: int + :param num_before_skip: Number of residual blocks before the first skip connection. 
+ :type num_before_skip: int + :param num_after_skip: Number of residual blocks after the first skip connection. + :type num_after_skip: int + :param num_concat: Number of residual blocks after the concatenation. + :type num_concat: int + :param num_atom: Number of residual blocks in the atom embedding blocks. + :type num_atom: int + :param regress_forces: Whether to predict forces. Default: True + :type regress_forces: bool + :param direct_forces: If True predict forces based on aggregation of interatomic directions. + If False predict forces based on negative gradient of energy potential. + :type direct_forces: bool + :param cutoff: Embedding cutoff for interactomic directions in Angstrom. + :type cutoff: float + :param rbf: Name and hyperparameters of the radial basis function. + :type rbf: dict + :param envelope: Name and hyperparameters of the envelope function. + :type envelope: dict + :param cbf: Name and hyperparameters of the cosine basis function. + :type cbf: dict + :param extensive: Whether the output should be extensive (proportional to the number of atoms) + :type extensive: bool + :param output_init: Initialization method for the final dense layer. + :type output_init: str + :param activation: Name of the activation function. + :type activation: str + :param scale_file: Path to the json file containing the scaling factors. + :type scale_file: str + + .. py:property:: num_params + + + .. py:method:: get_triplets(edge_index, num_atoms) + + Get all b->a for each edge c->a. + It is possible that b=c, as long as the edges are distinct. + + :returns: * **id3_ba** (*torch.Tensor, shape (num_triplets,)*) -- Indices of input edge b->a of each triplet b->a<-c + * **id3_ca** (*torch.Tensor, shape (num_triplets,)*) -- Indices of output edge c->a of each triplet b->a<-c + * **id3_ragged_idx** (*torch.Tensor, shape (num_triplets,)*) -- Indices enumerating the copies of id3_ca for creating a padded matrix + + + .. py:method:: select_symmetric_edges(tensor: torch.Tensor, mask, reorder_idx, inverse_neg) -> torch.Tensor + + + .. py:method:: reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector) + + Reorder edges to make finding counter-directional edges easier. + + Some edges are only present in one direction in the data, + since every atom has a maximum number of neighbors. Since we only use i->j + edges here, we lose some j->i edges and add others by + making it symmetric. + We could fix this by merging edge_index with its counter-edges, + including the cell_offsets, and then running torch.unique. + But this does not seem worth it. + + + .. py:method:: select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None) + + + .. py:method:: generate_interaction_graph(data) + + + .. py:method:: forward(data) + + + diff --git a/_sources/autoapi/core/models/gemnet_gp/index.rst b/_sources/autoapi/core/models/gemnet_gp/index.rst new file mode 100644 index 000000000..7b9a08caf --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/index.rst @@ -0,0 +1,139 @@ +:py:mod:`core.models.gemnet_gp` +=============================== + +.. py:module:: core.models.gemnet_gp + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + layers/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + gemnet/index.rst + initializers/index.rst + utils/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_gp.GraphParallelGemNetT + + + + +.. 
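As a concrete picture of the triplet indexing described by ``get_triplets``: for every target atom a, each ordered pair of distinct incoming edges (b->a, c->a) is one triplet. The loop-based sketch below is illustrative only; the ``edge_index`` convention is an assumption, the real method is vectorized, and it additionally returns ``id3_ragged_idx``.

.. code-block:: python

   import torch


   def get_triplets_sketch(edge_index: torch.Tensor, num_atoms: int):
       # edge_index has shape (2, nEdges); row 1 is assumed to hold the
       # target atom of each directed edge.
       idx_t = edge_index[1]
       id3_ba, id3_ca = [], []
       for a in range(num_atoms):
           incoming = (idx_t == a).nonzero(as_tuple=True)[0].tolist()
           for ca in incoming:        # output edge c->a
               for ba in incoming:    # input edge b->a
                   if ba != ca:       # edges must be distinct; atoms b and c may coincide
                       id3_ba.append(ba)
                       id3_ca.append(ca)
       return torch.tensor(id3_ba), torch.tensor(id3_ca)


   edge_index = torch.tensor([[0, 2, 0], [1, 1, 2]])   # edges 0->1, 2->1, 0->2
   id3_ba, id3_ca = get_triplets_sketch(edge_index, num_atoms=3)
   print(id3_ba.tolist(), id3_ca.tolist())             # [1, 0] [0, 1]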
py:class:: GraphParallelGemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', scale_num_blocks: bool = False, scatter_atoms: bool = True, scale_file: str | None = None) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + GemNet-T, triplets-only variant of GemNet + + :param num_atoms (int): + :type num_atoms (int): Unused argument + :param bond_feat_dim (int): + :type bond_feat_dim (int): Unused argument + :param num_targets: Number of prediction targets. + :type num_targets: int + :param num_spherical: Controls maximum frequency. + :type num_spherical: int + :param num_radial: Controls maximum frequency. + :type num_radial: int + :param num_blocks: Number of building blocks to be stacked. + :type num_blocks: int + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_trip: (Down-projected) Embedding size in the triplet message passing block. + :type emb_size_trip: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). + :type emb_size_cbf: int + :param emb_size_bil_trip: Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer. + :type emb_size_bil_trip: int + :param num_before_skip: Number of residual blocks before the first skip connection. + :type num_before_skip: int + :param num_after_skip: Number of residual blocks after the first skip connection. + :type num_after_skip: int + :param num_concat: Number of residual blocks after the concatenation. + :type num_concat: int + :param num_atom: Number of residual blocks in the atom embedding blocks. + :type num_atom: int + :param regress_forces: Whether to predict forces. Default: True + :type regress_forces: bool + :param direct_forces: If True predict forces based on aggregation of interatomic directions. + If False predict forces based on negative gradient of energy potential. + :type direct_forces: bool + :param cutoff: Embedding cutoff for interactomic directions in Angstrom. + :type cutoff: float + :param rbf: Name and hyperparameters of the radial basis function. + :type rbf: dict + :param envelope: Name and hyperparameters of the envelope function. + :type envelope: dict + :param cbf: Name and hyperparameters of the cosine basis function. + :type cbf: dict + :param extensive: Whether the output should be extensive (proportional to the number of atoms) + :type extensive: bool + :param output_init: Initialization method for the final dense layer. + :type output_init: str + :param activation: Name of the activation function. + :type activation: str + :param scale_file: Path to the json file containing the scaling factors. + :type scale_file: str + + .. py:property:: num_params + + + .. 
py:method:: get_triplets(edge_index, num_atoms) + + Get all b->a for each edge c->a. + It is possible that b=c, as long as the edges are distinct. + + :returns: * **id3_ba** (*torch.Tensor, shape (num_triplets,)*) -- Indices of input edge b->a of each triplet b->a<-c + * **id3_ca** (*torch.Tensor, shape (num_triplets,)*) -- Indices of output edge c->a of each triplet b->a<-c + * **id3_ragged_idx** (*torch.Tensor, shape (num_triplets,)*) -- Indices enumerating the copies of id3_ca for creating a padded matrix + + + .. py:method:: select_symmetric_edges(tensor: torch.Tensor, mask, reorder_idx, inverse_neg) -> torch.Tensor + + + .. py:method:: reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector) + + Reorder edges to make finding counter-directional edges easier. + + Some edges are only present in one direction in the data, + since every atom has a maximum number of neighbors. Since we only use i->j + edges here, we lose some j->i edges and add others by + making it symmetric. + We could fix this by merging edge_index with its counter-edges, + including the cell_offsets, and then running torch.unique. + But this does not seem worth it. + + + .. py:method:: select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None) + + + .. py:method:: generate_interaction_graph(data) + + + .. py:method:: forward(data) + + + diff --git a/_sources/autoapi/core/models/gemnet_gp/initializers/index.rst b/_sources/autoapi/core/models/gemnet_gp/initializers/index.rst new file mode 100644 index 000000000..aa83f068c --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/initializers/index.rst @@ -0,0 +1,43 @@ +:py:mod:`core.models.gemnet_gp.initializers` +============================================ + +.. py:module:: core.models.gemnet_gp.initializers + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_gp.initializers._standardize + core.models.gemnet_gp.initializers.he_orthogonal_init + + + +.. py:function:: _standardize(kernel) + + Makes sure that N*Var(W) = 1 and E[W] = 0 + + +.. py:function:: he_orthogonal_init(tensor: torch.Tensor) -> torch.Tensor + + Generate a weight matrix with variance according to He (Kaiming) initialization. + Based on a random (semi-)orthogonal matrix neural networks + are expected to learn better when features are decorrelated + (stated by eg. "Reducing overfitting in deep networks by decorrelating representations", + "Dropout: a simple way to prevent neural networks from overfitting", + "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks") + + diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/atom_update_block/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/atom_update_block/index.rst new file mode 100644 index 000000000..d8002d861 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/layers/atom_update_block/index.rst @@ -0,0 +1,116 @@ +:py:mod:`core.models.gemnet_gp.layers.atom_update_block` +======================================================== + +.. py:module:: core.models.gemnet_gp.layers.atom_update_block + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. 
+ + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_gp.layers.atom_update_block.AtomUpdateBlock + core.models.gemnet_gp.layers.atom_update_block.OutputBlock + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_gp.layers.atom_update_block.scatter_sum + + + +.. py:function:: scatter_sum(src: torch.Tensor, index: torch.Tensor, dim: int = -1, out: torch.Tensor | None = None, dim_size: int | None = None) -> torch.Tensor + + Clone of torch_scatter.scatter_sum but without in-place operations + + +.. py:class:: AtomUpdateBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, activation: str | None = None, name: str = 'atom_update') + + + Bases: :py:obj:`torch.nn.Module` + + Aggregate the message embeddings of the atoms + + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_atom: Embedding size of the edges. + :type emb_size_atom: int + :param nHidden: Number of residual blocks. + :type nHidden: int + :param activation: Name of the activation function to use in the dense layers. + :type activation: callable/str + + .. py:method:: get_mlp(units_in: int, units: int, nHidden: int, activation: str | None) + + + .. py:method:: forward(nAtoms: int, m: int, rbf, id_j) + + :returns: **h** -- Atom embedding. + :rtype: torch.Tensor, shape=(nAtoms, emb_size_atom) + + + +.. py:class:: OutputBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, num_targets: int, activation: str | None = None, direct_forces: bool = True, output_init: str = 'HeOrthogonal', name: str = 'output', **kwargs) + + + Bases: :py:obj:`AtomUpdateBlock` + + Combines the atom update block and subsequent final dense layer. + + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_atom: Embedding size of the edges. + :type emb_size_atom: int + :param nHidden: Number of residual blocks. + :type nHidden: int + :param num_targets: Number of targets. + :type num_targets: int + :param activation: Name of the activation function to use in the dense layers except for the final dense layer. + :type activation: str + :param direct_forces: If true directly predict forces without taking the gradient of the energy potential. + :type direct_forces: bool + :param output_init: Kernel initializer of the final dense layer. + :type output_init: int + + .. py:attribute:: dense_rbf_F + :type: core.models.gemnet_gp.layers.base_layers.Dense + + + + .. py:attribute:: out_forces + :type: core.models.gemnet_gp.layers.base_layers.Dense + + + + .. py:attribute:: out_energy + :type: core.models.gemnet_gp.layers.base_layers.Dense + + + + .. py:method:: reset_parameters() -> None + + + .. 
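``scatter_sum`` above is described as a clone of ``torch_scatter.scatter_sum`` that avoids in-place operations; it is also how edge messages are pooled per atom in the atom update block. A simplified sketch of the semantics (the real function additionally handles ``dim``, an optional ``out`` tensor and index broadcasting):

.. code-block:: python

   import torch


   def scatter_sum_sketch(src: torch.Tensor, index: torch.Tensor,
                          dim_size: int) -> torch.Tensor:
       # Sum src[i] into out[index[i]] along the first dimension.
       out = torch.zeros(dim_size, *src.shape[1:], dtype=src.dtype)
       idx = index.view(-1, *([1] * (src.dim() - 1))).expand_as(src)
       return out.scatter_add(0, idx, src)


   m = torch.tensor([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])   # edge messages
   id_j = torch.tensor([0, 1, 0])                           # target atom per edge
   print(scatter_sum_sketch(m, id_j, dim_size=2))
   # tensor([[4., 4.],
   #         [2., 2.]])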
py:method:: forward(nAtoms: int, m, rbf, id_j: torch.Tensor) + + :returns: * **(E, F)** (*tuple*) + * **- E** (*torch.Tensor, shape=(nAtoms, num_targets)*) + * **- F** (*torch.Tensor, shape=(nEdges, num_targets)*) + * *Energy and force prediction* + + + diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/base_layers/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/base_layers/index.rst new file mode 100644 index 000000000..fc2d8f9ed --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/layers/base_layers/index.rst @@ -0,0 +1,149 @@ +:py:mod:`core.models.gemnet_gp.layers.base_layers` +================================================== + +.. py:module:: core.models.gemnet_gp.layers.base_layers + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_gp.layers.base_layers.Dense + core.models.gemnet_gp.layers.base_layers.ScaledSiLU + core.models.gemnet_gp.layers.base_layers.SiQU + core.models.gemnet_gp.layers.base_layers.ResidualLayer + + + + +.. py:class:: Dense(num_in_features: int, num_out_features: int, bias: bool = False, activation: str | None = None) + + + Bases: :py:obj:`torch.nn.Module` + + Combines dense layer with scaling for swish activation. + + :param units: Output embedding size. + :type units: int + :param activation: Name of the activation function to use. + :type activation: str + :param bias: True if use bias. + :type bias: bool + + .. py:method:: reset_parameters(initializer=he_orthogonal_init) -> None + + + .. py:method:: forward(x: torch.Tensor) -> torch.Tensor + + + +.. py:class:: ScaledSiLU + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x: torch.Tensor) -> torch.Tensor + + + +.. py:class:: SiQU + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. 
note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x: torch.Tensor) -> torch.Tensor + + + +.. py:class:: ResidualLayer(units: int, nLayers: int = 2, layer=Dense, **layer_kwargs) + + + Bases: :py:obj:`torch.nn.Module` + + Residual block with output scaled by 1/sqrt(2). + + :param units: Output embedding size. + :type units: int + :param nLayers: Number of dense layers. + :type nLayers: int + :param layer_kwargs: Keyword arguments for initializing the layers. + :type layer_kwargs: str + + .. py:method:: forward(input: torch.Tensor) -> torch.Tensor + + + diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/basis_utils/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/basis_utils/index.rst new file mode 100644 index 000000000..2de46d705 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/layers/basis_utils/index.rst @@ -0,0 +1,115 @@ +:py:mod:`core.models.gemnet_gp.layers.basis_utils` +================================================== + +.. py:module:: core.models.gemnet_gp.layers.basis_utils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_gp.layers.basis_utils.Jn + core.models.gemnet_gp.layers.basis_utils.Jn_zeros + core.models.gemnet_gp.layers.basis_utils.spherical_bessel_formulas + core.models.gemnet_gp.layers.basis_utils.bessel_basis + core.models.gemnet_gp.layers.basis_utils.sph_harm_prefactor + core.models.gemnet_gp.layers.basis_utils.associated_legendre_polynomials + core.models.gemnet_gp.layers.basis_utils.real_sph_harm + + + +.. py:function:: Jn(r: float, n: int) + + numerical spherical bessel functions of order n + + +.. py:function:: Jn_zeros(n: int, k: int) + + Compute the first k zeros of the spherical bessel functions up to order n (excluded) + + +.. py:function:: spherical_bessel_formulas(n) + + Computes the sympy formulas for the spherical bessel functions up to order n (excluded) + + +.. py:function:: bessel_basis(n: int, k: int) + + Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to + order n (excluded) and maximum frequency k (excluded). + + :returns: + + list + Bessel basis formulas taking in a single argument x. + Has length n where each element has length k. -> In total n*k many. + :rtype: bess_basis + + +.. py:function:: sph_harm_prefactor(l_degree: int, m_order: int) -> float + + Computes the constant pre-factor for the spherical harmonic of degree l and order m. + + :param l_degree: Degree of the spherical harmonic. l >= 0 + :type l_degree: int + :param m_order: Order of the spherical harmonic. -l <= m <= l + :type m_order: int + + :returns: **factor** + :rtype: float + + +.. py:function:: associated_legendre_polynomials(L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True) + + Computes string formulas of the associated legendre polynomials up to degree L (excluded). + + :param L_maxdegree: Degree up to which to calculate the associated legendre polynomials (degree L is excluded). + :type L_maxdegree: int + :param zero_m_only: If True only calculate the polynomials for the polynomials where m=0. 
+ :type zero_m_only: bool + :param pos_m_only: If True only calculate the polynomials for the polynomials where m>=0. Overwritten by zero_m_only. + :type pos_m_only: bool + + :returns: **polynomials** -- Contains the sympy functions of the polynomials (in total L many if zero_m_only is True else L^2 many). + :rtype: list + + +.. py:function:: real_sph_harm(L_maxdegree: int, use_theta: bool, use_phi: bool = True, zero_m_only: bool = True) + + Computes formula strings of the the real part of the spherical harmonics up to degree L (excluded). + Variables are either spherical coordinates phi and theta (or cartesian coordinates x,y,z) on the UNIT SPHERE. + + :param L_maxdegree: Degree up to which to calculate the spherical harmonics (degree L is excluded). + :type L_maxdegree: int + :param use_theta: + - True: Expects the input of the formula strings to contain theta. + - False: Expects the input of the formula strings to contain z. + :type use_theta: bool + :param use_phi: + - True: Expects the input of the formula strings to contain phi. + - False: Expects the input of the formula strings to contain x and y. + Does nothing if zero_m_only is True + :type use_phi: bool + :param zero_m_only: If True only calculate the harmonics where m=0. + :type zero_m_only: bool + + :returns: **Y_lm_real** -- Computes formula strings of the the real part of the spherical harmonics up + to degree L (where degree L is not excluded). + In total L^2 many sph harm exist up to degree L (excluded). However, if zero_m_only only is True then + the total count is reduced to be only L many. + :rtype: list + + diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/efficient/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/efficient/index.rst new file mode 100644 index 000000000..e704ea672 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/layers/efficient/index.rst @@ -0,0 +1,85 @@ +:py:mod:`core.models.gemnet_gp.layers.efficient` +================================================ + +.. py:module:: core.models.gemnet_gp.layers.efficient + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_gp.layers.efficient.EfficientInteractionDownProjection + core.models.gemnet_gp.layers.efficient.EfficientInteractionBilinear + + + + +.. py:class:: EfficientInteractionDownProjection(num_spherical: int, num_radial: int, emb_size_interm: int) + + + Bases: :py:obj:`torch.nn.Module` + + Down projection in the efficient reformulation. + + :param emb_size_interm: Intermediate embedding size (down-projection size). + :type emb_size_interm: int + :param kernel_initializer: Initializer of the weight matrix. + :type kernel_initializer: callable + + .. py:method:: reset_parameters() -> None + + + .. py:method:: forward(rbf: torch.Tensor, sph: torch.Tensor, id_ca, id_ragged_idx, Kmax: int) -> tuple[torch.Tensor, torch.Tensor] + + :param rbf: + :type rbf: torch.Tensor, shape=(1, nEdges, num_radial) + :param sph: + :type sph: torch.Tensor, shape=(nEdges, Kmax, num_spherical) + :param id_ca: + :param id_ragged_idx: + + :returns: * **rbf_W1** (*torch.Tensor, shape=(nEdges, emb_size_interm, num_spherical)*) + * **sph** (*torch.Tensor, shape=(nEdges, Kmax, num_spherical)*) -- Kmax = maximum number of neighbors of the edges + + + +.. 
py:class:: EfficientInteractionBilinear(emb_size: int, emb_size_interm: int, units_out: int) + + + Bases: :py:obj:`torch.nn.Module` + + Efficient reformulation of the bilinear layer and subsequent summation. + + :param units_out: Embedding output size of the bilinear layer. + :type units_out: int + :param kernel_initializer: Initializer of the weight matrix. + :type kernel_initializer: callable + + .. py:method:: reset_parameters() -> None + + + .. py:method:: forward(basis: tuple[torch.Tensor, torch.Tensor], m, id_reduce, id_ragged_idx, edge_offset, Kmax: int) -> torch.Tensor + + :param basis: + :param m: + :type m: quadruplets: m = m_db , triplets: m = m_ba + :param id_reduce: + :param id_ragged_idx: + + :returns: **m_ca** -- Edge embeddings. + :rtype: torch.Tensor, shape=(nEdges, units_out) + + + diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/embedding_block/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/embedding_block/index.rst new file mode 100644 index 000000000..cf3c40c84 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/layers/embedding_block/index.rst @@ -0,0 +1,70 @@ +:py:mod:`core.models.gemnet_gp.layers.embedding_block` +====================================================== + +.. py:module:: core.models.gemnet_gp.layers.embedding_block + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_gp.layers.embedding_block.AtomEmbedding + core.models.gemnet_gp.layers.embedding_block.EdgeEmbedding + + + + +.. py:class:: AtomEmbedding(emb_size: int) + + + Bases: :py:obj:`torch.nn.Module` + + Initial atom embeddings based on the atom type + + :param emb_size: Atom embeddings size + :type emb_size: int + + .. py:method:: forward(Z) -> torch.Tensor + + :returns: **h** -- Atom embeddings. + :rtype: torch.Tensor, shape=(nAtoms, emb_size) + + + +.. py:class:: EdgeEmbedding(atom_features: int, edge_features: int, num_out_features: int, activation: str | None = None) + + + Bases: :py:obj:`torch.nn.Module` + + Edge embedding based on the concatenation of atom embeddings and subsequent dense layer. + + :param emb_size: Embedding size after the dense layer. + :type emb_size: int + :param activation: Activation function used in the dense layer. + :type activation: str + + .. py:method:: forward(h, m_rbf, idx_s, idx_t) -> torch.Tensor + + :param h: + :param m_rbf: in embedding block: m_rbf = rbf ; In interaction block: m_rbf = m_st + :type m_rbf: shape (nEdges, nFeatures) + :param idx_s: + :param idx_t: + + :returns: **m_st** -- Edge embeddings. + :rtype: torch.Tensor, shape=(nEdges, emb_size) + + + diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/index.rst new file mode 100644 index 000000000..395fb489d --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/layers/index.rst @@ -0,0 +1,22 @@ +:py:mod:`core.models.gemnet_gp.layers` +====================================== + +.. py:module:: core.models.gemnet_gp.layers + + +Submodules +---------- +.. 
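The ``EdgeEmbedding`` documented above reduces to a concatenation of the source-atom, target-atom and edge features followed by one dense layer. A minimal sketch; the concatenation order and the plain ``torch.nn.Linear`` (instead of the library's ``Dense``) are assumptions.

.. code-block:: python

   import torch


   class EdgeEmbeddingSketch(torch.nn.Module):
       def __init__(self, atom_features: int, edge_features: int, out_features: int):
           super().__init__()
           self.dense = torch.nn.Linear(
               2 * atom_features + edge_features, out_features, bias=False
           )

       def forward(self, h, m_rbf, idx_s, idx_t):
           # h: (nAtoms, atom_features); m_rbf: (nEdges, edge_features)
           m_st = torch.cat([h[idx_s], h[idx_t], m_rbf], dim=-1)
           return self.dense(m_st)   # (nEdges, out_features)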
toctree:: + :titlesonly: + :maxdepth: 1 + + atom_update_block/index.rst + base_layers/index.rst + basis_utils/index.rst + efficient/index.rst + embedding_block/index.rst + interaction_block/index.rst + radial_basis/index.rst + spherical_basis/index.rst + + diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/interaction_block/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/interaction_block/index.rst new file mode 100644 index 000000000..0402cc5c9 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/layers/interaction_block/index.rst @@ -0,0 +1,95 @@ +:py:mod:`core.models.gemnet_gp.layers.interaction_block` +======================================================== + +.. py:module:: core.models.gemnet_gp.layers.interaction_block + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_gp.layers.interaction_block.InteractionBlockTripletsOnly + core.models.gemnet_gp.layers.interaction_block.TripletInteraction + + + + +.. py:class:: InteractionBlockTripletsOnly(emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, activation: str | None = None, name: str = 'Interaction') + + + Bases: :py:obj:`torch.nn.Module` + + Interaction block for GemNet-T/dT. + + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_trip: (Down-projected) Embedding size in the triplet message passing block. + :type emb_size_trip: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). + :type emb_size_cbf: int + :param emb_size_bil_trip: Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer. + :type emb_size_bil_trip: int + :param num_before_skip: Number of residual blocks before the first skip connection. + :type num_before_skip: int + :param num_after_skip: Number of residual blocks after the first skip connection. + :type num_after_skip: int + :param num_concat: Number of residual blocks after the concatenation. + :type num_concat: int + :param num_atom: Number of residual blocks in the atom embedding blocks. + :type num_atom: int + :param activation: Name of the activation function to use in the dense layers except for the final dense layer. + :type activation: str + + .. py:method:: forward(h: torch.Tensor, m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca, rbf_h, idx_s, idx_t, edge_offset, Kmax, nAtoms) + + :returns: * **h** (*torch.Tensor, shape=(nEdges, emb_size_atom)*) -- Atom embeddings. + * **m** (*torch.Tensor, shape=(nEdges, emb_size_edge)*) -- Edge embeddings (c->a). + * **Node** (*h*) + * **Edge** (*m, rbf3, id_swap, rbf_h, idx_s, idx_t, cbf3[0], cbf3[1] (dense)*) + * **Triplet** (*id3_ragged_idx, id3_ba, id3_ca*) + + + +.. 
py:class:: TripletInteraction(emb_size_edge: int, emb_size_trip: int, emb_size_bilinear: int, emb_size_rbf: int, emb_size_cbf: int, activation: str | None = None, name: str = 'TripletInteraction', **kwargs) + + + Bases: :py:obj:`torch.nn.Module` + + Triplet-based message passing block. + + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_trip: (Down-projected) Embedding size of the edge embeddings after the hadamard product with rbf. + :type emb_size_trip: int + :param emb_size_bilinear: Embedding size of the edge embeddings after the bilinear layer. + :type emb_size_bilinear: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). + :type emb_size_cbf: int + :param activation: Name of the activation function to use in the dense layers except for the final dense layer. + :type activation: str + + .. py:method:: forward(m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca, edge_offset, Kmax) + + :returns: **m** -- Edge embeddings (c->a). + :rtype: torch.Tensor, shape=(nEdges, emb_size_edge) + + + diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/radial_basis/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/radial_basis/index.rst new file mode 100644 index 000000000..73236d7f6 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/layers/radial_basis/index.rst @@ -0,0 +1,114 @@ +:py:mod:`core.models.gemnet_gp.layers.radial_basis` +=================================================== + +.. py:module:: core.models.gemnet_gp.layers.radial_basis + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_gp.layers.radial_basis.PolynomialEnvelope + core.models.gemnet_gp.layers.radial_basis.ExponentialEnvelope + core.models.gemnet_gp.layers.radial_basis.SphericalBesselBasis + core.models.gemnet_gp.layers.radial_basis.BernsteinBasis + core.models.gemnet_gp.layers.radial_basis.RadialBasis + + + + +.. py:class:: PolynomialEnvelope(exponent: int) + + + Bases: :py:obj:`torch.nn.Module` + + Polynomial envelope function that ensures a smooth cutoff. + + :param exponent: Exponent of the envelope function. + :type exponent: int + + .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor + + + +.. py:class:: ExponentialEnvelope + + + Bases: :py:obj:`torch.nn.Module` + + Exponential envelope function that ensures a smooth cutoff, + as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. + SpookyNet: Learning Force Fields with Electronic Degrees of Freedom + and Nonlocal Effects + + .. py:method:: forward(d_scaled) -> torch.Tensor + + + +.. py:class:: SphericalBesselBasis(num_radial: int, cutoff: float) + + + Bases: :py:obj:`torch.nn.Module` + + 1D spherical Bessel basis + + :param num_radial: Controls maximum frequency. + :type num_radial: int + :param cutoff: Cutoff distance in Angstrom. + :type cutoff: float + + .. py:method:: forward(d_scaled) + + + +.. py:class:: BernsteinBasis(num_radial: int, pregamma_initial: float = 0.45264) + + + Bases: :py:obj:`torch.nn.Module` + + Bernstein polynomial basis, + as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. 
+ SpookyNet: Learning Force Fields with Electronic Degrees of Freedom + and Nonlocal Effects + + :param num_radial: Controls maximum frequency. + :type num_radial: int + :param pregamma_initial: Initial value of exponential coefficient gamma. + Default: gamma = 0.5 * a_0**-1 = 0.94486, + inverse softplus -> pregamma = log e**gamma - 1 = 0.45264 + :type pregamma_initial: float + + .. py:method:: forward(d_scaled) -> torch.Tensor + + + +.. py:class:: RadialBasis(num_radial: int, cutoff: float, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None) + + + Bases: :py:obj:`torch.nn.Module` + + :param num_radial: Controls maximum frequency. + :type num_radial: int + :param cutoff: Cutoff distance in Angstrom. + :type cutoff: float + :param rbf: Basis function and its hyperparameters. + :type rbf: dict = {"name": "gaussian"} + :param envelope: Envelope function and its hyperparameters. + :type envelope: dict = {"name": "polynomial", "exponent": 5} + + .. py:method:: forward(d) + + + diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/spherical_basis/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/spherical_basis/index.rst new file mode 100644 index 000000000..8820ed1b3 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/layers/spherical_basis/index.rst @@ -0,0 +1,47 @@ +:py:mod:`core.models.gemnet_gp.layers.spherical_basis` +====================================================== + +.. py:module:: core.models.gemnet_gp.layers.spherical_basis + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_gp.layers.spherical_basis.CircularBasisLayer + + + + +.. py:class:: CircularBasisLayer(num_spherical: int, radial_basis: core.models.gemnet_gp.layers.radial_basis.RadialBasis, cbf, efficient: bool = False) + + + Bases: :py:obj:`torch.nn.Module` + + 2D Fourier Bessel Basis + + :param num_spherical: Controls maximum frequency. + :type num_spherical: int + :param radial_basis: Radial basis functions + :type radial_basis: RadialBasis + :param cbf: Name and hyperparameters of the cosine basis function + :type cbf: dict + :param efficient: Whether to use the "efficient" summation order + :type efficient: bool + + .. py:method:: forward(D_ca, cosφ_cab, id3_ca) + + + diff --git a/_sources/autoapi/core/models/gemnet_gp/utils/index.rst b/_sources/autoapi/core/models/gemnet_gp/utils/index.rst new file mode 100644 index 000000000..1ba511bda --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_gp/utils/index.rst @@ -0,0 +1,123 @@ +:py:mod:`core.models.gemnet_gp.utils` +===================================== + +.. py:module:: core.models.gemnet_gp.utils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + core.models.gemnet_gp.utils.read_json + core.models.gemnet_gp.utils.update_json + core.models.gemnet_gp.utils.write_json + core.models.gemnet_gp.utils.read_value_json + core.models.gemnet_gp.utils.ragged_range + core.models.gemnet_gp.utils.repeat_blocks + core.models.gemnet_gp.utils.calculate_interatomic_vectors + core.models.gemnet_gp.utils.inner_product_normalized + core.models.gemnet_gp.utils.mask_neighbors + + + +.. py:function:: read_json(path: str) + + +.. py:function:: update_json(path: str, data) -> None + + +.. py:function:: write_json(path: str, data) -> None + + +.. py:function:: read_value_json(path: str, key) + + +.. py:function:: ragged_range(sizes) + + Multiple concatenated ranges. + + .. rubric:: Examples + + sizes = [1 4 2 3] + Return: [0 0 1 2 3 0 1 0 1 2] + + +.. py:function:: repeat_blocks(sizes: torch.Tensor, repeats, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) -> torch.Tensor + + Repeat blocks of indices. + Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements + + continuous_indexing: Whether to keep increasing the index after each block + start_idx: Starting index + block_inc: Number to increment by after each block, + either global or per block. Shape: len(sizes) - 1 + repeat_inc: Number to increment by after each repetition, + either global or per block + + .. rubric:: Examples + + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False + Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True + Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; + repeat_inc = 4 + Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; + start_idx = 5 + Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; + block_inc = 1 + Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7] + sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True + Return: [0 1 2 0 1 2 3 4 3 4 3 4] + sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True + Return: [0 1 0 1 5 6 5 6] + + +.. py:function:: calculate_interatomic_vectors(R: torch.Tensor, id_s: torch.Tensor, id_t: torch.Tensor, offsets_st: torch.Tensor | None) -> tuple[torch.Tensor, torch.Tensor] + + Calculate the vectors connecting the given atom pairs, + considering offsets from periodic boundary conditions (PBC). + + :param R: Atom positions. + :type R: Tensor, shape = (nAtoms, 3) + :param id_s: Indices of the source atom of the edges. + :type id_s: Tensor, shape = (nEdges,) + :param id_t: Indices of the target atom of the edges. + :type id_t: Tensor, shape = (nEdges,) + :param offsets_st: PBC offsets of the edges. + Subtract this from the correct direction. + :type offsets_st: Tensor, shape = (nEdges,) + + :returns: **(D_st, V_st)** -- + + D_st: Tensor, shape = (nEdges,) + Distance from atom t to s. + V_st: Tensor, shape = (nEdges,) + Unit direction from atom t to s. + :rtype: tuple + + +.. py:function:: inner_product_normalized(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor + + Calculate the inner product between the given normalized vectors, + giving a result between -1 and 1. + + +.. 
py:function:: mask_neighbors(neighbors: torch.Tensor, edge_mask: torch.Tensor) + + diff --git a/_sources/autoapi/core/models/gemnet_oc/gemnet_oc/index.rst b/_sources/autoapi/core/models/gemnet_oc/gemnet_oc/index.rst new file mode 100644 index 000000000..f6c1eeb67 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/gemnet_oc/index.rst @@ -0,0 +1,249 @@ +:py:mod:`core.models.gemnet_oc.gemnet_oc` +========================================= + +.. py:module:: core.models.gemnet_oc.gemnet_oc + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.gemnet_oc.GemNetOC + + + + +.. py:class:: GemNetOC(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_quad_in: int, emb_size_quad_out: int, emb_size_aint_in: int, emb_size_aint_out: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_sbf: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, num_output_afteratom: int, num_atom_emb_layers: int = 0, num_global_out_layers: int = 2, regress_forces: bool = True, direct_forces: bool = False, use_pbc: bool = True, scale_backprop_forces: bool = False, cutoff: float = 6.0, cutoff_qint: float | None = None, cutoff_aeaint: float | None = None, cutoff_aint: float | None = None, max_neighbors: int = 50, max_neighbors_qint: int | None = None, max_neighbors_aeaint: int | None = None, max_neighbors_aint: int | None = None, enforce_max_neighbors_strictly: bool = True, rbf: dict[str, str] | None = None, rbf_spherical: dict | None = None, envelope: dict[str, str | int] | None = None, cbf: dict[str, str] | None = None, sbf: dict[str, str] | None = None, extensive: bool = True, forces_coupled: bool = False, output_init: str = 'HeOrthogonal', activation: str = 'silu', quad_interaction: bool = False, atom_edge_interaction: bool = False, edge_atom_interaction: bool = False, atom_interaction: bool = False, scale_basis: bool = False, qint_tags: list | None = None, num_elements: int = 83, otf_graph: bool = False, scale_file: str | None = None, **kwargs) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + :param num_atoms (int): + :type num_atoms (int): Unused argument + :param bond_feat_dim (int): + :type bond_feat_dim (int): Unused argument + :param num_targets: Number of prediction targets. + :type num_targets: int + :param num_spherical: Controls maximum frequency. + :type num_spherical: int + :param num_radial: Controls maximum frequency. + :type num_radial: int + :param num_blocks: Number of building blocks to be stacked. + :type num_blocks: int + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_trip_in: (Down-projected) embedding size of the quadruplet edge embeddings + before the bilinear layer. + :type emb_size_trip_in: int + :param emb_size_trip_out: (Down-projected) embedding size of the quadruplet edge embeddings + after the bilinear layer. + :type emb_size_trip_out: int + :param emb_size_quad_in: (Down-projected) embedding size of the quadruplet edge embeddings + before the bilinear layer. 
+ :type emb_size_quad_in: int + :param emb_size_quad_out: (Down-projected) embedding size of the quadruplet edge embeddings + after the bilinear layer. + :type emb_size_quad_out: int + :param emb_size_aint_in: Embedding size in the atom interaction before the bilinear layer. + :type emb_size_aint_in: int + :param emb_size_aint_out: Embedding size in the atom interaction after the bilinear layer. + :type emb_size_aint_out: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). + :type emb_size_cbf: int + :param emb_size_sbf: Embedding size of the spherical basis transformation (two angles). + :type emb_size_sbf: int + :param num_before_skip: Number of residual blocks before the first skip connection. + :type num_before_skip: int + :param num_after_skip: Number of residual blocks after the first skip connection. + :type num_after_skip: int + :param num_concat: Number of residual blocks after the concatenation. + :type num_concat: int + :param num_atom: Number of residual blocks in the atom embedding blocks. + :type num_atom: int + :param num_output_afteratom: Number of residual blocks in the output blocks + after adding the atom embedding. + :type num_output_afteratom: int + :param num_atom_emb_layers: Number of residual blocks for transforming atom embeddings. + :type num_atom_emb_layers: int + :param num_global_out_layers: Number of final residual blocks before the output. + :type num_global_out_layers: int + :param regress_forces: Whether to predict forces. Default: True + :type regress_forces: bool + :param direct_forces: If True predict forces based on aggregation of interatomic directions. + If False predict forces based on negative gradient of energy potential. + :type direct_forces: bool + :param use_pbc: Whether to use periodic boundary conditions. + :type use_pbc: bool + :param scale_backprop_forces: Whether to scale up the energy and then scale down the forces + to prevent NaNs and infs in backpropagated forces. + :type scale_backprop_forces: bool + :param cutoff: Embedding cutoff for interatomic connections and embeddings in Angstrom. + :type cutoff: float + :param cutoff_qint: Quadruplet interaction cutoff in Angstrom. + Optional. Uses cutoff per default. + :type cutoff_qint: float + :param cutoff_aeaint: Edge-to-atom and atom-to-edge interaction cutoff in Angstrom. + Optional. Uses cutoff per default. + :type cutoff_aeaint: float + :param cutoff_aint: Atom-to-atom interaction cutoff in Angstrom. + Optional. Uses maximum of all other cutoffs per default. + :type cutoff_aint: float + :param max_neighbors: Maximum number of neighbors for interatomic connections and embeddings. + :type max_neighbors: int + :param max_neighbors_qint: Maximum number of quadruplet interactions per embedding. + Optional. Uses max_neighbors per default. + :type max_neighbors_qint: int + :param max_neighbors_aeaint: Maximum number of edge-to-atom and atom-to-edge interactions per embedding. + Optional. Uses max_neighbors per default. + :type max_neighbors_aeaint: int + :param max_neighbors_aint: Maximum number of atom-to-atom interactions per atom. + Optional. Uses maximum of all other neighbors per default. + :type max_neighbors_aint: int + :param enforce_max_neighbors_strictly: When subselecting edges based on max_neighbors args, arbitrarily + select amongst degenerate edges to have exactly the correct number. 
+ :type enforce_max_neighbors_strictly: bool + :param rbf: Name and hyperparameters of the radial basis function. + :type rbf: dict + :param rbf_spherical: Name and hyperparameters of the radial basis function used as part of the + circular and spherical bases. + Optional. Uses rbf per default. + :type rbf_spherical: dict + :param envelope: Name and hyperparameters of the envelope function. + :type envelope: dict + :param cbf: Name and hyperparameters of the circular basis function. + :type cbf: dict + :param sbf: Name and hyperparameters of the spherical basis function. + :type sbf: dict + :param extensive: Whether the output should be extensive (proportional to the number of atoms) + :type extensive: bool + :param forces_coupled: If True, enforce that |F_st| = |F_ts|. No effect if direct_forces is False. + :type forces_coupled: bool + :param output_init: Initialization method for the final dense layer. + :type output_init: str + :param activation: Name of the activation function. + :type activation: str + :param scale_file: Path to the pytorch file containing the scaling factors. + :type scale_file: str + :param quad_interaction: Whether to use quadruplet interactions (with dihedral angles) + :type quad_interaction: bool + :param atom_edge_interaction: Whether to use atom-to-edge interactions + :type atom_edge_interaction: bool + :param edge_atom_interaction: Whether to use edge-to-atom interactions + :type edge_atom_interaction: bool + :param atom_interaction: Whether to use atom-to-atom interactions + :type atom_interaction: bool + :param scale_basis: Whether to use a scaling layer in the raw basis function for better + numerical stability. + :type scale_basis: bool + :param qint_tags: Which atom tags to use quadruplet interactions for. + 0=sub-surface bulk, 1=surface, 2=adsorbate atoms. + :type qint_tags: list + + .. py:property:: num_params + :type: int + + + .. py:method:: set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint) + + + .. py:method:: set_max_neighbors(max_neighbors, max_neighbors_qint, max_neighbors_aeaint, max_neighbors_aint) + + + .. py:method:: init_basis_functions(num_radial, num_spherical, rbf, rbf_spherical, envelope, cbf, sbf, scale_basis) + + + .. py:method:: init_shared_basis_layers(num_radial, num_spherical, emb_size_rbf, emb_size_cbf, emb_size_sbf) + + + .. py:method:: calculate_quad_angles(V_st, V_qint_st, quad_idx) + + Calculate angles for quadruplet-based message passing. + + :param V_st: Normalized directions from s to t + :type V_st: Tensor, shape = (nAtoms, 3) + :param V_qint_st: Normalized directions from s to t for the quadruplet + interaction graph + :type V_qint_st: Tensor, shape = (nAtoms, 3) + :param quad_idx: Indices relevant for quadruplet interactions. + :type quad_idx: dict of torch.Tensor + + :returns: * **cosφ_cab** (*Tensor, shape = (num_triplets_inint,)*) -- Cosine of angle between atoms c -> a <- b. + * **cosφ_abd** (*Tensor, shape = (num_triplets_qint,)*) -- Cosine of angle between atoms a -> b -> d. + * **angle_cabd** (*Tensor, shape = (num_quadruplets,)*) -- Dihedral angle between atoms c <- a-b -> d. + + + .. py:method:: select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, opposite_neg) -> torch.Tensor + + Use a mask to remove values of removed edges and then + duplicate the values for the correct edge direction. + + :param tensor: Values to symmetrize for the new tensor. + :type tensor: torch.Tensor + :param mask: Mask defining which edges go in the correct direction. 
+ :type mask: torch.Tensor + :param reorder_idx: Indices defining how to reorder the tensor values after + concatenating the edge values of both directions. + :type reorder_idx: torch.Tensor + :param opposite_neg: Whether the edge in the opposite direction should use the + negative tensor value. + :type opposite_neg: bool + + :returns: **tensor_ordered** -- A tensor with symmetrized values. + :rtype: torch.Tensor + + + .. py:method:: symmetrize_edges(graph, batch_idx) + + Symmetrize edges to ensure existence of counter-directional edges. + + Some edges are only present in one direction in the data, + since every atom has a maximum number of neighbors. + We only use i->j edges here. So we lose some j->i edges + and add others by making it symmetric. + + + .. py:method:: subselect_edges(data, graph, cutoff=None, max_neighbors=None) + + Subselect edges using a stricter cutoff and max_neighbors. + + + .. py:method:: generate_graph_dict(data, cutoff, max_neighbors) + + Generate a radius/nearest neighbor graph. + + + .. py:method:: subselect_graph(data, graph, cutoff, max_neighbors, cutoff_orig, max_neighbors_orig) + + If the new cutoff and max_neighbors is different from the original, + subselect the edges of a given graph. + + + .. py:method:: get_graphs_and_indices(data) + + "Generate embedding and interaction graphs and indices. + + + .. py:method:: get_bases(main_graph, a2a_graph, a2ee2a_graph, qint_graph, trip_idx_e2e, trip_idx_a2e, trip_idx_e2a, quad_idx, num_atoms) + + Calculate and transform basis functions. + + + .. py:method:: forward(data) + + + diff --git a/_sources/autoapi/core/models/gemnet_oc/index.rst b/_sources/autoapi/core/models/gemnet_oc/index.rst new file mode 100644 index 000000000..a49ad327a --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/index.rst @@ -0,0 +1,263 @@ +:py:mod:`core.models.gemnet_oc` +=============================== + +.. py:module:: core.models.gemnet_oc + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + layers/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + gemnet_oc/index.rst + initializers/index.rst + interaction_indices/index.rst + utils/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.GemNetOC + + + + +.. 
py:class:: GemNetOC(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_quad_in: int, emb_size_quad_out: int, emb_size_aint_in: int, emb_size_aint_out: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_sbf: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, num_output_afteratom: int, num_atom_emb_layers: int = 0, num_global_out_layers: int = 2, regress_forces: bool = True, direct_forces: bool = False, use_pbc: bool = True, scale_backprop_forces: bool = False, cutoff: float = 6.0, cutoff_qint: float | None = None, cutoff_aeaint: float | None = None, cutoff_aint: float | None = None, max_neighbors: int = 50, max_neighbors_qint: int | None = None, max_neighbors_aeaint: int | None = None, max_neighbors_aint: int | None = None, enforce_max_neighbors_strictly: bool = True, rbf: dict[str, str] | None = None, rbf_spherical: dict | None = None, envelope: dict[str, str | int] | None = None, cbf: dict[str, str] | None = None, sbf: dict[str, str] | None = None, extensive: bool = True, forces_coupled: bool = False, output_init: str = 'HeOrthogonal', activation: str = 'silu', quad_interaction: bool = False, atom_edge_interaction: bool = False, edge_atom_interaction: bool = False, atom_interaction: bool = False, scale_basis: bool = False, qint_tags: list | None = None, num_elements: int = 83, otf_graph: bool = False, scale_file: str | None = None, **kwargs) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + :param num_atoms (int): + :type num_atoms (int): Unused argument + :param bond_feat_dim (int): + :type bond_feat_dim (int): Unused argument + :param num_targets: Number of prediction targets. + :type num_targets: int + :param num_spherical: Controls maximum frequency. + :type num_spherical: int + :param num_radial: Controls maximum frequency. + :type num_radial: int + :param num_blocks: Number of building blocks to be stacked. + :type num_blocks: int + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_trip_in: (Down-projected) embedding size of the quadruplet edge embeddings + before the bilinear layer. + :type emb_size_trip_in: int + :param emb_size_trip_out: (Down-projected) embedding size of the quadruplet edge embeddings + after the bilinear layer. + :type emb_size_trip_out: int + :param emb_size_quad_in: (Down-projected) embedding size of the quadruplet edge embeddings + before the bilinear layer. + :type emb_size_quad_in: int + :param emb_size_quad_out: (Down-projected) embedding size of the quadruplet edge embeddings + after the bilinear layer. + :type emb_size_quad_out: int + :param emb_size_aint_in: Embedding size in the atom interaction before the bilinear layer. + :type emb_size_aint_in: int + :param emb_size_aint_out: Embedding size in the atom interaction after the bilinear layer. + :type emb_size_aint_out: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). + :type emb_size_cbf: int + :param emb_size_sbf: Embedding size of the spherical basis transformation (two angles). + :type emb_size_sbf: int + :param num_before_skip: Number of residual blocks before the first skip connection. 
+ :type num_before_skip: int + :param num_after_skip: Number of residual blocks after the first skip connection. + :type num_after_skip: int + :param num_concat: Number of residual blocks after the concatenation. + :type num_concat: int + :param num_atom: Number of residual blocks in the atom embedding blocks. + :type num_atom: int + :param num_output_afteratom: Number of residual blocks in the output blocks + after adding the atom embedding. + :type num_output_afteratom: int + :param num_atom_emb_layers: Number of residual blocks for transforming atom embeddings. + :type num_atom_emb_layers: int + :param num_global_out_layers: Number of final residual blocks before the output. + :type num_global_out_layers: int + :param regress_forces: Whether to predict forces. Default: True + :type regress_forces: bool + :param direct_forces: If True predict forces based on aggregation of interatomic directions. + If False predict forces based on negative gradient of energy potential. + :type direct_forces: bool + :param use_pbc: Whether to use periodic boundary conditions. + :type use_pbc: bool + :param scale_backprop_forces: Whether to scale up the energy and then scales down the forces + to prevent NaNs and infs in backpropagated forces. + :type scale_backprop_forces: bool + :param cutoff: Embedding cutoff for interatomic connections and embeddings in Angstrom. + :type cutoff: float + :param cutoff_qint: Quadruplet interaction cutoff in Angstrom. + Optional. Uses cutoff per default. + :type cutoff_qint: float + :param cutoff_aeaint: Edge-to-atom and atom-to-edge interaction cutoff in Angstrom. + Optional. Uses cutoff per default. + :type cutoff_aeaint: float + :param cutoff_aint: Atom-to-atom interaction cutoff in Angstrom. + Optional. Uses maximum of all other cutoffs per default. + :type cutoff_aint: float + :param max_neighbors: Maximum number of neighbors for interatomic connections and embeddings. + :type max_neighbors: int + :param max_neighbors_qint: Maximum number of quadruplet interactions per embedding. + Optional. Uses max_neighbors per default. + :type max_neighbors_qint: int + :param max_neighbors_aeaint: Maximum number of edge-to-atom and atom-to-edge interactions per embedding. + Optional. Uses max_neighbors per default. + :type max_neighbors_aeaint: int + :param max_neighbors_aint: Maximum number of atom-to-atom interactions per atom. + Optional. Uses maximum of all other neighbors per default. + :type max_neighbors_aint: int + :param enforce_max_neighbors_strictly: When subselected edges based on max_neighbors args, arbitrarily + select amongst degenerate edges to have exactly the correct number. + :type enforce_max_neighbors_strictly: bool + :param rbf: Name and hyperparameters of the radial basis function. + :type rbf: dict + :param rbf_spherical: Name and hyperparameters of the radial basis function used as part of the + circular and spherical bases. + Optional. Uses rbf per default. + :type rbf_spherical: dict + :param envelope: Name and hyperparameters of the envelope function. + :type envelope: dict + :param cbf: Name and hyperparameters of the circular basis function. + :type cbf: dict + :param sbf: Name and hyperparameters of the spherical basis function. + :type sbf: dict + :param extensive: Whether the output should be extensive (proportional to the number of atoms) + :type extensive: bool + :param forces_coupled: If True, enforce that |F_st| = |F_ts|. No effect if direct_forces is False. 
+ :type forces_coupled: bool + :param output_init: Initialization method for the final dense layer. + :type output_init: str + :param activation: Name of the activation function. + :type activation: str + :param scale_file: Path to the pytorch file containing the scaling factors. + :type scale_file: str + :param quad_interaction: Whether to use quadruplet interactions (with dihedral angles) + :type quad_interaction: bool + :param atom_edge_interaction: Whether to use atom-to-edge interactions + :type atom_edge_interaction: bool + :param edge_atom_interaction: Whether to use edge-to-atom interactions + :type edge_atom_interaction: bool + :param atom_interaction: Whether to use atom-to-atom interactions + :type atom_interaction: bool + :param scale_basis: Whether to use a scaling layer in the raw basis function for better + numerical stability. + :type scale_basis: bool + :param qint_tags: Which atom tags to use quadruplet interactions for. + 0=sub-surface bulk, 1=surface, 2=adsorbate atoms. + :type qint_tags: list + + .. py:property:: num_params + :type: int + + + .. py:method:: set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint) + + + .. py:method:: set_max_neighbors(max_neighbors, max_neighbors_qint, max_neighbors_aeaint, max_neighbors_aint) + + + .. py:method:: init_basis_functions(num_radial, num_spherical, rbf, rbf_spherical, envelope, cbf, sbf, scale_basis) + + + .. py:method:: init_shared_basis_layers(num_radial, num_spherical, emb_size_rbf, emb_size_cbf, emb_size_sbf) + + + .. py:method:: calculate_quad_angles(V_st, V_qint_st, quad_idx) + + Calculate angles for quadruplet-based message passing. + + :param V_st: Normalized directions from s to t + :type V_st: Tensor, shape = (nAtoms, 3) + :param V_qint_st: Normalized directions from s to t for the quadruplet + interaction graph + :type V_qint_st: Tensor, shape = (nAtoms, 3) + :param quad_idx: Indices relevant for quadruplet interactions. + :type quad_idx: dict of torch.Tensor + + :returns: * **cosφ_cab** (*Tensor, shape = (num_triplets_inint,)*) -- Cosine of angle between atoms c -> a <- b. + * **cosφ_abd** (*Tensor, shape = (num_triplets_qint,)*) -- Cosine of angle between atoms a -> b -> d. + * **angle_cabd** (*Tensor, shape = (num_quadruplets,)*) -- Dihedral angle between atoms c <- a-b -> d. + + + .. py:method:: select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, opposite_neg) -> torch.Tensor + + Use a mask to remove values of removed edges and then + duplicate the values for the correct edge direction. + + :param tensor: Values to symmetrize for the new tensor. + :type tensor: torch.Tensor + :param mask: Mask defining which edges go in the correct direction. + :type mask: torch.Tensor + :param reorder_idx: Indices defining how to reorder the tensor values after + concatenating the edge values of both directions. + :type reorder_idx: torch.Tensor + :param opposite_neg: Whether the edge in the opposite direction should use the + negative tensor value. + :type opposite_neg: bool + + :returns: **tensor_ordered** -- A tensor with symmetrized values. + :rtype: torch.Tensor + + + .. py:method:: symmetrize_edges(graph, batch_idx) + + Symmetrize edges to ensure existence of counter-directional edges. + + Some edges are only present in one direction in the data, + since every atom has a maximum number of neighbors. + We only use i->j edges here. So we lose some j->i edges + and add others by making it symmetric. + + + .. 
py:method:: subselect_edges(data, graph, cutoff=None, max_neighbors=None) + + Subselect edges using a stricter cutoff and max_neighbors. + + + .. py:method:: generate_graph_dict(data, cutoff, max_neighbors) + + Generate a radius/nearest neighbor graph. + + + .. py:method:: subselect_graph(data, graph, cutoff, max_neighbors, cutoff_orig, max_neighbors_orig) + + If the new cutoff and max_neighbors is different from the original, + subselect the edges of a given graph. + + + .. py:method:: get_graphs_and_indices(data) + + "Generate embedding and interaction graphs and indices. + + + .. py:method:: get_bases(main_graph, a2a_graph, a2ee2a_graph, qint_graph, trip_idx_e2e, trip_idx_a2e, trip_idx_e2a, quad_idx, num_atoms) + + Calculate and transform basis functions. + + + .. py:method:: forward(data) + + + diff --git a/_sources/autoapi/core/models/gemnet_oc/initializers/index.rst b/_sources/autoapi/core/models/gemnet_oc/initializers/index.rst new file mode 100644 index 000000000..00c3376fe --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/initializers/index.rst @@ -0,0 +1,58 @@ +:py:mod:`core.models.gemnet_oc.initializers` +============================================ + +.. py:module:: core.models.gemnet_oc.initializers + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.initializers._standardize + core.models.gemnet_oc.initializers.he_orthogonal_init + core.models.gemnet_oc.initializers.grid_init + core.models.gemnet_oc.initializers.log_grid_init + core.models.gemnet_oc.initializers.get_initializer + + + +.. py:function:: _standardize(kernel) + + Makes sure that N*Var(W) = 1 and E[W] = 0 + + +.. py:function:: he_orthogonal_init(tensor: torch.Tensor) -> torch.Tensor + + Generate a weight matrix with variance according to He (Kaiming) initialization. + Based on a random (semi-)orthogonal matrix neural networks + are expected to learn better when features are decorrelated + (stated by eg. "Reducing overfitting in deep networks by decorrelating representations", + "Dropout: a simple way to prevent neural networks from overfitting", + "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks") + + +.. py:function:: grid_init(tensor: torch.Tensor, start: int = -1, end: int = 1) -> torch.Tensor + + Generate a weight matrix so that each input value corresponds to one value on a regular grid between start and end. + + +.. py:function:: log_grid_init(tensor: torch.Tensor, start: int = -4, end: int = 0) -> torch.Tensor + + Generate a weight matrix so that each input value corresponds to one value on a regular logarithmic grid between 10^start and 10^end. + + +.. py:function:: get_initializer(name, **init_kwargs) + + diff --git a/_sources/autoapi/core/models/gemnet_oc/interaction_indices/index.rst b/_sources/autoapi/core/models/gemnet_oc/interaction_indices/index.rst new file mode 100644 index 000000000..817102579 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/interaction_indices/index.rst @@ -0,0 +1,132 @@ +:py:mod:`core.models.gemnet_oc.interaction_indices` +=================================================== + +.. py:module:: core.models.gemnet_oc.interaction_indices + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. 
+ This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.interaction_indices.get_triplets + core.models.gemnet_oc.interaction_indices.get_mixed_triplets + core.models.gemnet_oc.interaction_indices.get_quadruplets + + + +.. py:function:: get_triplets(graph, num_atoms: int) + + Get all input edges b->a for each output edge c->a. + It is possible that b=c, as long as the edges are distinct + (i.e. atoms b and c stem from different unit cells). + + :param graph: Contains the graph's edge_index. + :type graph: dict of torch.Tensor + :param num_atoms: Total number of atoms. + :type num_atoms: int + + :returns: + + in: torch.Tensor, shape (num_triplets,) + Indices of input edge b->a of each triplet b->a<-c + out: torch.Tensor, shape (num_triplets,) + Indices of output edge c->a of each triplet b->a<-c + out_agg: torch.Tensor, shape (num_triplets,) + Indices enumerating the intermediate edges of each output edge. + Used for creating a padded matrix and aggregating via matmul. + :rtype: Dictionary containing the entries + + +.. py:function:: get_mixed_triplets(graph_in, graph_out, num_atoms, to_outedge=False, return_adj=False, return_agg_idx=False) + + Get all output edges (ingoing or outgoing) for each incoming edge. + It is possible that in atom=out atom, as long as the edges are distinct + (i.e. they stem from different unit cells). In edges and out edges stem + from separate graphs (hence "mixed") with shared atoms. + + :param graph_in: Contains the input graph's edge_index and cell_offset. + :type graph_in: dict of torch.Tensor + :param graph_out: Contains the output graph's edge_index and cell_offset. + Input and output graphs use the same atoms, but different edges. + :type graph_out: dict of torch.Tensor + :param num_atoms: Total number of atoms. + :type num_atoms: int + :param to_outedge: Whether to map the output to the atom's outgoing edges a->c + instead of the ingoing edges c->a. + :type to_outedge: bool + :param return_adj: Whether to output the adjacency (incidence) matrix between output + edges and atoms adj_edges. + :type return_adj: bool + :param return_agg_idx: Whether to output the indices enumerating the intermediate edges + of each output edge. + :type return_agg_idx: bool + + :returns: + + in: torch.Tensor, shape (num_triplets,) + Indices of input edges + out: torch.Tensor, shape (num_triplets,) + Indices of output edges + adj_edges: SparseTensor, shape (num_edges, num_atoms) + Adjacency (incidence) matrix between output edges and atoms, + with values specifying the input edges. + Only returned if return_adj is True. + out_agg: torch.Tensor, shape (num_triplets,) + Indices enumerating the intermediate edges of each output edge. + Used for creating a padded matrix and aggregating via matmul. + Only returned if return_agg_idx is True. + :rtype: Dictionary containing the entries + + +.. py:function:: get_quadruplets(main_graph, qint_graph, num_atoms) + + Get all d->b for each edge c->a and connection b->a + Careful about periodic images! + Separate interaction cutoff not supported. + + :param main_graph: Contains the main graph's edge_index and cell_offset. + The main graph defines which edges are embedded. + :type main_graph: dict of torch.Tensor + :param qint_graph: Contains the quadruplet interaction graph's edge_index and + cell_offset. 
main_graph and qint_graph use the same atoms, + but different edges. + :type qint_graph: dict of torch.Tensor + :param num_atoms: Total number of atoms. + :type num_atoms: int + + :returns: + + triplet_in['in']: torch.Tensor, shape (nTriplets,) + Indices of input edge d->b in triplet d->b->a. + triplet_in['out']: torch.Tensor, shape (nTriplets,) + Interaction indices of output edge b->a in triplet d->b->a. + triplet_out['in']: torch.Tensor, shape (nTriplets,) + Interaction indices of input edge b->a in triplet c->a<-b. + triplet_out['out']: torch.Tensor, shape (nTriplets,) + Indices of output edge c->a in triplet c->a<-b. + out: torch.Tensor, shape (nQuadruplets,) + Indices of output edge c->a in quadruplet + trip_in_to_quad: torch.Tensor, shape (nQuadruplets,) + Indices to map from input triplet d->b->a + to quadruplet d->b->a<-c. + trip_out_to_quad: torch.Tensor, shape (nQuadruplets,) + Indices to map from output triplet c->a<-b + to quadruplet d->b->a<-c. + out_agg: torch.Tensor, shape (num_triplets,) + Indices enumerating the intermediate edges of each output edge. + Used for creating a padded matrix and aggregating via matmul. + :rtype: Dictionary containing the entries + + diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/atom_update_block/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/atom_update_block/index.rst new file mode 100644 index 000000000..27bcd5daf --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/layers/atom_update_block/index.rst @@ -0,0 +1,85 @@ +:py:mod:`core.models.gemnet_oc.layers.atom_update_block` +======================================================== + +.. py:module:: core.models.gemnet_oc.layers.atom_update_block + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.layers.atom_update_block.AtomUpdateBlock + core.models.gemnet_oc.layers.atom_update_block.OutputBlock + + + + +.. py:class:: AtomUpdateBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, activation=None) + + + Bases: :py:obj:`torch.nn.Module` + + Aggregate the message embeddings of the atoms + + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_rbf: Embedding size of the radial basis. + :type emb_size_rbf: int + :param nHidden: Number of residual blocks. + :type nHidden: int + :param activation: Name of the activation function to use in the dense layers. + :type activation: callable/str + + .. py:method:: get_mlp(units_in: int, units: int, nHidden: int, activation) + + + .. py:method:: forward(h: torch.Tensor, m, basis_rad, idx_atom) + + :returns: **h** -- Atom embedding. + :rtype: torch.Tensor, shape=(nAtoms, emb_size_atom) + + + +.. py:class:: OutputBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, nHidden_afteratom: int, activation: str | None = None, direct_forces: bool = True) + + + Bases: :py:obj:`AtomUpdateBlock` + + Combines the atom update block and subsequent final dense layer. + + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_rbf: Embedding size of the radial basis. 
+ :type emb_size_rbf: int + :param nHidden: Number of residual blocks before adding the atom embedding. + :type nHidden: int + :param nHidden_afteratom: Number of residual blocks after adding the atom embedding. + :type nHidden_afteratom: int + :param activation: Name of the activation function to use in the dense layers. + :type activation: str + :param direct_forces: If true directly predict forces, i.e. without taking the gradient + of the energy potential. + :type direct_forces: bool + + .. py:method:: forward(h: torch.Tensor, m: torch.Tensor, basis_rad, idx_atom) + + :returns: * *torch.Tensor, shape=(nAtoms, emb_size_atom)* -- Output atom embeddings. + * *torch.Tensor, shape=(nEdges, emb_size_edge)* -- Output edge embeddings. + + + diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/base_layers/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/base_layers/index.rst new file mode 100644 index 000000000..999181d51 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/layers/base_layers/index.rst @@ -0,0 +1,111 @@ +:py:mod:`core.models.gemnet_oc.layers.base_layers` +================================================== + +.. py:module:: core.models.gemnet_oc.layers.base_layers + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.layers.base_layers.Dense + core.models.gemnet_oc.layers.base_layers.ScaledSiLU + core.models.gemnet_oc.layers.base_layers.ResidualLayer + + + + +.. py:class:: Dense(in_features: int, out_features: int, bias: bool = False, activation: str | None = None) + + + Bases: :py:obj:`torch.nn.Module` + + Combines dense layer with scaling for silu activation. + + :param in_features: Input embedding size. + :type in_features: int + :param out_features: Output embedding size. + :type out_features: int + :param bias: True if use bias. + :type bias: bool + :param activation: Name of the activation function to use. + :type activation: str + + .. py:method:: reset_parameters(initializer=he_orthogonal_init) -> None + + + .. py:method:: forward(x) + + + +.. py:class:: ScaledSiLU + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x) + + + +.. py:class:: ResidualLayer(units: int, nLayers: int = 2, layer=Dense, **layer_kwargs) + + + Bases: :py:obj:`torch.nn.Module` + + Residual block with output scaled by 1/sqrt(2). + + :param units: Input and output embedding size. 
+ :type units: int + :param nLayers: Number of dense layers. + :type nLayers: int + :param layer: Class for the layers inside the residual block. + :type layer: torch.nn.Module + :param layer_kwargs: Keyword arguments for initializing the layers. + :type layer_kwargs: str + + .. py:method:: forward(input) + + + diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/basis_utils/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/basis_utils/index.rst new file mode 100644 index 000000000..a725a8538 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/layers/basis_utils/index.rst @@ -0,0 +1,127 @@ +:py:mod:`core.models.gemnet_oc.layers.basis_utils` +================================================== + +.. py:module:: core.models.gemnet_oc.layers.basis_utils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.layers.basis_utils.Jn + core.models.gemnet_oc.layers.basis_utils.Jn_zeros + core.models.gemnet_oc.layers.basis_utils.spherical_bessel_formulas + core.models.gemnet_oc.layers.basis_utils.bessel_basis + core.models.gemnet_oc.layers.basis_utils.sph_harm_prefactor + core.models.gemnet_oc.layers.basis_utils.associated_legendre_polynomials + core.models.gemnet_oc.layers.basis_utils.real_sph_harm + core.models.gemnet_oc.layers.basis_utils.get_sph_harm_basis + + + +.. py:function:: Jn(r: float, n: int) + + numerical spherical bessel functions of order n + + +.. py:function:: Jn_zeros(n: int, k: int) + + Compute the first k zeros of the spherical bessel functions + up to order n (excluded) + + +.. py:function:: spherical_bessel_formulas(n: int) + + Computes the sympy formulas for the spherical bessel functions + up to order n (excluded) + + +.. py:function:: bessel_basis(n: int, k: int) + + Compute the sympy formulas for the normalized and rescaled spherical bessel + functions up to order n (excluded) and maximum frequency k (excluded). + + :returns: **bess_basis** -- Bessel basis formulas taking in a single argument x. + Has length n where each element has length k. -> In total n*k many. + :rtype: list + + +.. py:function:: sph_harm_prefactor(l_degree: int, m_order: int) + + Computes the constant pre-factor for the spherical harmonic + of degree l and order m. + + :param l_degree: Degree of the spherical harmonic. l >= 0 + :type l_degree: int + :param m_order: Order of the spherical harmonic. -l <= m <= l + :type m_order: int + + :returns: **factor** + :rtype: float + + +.. py:function:: associated_legendre_polynomials(L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True) + + Computes string formulas of the associated legendre polynomials + up to degree L (excluded). + + :param L_maxdegree: Degree up to which to calculate the associated legendre polynomials + (degree L is excluded). + :type L_maxdegree: int + :param zero_m_only: If True only calculate the polynomials for the polynomials where m=0. + :type zero_m_only: bool + :param pos_m_only: If True only calculate the polynomials for the polynomials where m>=0. + Overwritten by zero_m_only. + :type pos_m_only: bool + + :returns: **polynomials** -- Contains the sympy functions of the polynomials + (in total L many if zero_m_only is True else L^2 many). + :rtype: list + + +.. 
py:function:: real_sph_harm(L_maxdegree: int, use_theta: bool, use_phi: bool = True, zero_m_only: bool = True) -> None + + Computes formula strings of the real part of the spherical harmonics + up to degree L (excluded). Variables are either spherical coordinates phi + and theta (or cartesian coordinates x,y,z) on the UNIT SPHERE. + + :param L_maxdegree: Degree up to which to calculate the spherical harmonics + (degree L is excluded). + :type L_maxdegree: int + :param use_theta: + - True: Expects the input of the formula strings to contain theta. + - False: Expects the input of the formula strings to contain z. + :type use_theta: bool + :param use_phi: + - True: Expects the input of the formula strings to contain phi. + - False: Expects the input of the formula strings to contain x and y. + Does nothing if zero_m_only is True + :type use_phi: bool + :param zero_m_only: If True only calculate the harmonics where m=0. + :type zero_m_only: bool + + :returns: **Y_lm_real** -- Computes formula strings of the real part of the spherical + harmonics up to degree L (where degree L is not excluded). + In total L^2 many sph harm exist up to degree L (excluded). + However, if zero_m_only is True then the total count + is reduced to L. + :rtype: list + + +.. py:function:: get_sph_harm_basis(L_maxdegree: int, zero_m_only: bool = True) + + Get a function calculating the spherical harmonics basis from z and phi. + + diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/efficient/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/efficient/index.rst new file mode 100644 index 000000000..9ba029e8a --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/layers/efficient/index.rst @@ -0,0 +1,118 @@ +:py:mod:`core.models.gemnet_oc.layers.efficient` +================================================ + +.. py:module:: core.models.gemnet_oc.layers.efficient + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.layers.efficient.BasisEmbedding + core.models.gemnet_oc.layers.efficient.EfficientInteractionBilinear + + + + +.. py:class:: BasisEmbedding(num_radial: int, emb_size_interm: int, num_spherical: int | None = None) + + + Bases: :py:obj:`torch.nn.Module` + + Embed a basis (CBF, SBF), optionally using the efficient reformulation. + + :param num_radial: Number of radial basis functions. + :type num_radial: int + :param emb_size_interm: Intermediate embedding size of triplets/quadruplets. + :type emb_size_interm: int + :param num_spherical: Number of circular/spherical basis functions. + Only required if there is a circular/spherical basis. + :type num_spherical: int + + .. py:attribute:: weight + :type: torch.nn.Parameter + + + + .. py:method:: reset_parameters() -> None + + + .. py:method:: forward(rad_basis, sph_basis=None, idx_rad_outer=None, idx_rad_inner=None, idx_sph_outer=None, idx_sph_inner=None, num_atoms=None) + + :param rad_basis: Raw radial basis. + :type rad_basis: torch.Tensor, shape=(num_edges, num_radial or num_orders * num_radial) + :param sph_basis: Raw spherical or circular basis. + :type sph_basis: torch.Tensor, shape=(num_triplets or num_quadruplets, num_spherical) + :param idx_rad_outer: Atom associated with each radial basis value. + Optional, used for efficient edge aggregation. 
+ :type idx_rad_outer: torch.Tensor, shape=(num_edges) + :param idx_rad_inner: Enumerates radial basis values per atom. + Optional, used for efficient edge aggregation. + :type idx_rad_inner: torch.Tensor, shape=(num_edges) + :param idx_sph_outer: Edge associated with each circular/spherical basis value. + Optional, used for efficient triplet/quadruplet aggregation. + :type idx_sph_outer: torch.Tensor, shape=(num_triplets or num_quadruplets) + :param idx_sph_inner: Enumerates circular/spherical basis values per edge. + Optional, used for efficient triplet/quadruplet aggregation. + :type idx_sph_inner: torch.Tensor, shape=(num_triplets or num_quadruplets) + :param num_atoms: Total number of atoms. + Optional, used for efficient edge aggregation. + :type num_atoms: int + + :returns: * **rad_W1** (*torch.Tensor, shape=(num_edges, emb_size_interm, num_spherical)*) + * **sph** (*torch.Tensor, shape=(num_edges, Kmax, num_spherical)*) -- Kmax = maximum number of neighbors of the edges + + + +.. py:class:: EfficientInteractionBilinear(emb_size_in: int, emb_size_interm: int, emb_size_out: int) + + + Bases: :py:obj:`torch.nn.Module` + + Efficient reformulation of the bilinear layer and subsequent summation. + + :param emb_size_in: Embedding size of input triplets/quadruplets. + :type emb_size_in: int + :param emb_size_interm: Intermediate embedding size of the basis transformation. + :type emb_size_interm: int + :param emb_size_out: Embedding size of output triplets/quadruplets. + :type emb_size_out: int + + .. py:method:: forward(basis, m, idx_agg_outer, idx_agg_inner, idx_agg2_outer=None, idx_agg2_inner=None, agg2_out_size=None) + + :param basis: + shapes=((num_edges, emb_size_interm, num_spherical), + (num_edges, num_spherical, Kmax)) + First element: Radial basis multiplied with weight matrix + Second element: Circular/spherical basis + :type basis: Tuple (torch.Tensor, torch.Tensor), + :param m: Input edge embeddings + :type m: torch.Tensor, shape=(num_edges, emb_size_in) + :param idx_agg_outer: Output edge aggregating this intermediate triplet/quadruplet edge. + :type idx_agg_outer: torch.Tensor, shape=(num_triplets or num_quadruplets) + :param idx_agg_inner: Enumerates intermediate edges per output edge. + :type idx_agg_inner: torch.Tensor, shape=(num_triplets or num_quadruplets) + :param idx_agg2_outer: Output atom aggregating this edge. + :type idx_agg2_outer: torch.Tensor, shape=(num_edges) + :param idx_agg2_inner: Enumerates edges per output atom. + :type idx_agg2_inner: torch.Tensor, shape=(num_edges) + :param agg2_out_size: Number of output embeddings when aggregating twice. Typically + the number of atoms. + :type agg2_out_size: int + + :returns: **m_ca** -- Aggregated edge/atom embeddings. + :rtype: torch.Tensor, shape=(num_edges, emb_size) + + + diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/embedding_block/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/embedding_block/index.rst new file mode 100644 index 000000000..e448091e5 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/layers/embedding_block/index.rst @@ -0,0 +1,74 @@ +:py:mod:`core.models.gemnet_oc.layers.embedding_block` +====================================================== + +.. py:module:: core.models.gemnet_oc.layers.embedding_block + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. 
+ + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.layers.embedding_block.AtomEmbedding + core.models.gemnet_oc.layers.embedding_block.EdgeEmbedding + + + + +.. py:class:: AtomEmbedding(emb_size: int, num_elements: int) + + + Bases: :py:obj:`torch.nn.Module` + + Initial atom embeddings based on the atom type + + :param emb_size: Atom embeddings size + :type emb_size: int + + .. py:method:: forward(Z) -> torch.Tensor + + :returns: **h** -- Atom embeddings. + :rtype: torch.Tensor, shape=(nAtoms, emb_size) + + + +.. py:class:: EdgeEmbedding(atom_features: int, edge_features: int, out_features: int, activation: str | None = None) + + + Bases: :py:obj:`torch.nn.Module` + + Edge embedding based on the concatenation of atom embeddings + and a subsequent dense layer. + + :param atom_features: Embedding size of the atom embedding. + :type atom_features: int + :param edge_features: Embedding size of the input edge embedding. + :type edge_features: int + :param out_features: Embedding size after the dense layer. + :type out_features: int + :param activation: Activation function used in the dense layer. + :type activation: str + + .. py:method:: forward(h: torch.Tensor, m: torch.Tensor, edge_index) -> torch.Tensor + + :param h: Atom embeddings. + :type h: torch.Tensor, shape (num_atoms, atom_features) + :param m: Radial basis in embedding block, + edge embedding in interaction block. + :type m: torch.Tensor, shape (num_edges, edge_features) + + :returns: **m_st** -- Edge embeddings. + :rtype: torch.Tensor, shape=(nEdges, emb_size) + + + diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/force_scaler/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/force_scaler/index.rst new file mode 100644 index 000000000..a8259ae3d --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/layers/force_scaler/index.rst @@ -0,0 +1,49 @@ +:py:mod:`core.models.gemnet_oc.layers.force_scaler` +=================================================== + +.. py:module:: core.models.gemnet_oc.layers.force_scaler + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.layers.force_scaler.ForceScaler + + + + +.. py:class:: ForceScaler(init_scale: float = 2.0**8, growth_factor: float = 2.0, backoff_factor: float = 0.5, growth_interval: int = 2000, max_force_iters: int = 50, enabled: bool = True) + + + Scales up the energy and then scales down the forces + to prevent NaNs and infs in calculations using AMP. + Inspired by torch.cuda.amp.GradScaler. + + .. py:method:: scale(energy) + + + .. py:method:: unscale(forces) + + + .. py:method:: calc_forces(energy, pos) + + + .. py:method:: calc_forces_and_update(energy, pos) + + + .. py:method:: update() -> None + + + diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/index.rst new file mode 100644 index 000000000..dd81c1a25 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/layers/index.rst @@ -0,0 +1,23 @@ +:py:mod:`core.models.gemnet_oc.layers` +====================================== + +.. py:module:: core.models.gemnet_oc.layers + + +Submodules +---------- +.. 
toctree:: + :titlesonly: + :maxdepth: 1 + + atom_update_block/index.rst + base_layers/index.rst + basis_utils/index.rst + efficient/index.rst + embedding_block/index.rst + force_scaler/index.rst + interaction_block/index.rst + radial_basis/index.rst + spherical_basis/index.rst + + diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/interaction_block/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/interaction_block/index.rst new file mode 100644 index 000000000..ae4052a88 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/layers/interaction_block/index.rst @@ -0,0 +1,186 @@ +:py:mod:`core.models.gemnet_oc.layers.interaction_block` +======================================================== + +.. py:module:: core.models.gemnet_oc.layers.interaction_block + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.layers.interaction_block.InteractionBlock + core.models.gemnet_oc.layers.interaction_block.QuadrupletInteraction + core.models.gemnet_oc.layers.interaction_block.TripletInteraction + core.models.gemnet_oc.layers.interaction_block.PairInteraction + + + + +.. py:class:: InteractionBlock(emb_size_atom: int, emb_size_edge: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_quad_in: int, emb_size_quad_out: int, emb_size_a2a_in: int, emb_size_a2a_out: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_sbf: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, num_atom_emb_layers: int = 0, quad_interaction: bool = False, atom_edge_interaction: bool = False, edge_atom_interaction: bool = False, atom_interaction: bool = False, activation=None) + + + Bases: :py:obj:`torch.nn.Module` + + Interaction block for GemNet-Q/dQ. + + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_trip_in: (Down-projected) embedding size of the quadruplet edge embeddings + before the bilinear layer. + :type emb_size_trip_in: int + :param emb_size_trip_out: (Down-projected) embedding size of the quadruplet edge embeddings + after the bilinear layer. + :type emb_size_trip_out: int + :param emb_size_quad_in: (Down-projected) embedding size of the quadruplet edge embeddings + before the bilinear layer. + :type emb_size_quad_in: int + :param emb_size_quad_out: (Down-projected) embedding size of the quadruplet edge embeddings + after the bilinear layer. + :type emb_size_quad_out: int + :param emb_size_a2a_in: Embedding size in the atom interaction before the bilinear layer. + :type emb_size_a2a_in: int + :param emb_size_a2a_out: Embedding size in the atom interaction after the bilinear layer. + :type emb_size_a2a_out: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). + :type emb_size_cbf: int + :param emb_size_sbf: Embedding size of the spherical basis transformation (two angles). + :type emb_size_sbf: int + :param num_before_skip: Number of residual blocks before the first skip connection. + :type num_before_skip: int + :param num_after_skip: Number of residual blocks after the first skip connection. 
+ :type num_after_skip: int + :param num_concat: Number of residual blocks after the concatenation. + :type num_concat: int + :param num_atom: Number of residual blocks in the atom embedding blocks. + :type num_atom: int + :param num_atom_emb_layers: Number of residual blocks for transforming atom embeddings. + :type num_atom_emb_layers: int + :param quad_interaction: Whether to use quadruplet interactions. + :type quad_interaction: bool + :param atom_edge_interaction: Whether to use atom-to-edge interactions. + :type atom_edge_interaction: bool + :param edge_atom_interaction: Whether to use edge-to-atom interactions. + :type edge_atom_interaction: bool + :param atom_interaction: Whether to use atom-to-atom interactions. + :type atom_interaction: bool + :param activation: Name of the activation function to use in the dense layers. + :type activation: str + + .. py:method:: forward(h, m, bases_qint, bases_e2e, bases_a2e, bases_e2a, basis_a2a_rad, basis_atom_update, edge_index_main, a2ee2a_graph, a2a_graph, id_swap, trip_idx_e2e, trip_idx_a2e, trip_idx_e2a, quad_idx) + + :returns: * **h** (*torch.Tensor, shape=(nEdges, emb_size_atom)*) -- Atom embeddings. + * **m** (*torch.Tensor, shape=(nEdges, emb_size_edge)*) -- Edge embeddings (c->a). + + + +.. py:class:: QuadrupletInteraction(emb_size_edge, emb_size_quad_in, emb_size_quad_out, emb_size_rbf, emb_size_cbf, emb_size_sbf, symmetric_mp=True, activation=None) + + + Bases: :py:obj:`torch.nn.Module` + + Quadruplet-based message passing block. + + :param emb_size_edge: Embedding size of the edges. + :type emb_size_edge: int + :param emb_size_quad_in: (Down-projected) embedding size of the quadruplet edge embeddings + before the bilinear layer. + :type emb_size_quad_in: int + :param emb_size_quad_out: (Down-projected) embedding size of the quadruplet edge embeddings + after the bilinear layer. + :type emb_size_quad_out: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). + :type emb_size_cbf: int + :param emb_size_sbf: Embedding size of the spherical basis transformation (two angles). + :type emb_size_sbf: int + :param symmetric_mp: Whether to use symmetric message passing and + update the edges in both directions. + :type symmetric_mp: bool + :param activation: Name of the activation function to use in the dense layers. + :type activation: str + + .. py:method:: forward(m, bases, idx, id_swap) + + :returns: **m** -- Edge embeddings (c->a). + :rtype: torch.Tensor, shape=(nEdges, emb_size_edge) + + + +.. py:class:: TripletInteraction(emb_size_in: int, emb_size_out: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_rbf: int, emb_size_cbf: int, symmetric_mp: bool = True, swap_output: bool = True, activation=None) + + + Bases: :py:obj:`torch.nn.Module` + + Triplet-based message passing block. + + :param emb_size_in: Embedding size of the input embeddings. + :type emb_size_in: int + :param emb_size_out: Embedding size of the output embeddings. + :type emb_size_out: int + :param emb_size_trip_in: (Down-projected) embedding size of the quadruplet edge embeddings + before the bilinear layer. + :type emb_size_trip_in: int + :param emb_size_trip_out: (Down-projected) embedding size of the quadruplet edge embeddings + after the bilinear layer. + :type emb_size_trip_out: int + :param emb_size_rbf: Embedding size of the radial basis transformation. 
+ :type emb_size_rbf: int + :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). + :type emb_size_cbf: int + :param symmetric_mp: Whether to use symmetric message passing and + update the edges in both directions. + :type symmetric_mp: bool + :param swap_output: Whether to swap the output embedding directions. + Only relevant if symmetric_mp is False. + :type swap_output: bool + :param activation: Name of the activation function to use in the dense layers. + :type activation: str + + .. py:method:: forward(m, bases, idx, id_swap, expand_idx=None, idx_agg2=None, idx_agg2_inner=None, agg2_out_size=None) + + :returns: **m** -- Edge embeddings. + :rtype: torch.Tensor, shape=(nEdges, emb_size_edge) + + + +.. py:class:: PairInteraction(emb_size_atom, emb_size_pair_in, emb_size_pair_out, emb_size_rbf, activation=None) + + + Bases: :py:obj:`torch.nn.Module` + + Pair-based message passing block. + + :param emb_size_atom: Embedding size of the atoms. + :type emb_size_atom: int + :param emb_size_pair_in: Embedding size of the atom pairs before the bilinear layer. + :type emb_size_pair_in: int + :param emb_size_pair_out: Embedding size of the atom pairs after the bilinear layer. + :type emb_size_pair_out: int + :param emb_size_rbf: Embedding size of the radial basis transformation. + :type emb_size_rbf: int + :param activation: Name of the activation function to use in the dense layers. + :type activation: str + + .. py:method:: forward(h, rad_basis, edge_index, target_neighbor_idx) + + :returns: **h** -- Atom embeddings. + :rtype: torch.Tensor, shape=(num_atoms, emb_size_atom) + + + diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/radial_basis/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/radial_basis/index.rst new file mode 100644 index 000000000..faaa84b5e --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/layers/radial_basis/index.rst @@ -0,0 +1,156 @@ +:py:mod:`core.models.gemnet_oc.layers.radial_basis` +=================================================== + +.. py:module:: core.models.gemnet_oc.layers.radial_basis + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.layers.radial_basis.PolynomialEnvelope + core.models.gemnet_oc.layers.radial_basis.ExponentialEnvelope + core.models.gemnet_oc.layers.radial_basis.GaussianBasis + core.models.gemnet_oc.layers.radial_basis.SphericalBesselBasis + core.models.gemnet_oc.layers.radial_basis.BernsteinBasis + core.models.gemnet_oc.layers.radial_basis.RadialBasis + + + + +.. py:class:: PolynomialEnvelope(exponent: int) + + + Bases: :py:obj:`torch.nn.Module` + + Polynomial envelope function that ensures a smooth cutoff. + + :param exponent: Exponent of the envelope function. + :type exponent: int + + .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor + + + +.. py:class:: ExponentialEnvelope + + + Bases: :py:obj:`torch.nn.Module` + + Exponential envelope function that ensures a smooth cutoff, + as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. + SpookyNet: Learning Force Fields with Electronic Degrees of Freedom + and Nonlocal Effects + + .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor + + + +.. 
py:class:: GaussianBasis(start: float = 0.0, stop: float = 5.0, num_gaussians: int = 50, trainable: bool = False) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(dist: torch.Tensor) -> torch.Tensor + + + +.. py:class:: SphericalBesselBasis(num_radial: int, cutoff: float) + + + Bases: :py:obj:`torch.nn.Module` + + First-order spherical Bessel basis + + :param num_radial: Number of basis functions. Controls the maximum frequency. + :type num_radial: int + :param cutoff: Cutoff distance in Angstrom. + :type cutoff: float + + .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor + + + +.. py:class:: BernsteinBasis(num_radial: int, pregamma_initial: float = 0.45264) + + + Bases: :py:obj:`torch.nn.Module` + + Bernstein polynomial basis, + as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. + SpookyNet: Learning Force Fields with Electronic Degrees of Freedom + and Nonlocal Effects + + :param num_radial: Number of basis functions. Controls the maximum frequency. + :type num_radial: int + :param pregamma_initial: Initial value of exponential coefficient gamma. + Default: gamma = 0.5 * a_0**-1 = 0.94486, + inverse softplus -> pregamma = log e**gamma - 1 = 0.45264 + :type pregamma_initial: float + + .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor + + + +.. py:class:: RadialBasis(num_radial: int, cutoff: float, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None, scale_basis: bool = False) + + + Bases: :py:obj:`torch.nn.Module` + + :param num_radial: Number of basis functions. Controls the maximum frequency. + :type num_radial: int + :param cutoff: Cutoff distance in Angstrom. + :type cutoff: float + :param rbf: Basis function and its hyperparameters. + :type rbf: dict = {"name": "gaussian"} + :param envelope: Envelope function and its hyperparameters. + :type envelope: dict = {"name": "polynomial", "exponent": 5} + :param scale_basis: Whether to scale the basis values for better numerical stability. + :type scale_basis: bool + + .. py:method:: forward(d: torch.Tensor) -> torch.Tensor + + + diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/spherical_basis/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/spherical_basis/index.rst new file mode 100644 index 000000000..ac7fddcb9 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/layers/spherical_basis/index.rst @@ -0,0 +1,67 @@ +:py:mod:`core.models.gemnet_oc.layers.spherical_basis` +====================================================== + +.. py:module:: core.models.gemnet_oc.layers.spherical_basis + +.. 
autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.layers.spherical_basis.CircularBasisLayer + core.models.gemnet_oc.layers.spherical_basis.SphericalBasisLayer + + + + +.. py:class:: CircularBasisLayer(num_spherical: int, radial_basis: core.models.gemnet_oc.layers.radial_basis.RadialBasis, cbf: dict, scale_basis: bool = False) + + + Bases: :py:obj:`torch.nn.Module` + + 2D Fourier Bessel Basis + + :param num_spherical: Number of basis functions. Controls the maximum frequency. + :type num_spherical: int + :param radial_basis: Radial basis function. + :type radial_basis: RadialBasis + :param cbf: Name and hyperparameters of the circular basis function. + :type cbf: dict + :param scale_basis: Whether to scale the basis values for better numerical stability. + :type scale_basis: bool + + .. py:method:: forward(D_ca, cosφ_cab) + + + +.. py:class:: SphericalBasisLayer(num_spherical: int, radial_basis: core.models.gemnet_oc.layers.radial_basis.RadialBasis, sbf: dict, scale_basis: bool = False) + + + Bases: :py:obj:`torch.nn.Module` + + 3D Fourier Bessel Basis + + :param num_spherical: Number of basis functions. Controls the maximum frequency. + :type num_spherical: int + :param radial_basis: Radial basis functions. + :type radial_basis: RadialBasis + :param sbf: Name and hyperparameters of the spherical basis function. + :type sbf: dict + :param scale_basis: Whether to scale the basis values for better numerical stability. + :type scale_basis: bool + + .. py:method:: forward(D_ca, cosφ_cab, θ_cabd) + + + diff --git a/_sources/autoapi/core/models/gemnet_oc/utils/index.rst b/_sources/autoapi/core/models/gemnet_oc/utils/index.rst new file mode 100644 index 000000000..a66317272 --- /dev/null +++ b/_sources/autoapi/core/models/gemnet_oc/utils/index.rst @@ -0,0 +1,176 @@ +:py:mod:`core.models.gemnet_oc.utils` +===================================== + +.. py:module:: core.models.gemnet_oc.utils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.gemnet_oc.utils.ragged_range + core.models.gemnet_oc.utils.repeat_blocks + core.models.gemnet_oc.utils.masked_select_sparsetensor_flat + core.models.gemnet_oc.utils.calculate_interatomic_vectors + core.models.gemnet_oc.utils.inner_product_clamped + core.models.gemnet_oc.utils.get_angle + core.models.gemnet_oc.utils.vector_rejection + core.models.gemnet_oc.utils.get_projected_angle + core.models.gemnet_oc.utils.mask_neighbors + core.models.gemnet_oc.utils.get_neighbor_order + core.models.gemnet_oc.utils.get_inner_idx + core.models.gemnet_oc.utils.get_edge_id + + + +.. py:function:: ragged_range(sizes) + + Multiple concatenated ranges. + + .. rubric:: Examples + + sizes = [1 4 2 3] + Return: [0 0 1 2 3 0 1 0 1 2] + + +.. py:function:: repeat_blocks(sizes, repeats, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) -> torch.Tensor + + Repeat blocks of indices. 
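+
+   For instance, mirroring the first documented example below (a sketch only,
+   assuming ``sizes`` and ``repeats`` are passed as ``torch`` tensors, as in the
+   library's internal calls):
+
+   .. code-block:: python
+
+      import torch
+      from fairchem.core.models.gemnet_oc.utils import repeat_blocks
+
+      idx = repeat_blocks(
+          torch.tensor([1, 3, 2]),  # block sizes
+          torch.tensor([3, 2, 3]),  # number of repeats per block
+          continuous_indexing=False,
+      )
+      # expected, per the Examples section:
+      # tensor([0, 0, 0, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 1])
+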
+ Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements + + continuous_indexing: Whether to keep increasing the index after each block + start_idx: Starting index + block_inc: Number to increment by after each block, + either global or per block. Shape: len(sizes) - 1 + repeat_inc: Number to increment by after each repetition, + either global or per block + + .. rubric:: Examples + + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False + Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True + Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; + repeat_inc = 4 + Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; + start_idx = 5 + Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; + block_inc = 1 + Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7] + sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True + Return: [0 1 2 0 1 2 3 4 3 4 3 4] + sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True + Return: [0 1 0 1 5 6 5 6] + + +.. py:function:: masked_select_sparsetensor_flat(src, mask) -> torch_sparse.SparseTensor + + +.. py:function:: calculate_interatomic_vectors(R, id_s, id_t, offsets_st) + + Calculate the vectors connecting the given atom pairs, + considering offsets from periodic boundary conditions (PBC). + + :param R: Atom positions. + :type R: Tensor, shape = (nAtoms, 3) + :param id_s: Indices of the source atom of the edges. + :type id_s: Tensor, shape = (nEdges,) + :param id_t: Indices of the target atom of the edges. + :type id_t: Tensor, shape = (nEdges,) + :param offsets_st: PBC offsets of the edges. + Subtract this from the correct direction. + :type offsets_st: Tensor, shape = (nEdges,) + + :returns: **(D_st, V_st)** -- + + D_st: Tensor, shape = (nEdges,) + Distance from atom t to s. + V_st: Tensor, shape = (nEdges,) + Unit direction from atom t to s. + :rtype: tuple + + +.. py:function:: inner_product_clamped(x, y) -> torch.Tensor + + Calculate the inner product between the given normalized vectors, + giving a result between -1 and 1. + + +.. py:function:: get_angle(R_ac, R_ab) -> torch.Tensor + + Calculate angles between atoms c -> a <- b. + + :param R_ac: Vector from atom a to c. + :type R_ac: Tensor, shape = (N, 3) + :param R_ab: Vector from atom a to b. + :type R_ab: Tensor, shape = (N, 3) + + :returns: **angle_cab** -- Angle between atoms c <- a -> b. + :rtype: Tensor, shape = (N,) + + +.. py:function:: vector_rejection(R_ab, P_n) + + Project the vector R_ab onto a plane with normal vector P_n. + + :param R_ab: Vector from atom a to b. + :type R_ab: Tensor, shape = (N, 3) + :param P_n: Normal vector of a plane onto which to project R_ab. + :type P_n: Tensor, shape = (N, 3) + + :returns: **R_ab_proj** -- Projected vector (orthogonal to P_n). + :rtype: Tensor, shape = (N, 3) + + +.. py:function:: get_projected_angle(R_ab, P_n, eps: float = 0.0001) -> torch.Tensor + + Project the vector R_ab onto a plane with normal vector P_n, + then calculate the angle w.r.t. the (x [cross] P_n), + or (y [cross] P_n) if the former would be ill-defined/numerically unstable. + + :param R_ab: Vector from atom a to b. + :type R_ab: Tensor, shape = (N, 3) + :param P_n: Normal vector of a plane onto which to project R_ab. 
+ :type P_n: Tensor, shape = (N, 3) + :param eps: Norm of projection below which to use the y-axis instead of x. + :type eps: float + + :returns: **angle_ab** -- Angle on plane w.r.t. x- or y-axis. + :rtype: Tensor, shape = (N) + + +.. py:function:: mask_neighbors(neighbors, edge_mask) + + +.. py:function:: get_neighbor_order(num_atoms: int, index, atom_distance) -> torch.Tensor + + Give a mask that filters out edges so that each atom has at most + `max_num_neighbors_threshold` neighbors. + + +.. py:function:: get_inner_idx(idx, dim_size) + + Assign an inner index to each element (neighbor) with the same index. + For example, with idx=[0 0 0 1 1 1 1 2 2] this returns [0 1 2 0 1 2 3 0 1]. + These indices allow reshape neighbor indices into a dense matrix. + idx has to be sorted for this to work. + + +.. py:function:: get_edge_id(edge_idx, cell_offsets, num_atoms: int) + + diff --git a/_sources/autoapi/core/models/index.rst b/_sources/autoapi/core/models/index.rst new file mode 100644 index 000000000..5a7aa67e7 --- /dev/null +++ b/_sources/autoapi/core/models/index.rst @@ -0,0 +1,72 @@ +:py:mod:`core.models` +===================== + +.. py:module:: core.models + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + equiformer_v2/index.rst + escn/index.rst + gemnet/index.rst + gemnet_gp/index.rst + gemnet_oc/index.rst + painn/index.rst + scn/index.rst + utils/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + base/index.rst + dimenet_plus_plus/index.rst + model_registry/index.rst + schnet/index.rst + + +Package Contents +---------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.model_name_to_local_file + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.models.available_pretrained_models + + +.. py:data:: available_pretrained_models + + + +.. py:function:: model_name_to_local_file(model_name: str, local_cache: str | pathlib.Path) -> str + + Download a pretrained checkpoint if it does not exist already + + :param model_name: the model name. See available_pretrained_checkpoints. + :type model_name: str + :param local_cache: path to local cache directory + :type local_cache: str or Path + + :returns: local path to checkpoint file + :rtype: str + + diff --git a/_sources/autoapi/core/models/model_registry/index.rst b/_sources/autoapi/core/models/model_registry/index.rst new file mode 100644 index 000000000..220b4ac26 --- /dev/null +++ b/_sources/autoapi/core/models/model_registry/index.rst @@ -0,0 +1,57 @@ +:py:mod:`core.models.model_registry` +==================================== + +.. py:module:: core.models.model_registry + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.model_registry.model_name_to_local_file + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.models.model_registry.MODEL_REGISTRY + core.models.model_registry.available_pretrained_models + + +.. py:data:: MODEL_REGISTRY + + + +.. py:data:: available_pretrained_models + + + +.. py:function:: model_name_to_local_file(model_name: str, local_cache: str | pathlib.Path) -> str + + Download a pretrained checkpoint if it does not exist already + + :param model_name: the model name. See available_pretrained_checkpoints. 
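+                       For example (an illustrative sketch; the model name shown
+                       is an assumed entry of ``available_pretrained_models``, not
+                       a guaranteed one)::
+
+                           from fairchem.core.models.model_registry import model_name_to_local_file
+
+                           ckpt_path = model_name_to_local_file(
+                               "GemNet-OC-S2EFS-OC20+OC22",          # assumed example name
+                               local_cache="/tmp/fairchem_checkpoints",
+                           )
+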
+ :type model_name: str + :param local_cache: path to local cache directory + :type local_cache: str or Path + + :returns: local path to checkpoint file + :rtype: str + + diff --git a/_sources/autoapi/core/models/painn/index.rst b/_sources/autoapi/core/models/painn/index.rst new file mode 100644 index 000000000..82aa440a2 --- /dev/null +++ b/_sources/autoapi/core/models/painn/index.rst @@ -0,0 +1,73 @@ +:py:mod:`core.models.painn` +=========================== + +.. py:module:: core.models.painn + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + painn/index.rst + utils/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.painn.PaiNN + + + + +.. py:class:: PaiNN(num_atoms: int, bond_feat_dim: int, num_targets: int, hidden_channels: int = 512, num_layers: int = 6, num_rbf: int = 128, cutoff: float = 12.0, max_neighbors: int = 50, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None, regress_forces: bool = True, direct_forces: bool = True, use_pbc: bool = True, otf_graph: bool = True, num_elements: int = 83, scale_file: str | None = None) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + PaiNN model based on the description in Schütt et al. (2021): + Equivariant message passing for the prediction of tensorial properties + and molecular spectra, https://arxiv.org/abs/2102.03150. + + .. py:property:: num_params + :type: int + + + .. py:method:: reset_parameters() -> None + + + .. py:method:: select_symmetric_edges(tensor, mask, reorder_idx, inverse_neg) -> torch.Tensor + + + .. py:method:: symmetrize_edges(edge_index, cell_offsets, neighbors, batch_idx, reorder_tensors, reorder_tensors_invneg) + + Symmetrize edges to ensure existence of counter-directional edges. + + Some edges are only present in one direction in the data, + since every atom has a maximum number of neighbors. + If `symmetric_edge_symmetrization` is False, + we only use i->j edges here. So we lose some j->i edges + and add others by making it symmetric. + If `symmetric_edge_symmetrization` is True, + we always use both directions. + + + .. py:method:: generate_graph_values(data) + + + .. py:method:: forward(data) + + + .. py:method:: __repr__() -> str + + Return repr(self). + + + diff --git a/_sources/autoapi/core/models/painn/painn/index.rst b/_sources/autoapi/core/models/painn/painn/index.rst new file mode 100644 index 000000000..12fe45835 --- /dev/null +++ b/_sources/autoapi/core/models/painn/painn/index.rst @@ -0,0 +1,312 @@ +:py:mod:`core.models.painn.painn` +================================= + +.. py:module:: core.models.painn.painn + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + --- + + MIT License + + Copyright (c) 2021 www.compscience.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.painn.painn.PaiNN + core.models.painn.painn.PaiNNMessage + core.models.painn.painn.PaiNNUpdate + core.models.painn.painn.PaiNNOutput + core.models.painn.painn.GatedEquivariantBlock + + + + +.. py:class:: PaiNN(num_atoms: int, bond_feat_dim: int, num_targets: int, hidden_channels: int = 512, num_layers: int = 6, num_rbf: int = 128, cutoff: float = 12.0, max_neighbors: int = 50, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None, regress_forces: bool = True, direct_forces: bool = True, use_pbc: bool = True, otf_graph: bool = True, num_elements: int = 83, scale_file: str | None = None) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + PaiNN model based on the description in Schütt et al. (2021): + Equivariant message passing for the prediction of tensorial properties + and molecular spectra, https://arxiv.org/abs/2102.03150. + + .. py:property:: num_params + :type: int + + + .. py:method:: reset_parameters() -> None + + + .. py:method:: select_symmetric_edges(tensor, mask, reorder_idx, inverse_neg) -> torch.Tensor + + + .. py:method:: symmetrize_edges(edge_index, cell_offsets, neighbors, batch_idx, reorder_tensors, reorder_tensors_invneg) + + Symmetrize edges to ensure existence of counter-directional edges. + + Some edges are only present in one direction in the data, + since every atom has a maximum number of neighbors. + If `symmetric_edge_symmetrization` is False, + we only use i->j edges here. So we lose some j->i edges + and add others by making it symmetric. + If `symmetric_edge_symmetrization` is True, + we always use both directions. + + + .. py:method:: generate_graph_values(data) + + + .. py:method:: forward(data) + + + .. py:method:: __repr__() -> str + + Return repr(self). + + + +.. py:class:: PaiNNMessage(hidden_channels, num_rbf) + + + Bases: :py:obj:`torch_geometric.nn.MessagePassing` + + Base class for creating message passing layers of the form + + .. math:: + \mathbf{x}_i^{\prime} = \gamma_{\mathbf{\Theta}} \left( \mathbf{x}_i, + \bigoplus_{j \in \mathcal{N}(i)} \, \phi_{\mathbf{\Theta}} + \left(\mathbf{x}_i, \mathbf{x}_j,\mathbf{e}_{j,i}\right) \right), + + where :math:`\bigoplus` denotes a differentiable, permutation invariant + function, *e.g.*, sum, mean, min, max or mul, and + :math:`\gamma_{\mathbf{\Theta}}` and :math:`\phi_{\mathbf{\Theta}}` denote + differentiable functions such as MLPs. + See `here `__ for the accompanying tutorial. + + :param aggr: The aggregation scheme + to use, *e.g.*, :obj:`"add"`, :obj:`"sum"` :obj:`"mean"`, + :obj:`"min"`, :obj:`"max"` or :obj:`"mul"`. + In addition, can be any + :class:`~torch_geometric.nn.aggr.Aggregation` module (or any string + that automatically resolves to it). + If given as a list, will make use of multiple aggregations in which + different outputs will get concatenated in the last dimension. 
+ If set to :obj:`None`, the :class:`MessagePassing` instantiation is + expected to implement its own aggregation logic via + :meth:`aggregate`. (default: :obj:`"add"`) + :type aggr: str or [str] or Aggregation, optional + :param aggr_kwargs: Arguments passed to the + respective aggregation function in case it gets automatically + resolved. (default: :obj:`None`) + :type aggr_kwargs: Dict[str, Any], optional + :param flow: The flow direction of message passing + (:obj:`"source_to_target"` or :obj:`"target_to_source"`). + (default: :obj:`"source_to_target"`) + :type flow: str, optional + :param node_dim: The axis along which to propagate. + (default: :obj:`-2`) + :type node_dim: int, optional + :param decomposed_layers: The number of feature decomposition + layers, as introduced in the `"Optimizing Memory Efficiency of + Graph Neural Networks on Edge Computing Platforms" + `_ paper. + Feature decomposition reduces the peak memory usage by slicing + the feature dimensions into separated feature decomposition layers + during GNN aggregation. + This method can accelerate GNN execution on CPU-based platforms + (*e.g.*, 2-3x speedup on the + :class:`~torch_geometric.datasets.Reddit` dataset) for common GNN + models such as :class:`~torch_geometric.nn.models.GCN`, + :class:`~torch_geometric.nn.models.GraphSAGE`, + :class:`~torch_geometric.nn.models.GIN`, etc. + However, this method is not applicable to all GNN operators + available, in particular for operators in which message computation + can not easily be decomposed, *e.g.* in attention-based GNNs. + The selection of the optimal value of :obj:`decomposed_layers` + depends both on the specific graph dataset and available hardware + resources. + A value of :obj:`2` is suitable in most cases. + Although the peak memory usage is directly associated with the + granularity of feature decomposition, the same is not necessarily + true for execution speedups. (default: :obj:`1`) + :type decomposed_layers: int, optional + + .. py:method:: reset_parameters() -> None + + Resets all learnable parameters of the module. + + + .. py:method:: forward(x, vec, edge_index, edge_rbf, edge_vector) + + Runs the forward pass of the module. + + + .. py:method:: message(xh_j, vec_j, rbfh_ij, r_ij) + + Constructs messages from node :math:`j` to node :math:`i` + in analogy to :math:`\phi_{\mathbf{\Theta}}` for each edge in + :obj:`edge_index`. + This function can take any argument as input which was initially + passed to :meth:`propagate`. + Furthermore, tensors passed to :meth:`propagate` can be mapped to the + respective nodes :math:`i` and :math:`j` by appending :obj:`_i` or + :obj:`_j` to the variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`. + + + .. py:method:: aggregate(features: tuple[torch.Tensor, torch.Tensor], index: torch.Tensor, dim_size: int) -> tuple[torch.Tensor, torch.Tensor] + + Aggregates messages from neighbors as + :math:`\bigoplus_{j \in \mathcal{N}(i)}`. + + Takes in the output of message computation as first argument and any + argument which was initially passed to :meth:`propagate`. + + By default, this function will delegate its call to the underlying + :class:`~torch_geometric.nn.aggr.Aggregation` module to reduce messages + as specified in :meth:`__init__` by the :obj:`aggr` argument. + + + .. py:method:: update(inputs: tuple[torch.Tensor, torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor] + + Updates node embeddings in analogy to + :math:`\gamma_{\mathbf{\Theta}}` for each node + :math:`i \in \mathcal{V}`. 
+ Takes in the output of aggregation as first argument and any argument + which was initially passed to :meth:`propagate`. + + + +.. py:class:: PaiNNUpdate(hidden_channels) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: reset_parameters() -> None + + + .. py:method:: forward(x, vec) + + + +.. py:class:: PaiNNOutput(hidden_channels) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: reset_parameters() -> None + + + .. py:method:: forward(x, vec) + + + +.. py:class:: GatedEquivariantBlock(hidden_channels, out_channels) + + + Bases: :py:obj:`torch.nn.Module` + + Gated Equivariant Block as defined in Schütt et al. (2021): + Equivariant message passing for the prediction of tensorial properties and molecular spectra + + .. py:method:: reset_parameters() -> None + + + .. py:method:: forward(x, v) + + + diff --git a/_sources/autoapi/core/models/painn/utils/index.rst b/_sources/autoapi/core/models/painn/utils/index.rst new file mode 100644 index 000000000..c3722abaa --- /dev/null +++ b/_sources/autoapi/core/models/painn/utils/index.rst @@ -0,0 +1,64 @@ +:py:mod:`core.models.painn.utils` +================================= + +.. py:module:: core.models.painn.utils + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.painn.utils.repeat_blocks + core.models.painn.utils.get_edge_id + + + +.. 
py:function:: repeat_blocks(sizes, repeats, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) -> torch.Tensor + + Repeat blocks of indices. + Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements + + continuous_indexing: Whether to keep increasing the index after each block + start_idx: Starting index + block_inc: Number to increment by after each block, + either global or per block. Shape: len(sizes) - 1 + repeat_inc: Number to increment by after each repetition, + either global or per block + + .. rubric:: Examples + + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False + Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True + Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; + repeat_inc = 4 + Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; + start_idx = 5 + Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10] + sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; + block_inc = 1 + Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7] + sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True + Return: [0 1 2 0 1 2 3 4 3 4 3 4] + sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True + Return: [0 1 0 1 5 6 5 6] + + +.. py:function:: get_edge_id(edge_idx, cell_offsets, num_atoms: int) + + diff --git a/_sources/autoapi/core/models/schnet/index.rst b/_sources/autoapi/core/models/schnet/index.rst new file mode 100644 index 000000000..bea77e4ed --- /dev/null +++ b/_sources/autoapi/core/models/schnet/index.rst @@ -0,0 +1,98 @@ +:py:mod:`core.models.schnet` +============================ + +.. py:module:: core.models.schnet + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.schnet.SchNetWrap + + + + +.. py:class:: SchNetWrap(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, hidden_channels: int = 128, num_filters: int = 128, num_interactions: int = 6, num_gaussians: int = 50, cutoff: float = 10.0, readout: str = 'add') + + + Bases: :py:obj:`torch_geometric.nn.SchNet`, :py:obj:`fairchem.core.models.base.BaseModel` + + Wrapper around the continuous-filter convolutional neural network SchNet from the + `"SchNet: A Continuous-filter Convolutional Neural Network for Modeling + Quantum Interactions" `_. Each layer uses interaction + block of the form: + + .. math:: + \mathbf{x}^{\prime}_i = \sum_{j \in \mathcal{N}(i)} \mathbf{x}_j \odot + h_{\mathbf{\Theta}} ( \exp(-\gamma(\mathbf{e}_{j,i} - \mathbf{\mu}))), + + :param num_atoms: Unused argument + :type num_atoms: int + :param bond_feat_dim: Unused argument + :type bond_feat_dim: int + :param num_targets: Number of targets to predict. + :type num_targets: int + :param use_pbc: If set to :obj:`True`, account for periodic boundary conditions. + (default: :obj:`True`) + :type use_pbc: bool, optional + :param regress_forces: If set to :obj:`True`, predict forces by differentiating + energy with respect to positions. 
+ (default: :obj:`True`) + :type regress_forces: bool, optional + :param otf_graph: If set to :obj:`True`, compute graph edges on the fly. + (default: :obj:`False`) + :type otf_graph: bool, optional + :param hidden_channels: Number of hidden channels. + (default: :obj:`128`) + :type hidden_channels: int, optional + :param num_filters: Number of filters to use. + (default: :obj:`128`) + :type num_filters: int, optional + :param num_interactions: Number of interaction blocks + (default: :obj:`6`) + :type num_interactions: int, optional + :param num_gaussians: The number of gaussians :math:`\mu`. + (default: :obj:`50`) + :type num_gaussians: int, optional + :param cutoff: Cutoff distance for interatomic interactions. + (default: :obj:`10.0`) + :type cutoff: float, optional + :param readout: Whether to apply :obj:`"add"` or + :obj:`"mean"` global aggregation. (default: :obj:`"add"`) + :type readout: string, optional + + .. py:property:: num_params + :type: int + + + .. py:method:: _forward(data) + + + .. py:method:: forward(data) + + :param z: Atomic number of each atom with shape + :obj:`[num_atoms]`. + :type z: torch.Tensor + :param pos: Coordinates of each atom with shape + :obj:`[num_atoms, 3]`. + :type pos: torch.Tensor + :param batch: Batch indices assigning each atom + to a separate molecule with shape :obj:`[num_atoms]`. + (default: :obj:`None`) + :type batch: torch.Tensor, optional + + + diff --git a/_sources/autoapi/core/models/scn/index.rst b/_sources/autoapi/core/models/scn/index.rst new file mode 100644 index 000000000..b5f2b61c0 --- /dev/null +++ b/_sources/autoapi/core/models/scn/index.rst @@ -0,0 +1,131 @@ +:py:mod:`core.models.scn` +========================= + +.. py:module:: core.models.scn + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + sampling/index.rst + scn/index.rst + smearing/index.rst + spherical_harmonics/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.scn.SphericalChannelNetwork + + + + +.. 
py:class:: SphericalChannelNetwork(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_num_neighbors: int = 20, cutoff: float = 8.0, max_num_elements: int = 90, num_interactions: int = 8, lmax: int = 6, mmax: int = 1, num_resolutions: int = 2, sphere_channels: int = 128, sphere_channels_reduce: int = 128, hidden_channels: int = 256, num_taps: int = -1, use_grid: bool = True, num_bands: int = 1, num_sphere_samples: int = 128, num_basis_functions: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False, direct_forces: bool = True) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + Spherical Channel Network + Paper: Spherical Channels for Modeling Atomic Interactions + + :param use_pbc: Use periodic boundary conditions + :type use_pbc: bool + :param regress_forces: Compute forces + :type regress_forces: bool + :param otf_graph: Compute graph On The Fly (OTF) + :type otf_graph: bool + :param max_num_neighbors: Maximum number of neighbors per atom + :type max_num_neighbors: int + :param cutoff: Maximum distance between nieghboring atoms in Angstroms + :type cutoff: float + :param max_num_elements: Maximum atomic number + :type max_num_elements: int + :param num_interactions: Number of layers in the GNN + :type num_interactions: int + :param lmax: Maximum degree of the spherical harmonics (1 to 10) + :type lmax: int + :param mmax: Maximum order of the spherical harmonics (0 or 1) + :type mmax: int + :param num_resolutions: Number of resolutions used to compute messages, further away atoms has lower resolution (1 or 2) + :type num_resolutions: int + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param sphere_channels_reduce: Number of spherical channels used during message passing (downsample or upsample) + :type sphere_channels_reduce: int + :param hidden_channels: Number of hidden units in message passing + :type hidden_channels: int + :param num_taps: Number of taps or rotations used during message passing (1 or otherwise set automatically based on mmax) + :type num_taps: int + :param use_grid: Use non-linear pointwise convolution during aggregation + :type use_grid: bool + :param num_bands: Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2) + :type num_bands: int + :param num_sphere_samples: Number of samples used to approximate the integration of the sphere in the output blocks + :type num_sphere_samples: int + :param num_basis_functions: Number of basis functions used for distance and atomic number blocks + :type num_basis_functions: int + :param distance_function: Basis function used for distances + :type distance_function: "gaussian", "sigmoid", "linearsigmoid", "silu" + :param basis_width_scalar: Width of distance basis function + :type basis_width_scalar: float + :param distance_resolution: Distance between distance basis functions in Angstroms + :type distance_resolution: float + :param show_timing_info: Show timing and memory info + :type show_timing_info: bool + + .. py:property:: num_params + :type: int + + + .. py:attribute:: energy_fc1 + :type: torch.nn.Linear + + + + .. py:attribute:: energy_fc2 + :type: torch.nn.Linear + + + + .. py:attribute:: energy_fc3 + :type: torch.nn.Linear + + + + .. py:attribute:: force_fc1 + :type: torch.nn.Linear + + + + .. py:attribute:: force_fc2 + :type: torch.nn.Linear + + + + .. 
py:attribute:: force_fc3 + :type: torch.nn.Linear + + + + .. py:method:: forward(data) + + + .. py:method:: _forward_helper(data) + + + .. py:method:: _init_edge_rot_mat(data, edge_index, edge_distance_vec) + + + .. py:method:: _rank_edge_distances(edge_distance, edge_index, max_num_neighbors: int) -> torch.Tensor + + + diff --git a/_sources/autoapi/core/models/scn/sampling/index.rst b/_sources/autoapi/core/models/scn/sampling/index.rst new file mode 100644 index 000000000..6cd596fb3 --- /dev/null +++ b/_sources/autoapi/core/models/scn/sampling/index.rst @@ -0,0 +1,34 @@ +:py:mod:`core.models.scn.sampling` +================================== + +.. py:module:: core.models.scn.sampling + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.scn.sampling.CalcSpherePoints + core.models.scn.sampling.CalcSpherePointsRandom + + + +.. py:function:: CalcSpherePoints(num_points: int, device: str = 'cpu') -> torch.Tensor + + +.. py:function:: CalcSpherePointsRandom(num_points: int, device) -> torch.Tensor + + diff --git a/_sources/autoapi/core/models/scn/scn/index.rst b/_sources/autoapi/core/models/scn/scn/index.rst new file mode 100644 index 000000000..aa1235dcb --- /dev/null +++ b/_sources/autoapi/core/models/scn/scn/index.rst @@ -0,0 +1,250 @@ +:py:mod:`core.models.scn.scn` +============================= + +.. py:module:: core.models.scn.scn + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.scn.scn.SphericalChannelNetwork + core.models.scn.scn.EdgeBlock + core.models.scn.scn.MessageBlock + core.models.scn.scn.DistanceBlock + + + + +.. 
py:class:: SphericalChannelNetwork(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_num_neighbors: int = 20, cutoff: float = 8.0, max_num_elements: int = 90, num_interactions: int = 8, lmax: int = 6, mmax: int = 1, num_resolutions: int = 2, sphere_channels: int = 128, sphere_channels_reduce: int = 128, hidden_channels: int = 256, num_taps: int = -1, use_grid: bool = True, num_bands: int = 1, num_sphere_samples: int = 128, num_basis_functions: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False, direct_forces: bool = True) + + + Bases: :py:obj:`fairchem.core.models.base.BaseModel` + + Spherical Channel Network + Paper: Spherical Channels for Modeling Atomic Interactions + + :param use_pbc: Use periodic boundary conditions + :type use_pbc: bool + :param regress_forces: Compute forces + :type regress_forces: bool + :param otf_graph: Compute graph On The Fly (OTF) + :type otf_graph: bool + :param max_num_neighbors: Maximum number of neighbors per atom + :type max_num_neighbors: int + :param cutoff: Maximum distance between nieghboring atoms in Angstroms + :type cutoff: float + :param max_num_elements: Maximum atomic number + :type max_num_elements: int + :param num_interactions: Number of layers in the GNN + :type num_interactions: int + :param lmax: Maximum degree of the spherical harmonics (1 to 10) + :type lmax: int + :param mmax: Maximum order of the spherical harmonics (0 or 1) + :type mmax: int + :param num_resolutions: Number of resolutions used to compute messages, further away atoms has lower resolution (1 or 2) + :type num_resolutions: int + :param sphere_channels: Number of spherical channels + :type sphere_channels: int + :param sphere_channels_reduce: Number of spherical channels used during message passing (downsample or upsample) + :type sphere_channels_reduce: int + :param hidden_channels: Number of hidden units in message passing + :type hidden_channels: int + :param num_taps: Number of taps or rotations used during message passing (1 or otherwise set automatically based on mmax) + :type num_taps: int + :param use_grid: Use non-linear pointwise convolution during aggregation + :type use_grid: bool + :param num_bands: Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2) + :type num_bands: int + :param num_sphere_samples: Number of samples used to approximate the integration of the sphere in the output blocks + :type num_sphere_samples: int + :param num_basis_functions: Number of basis functions used for distance and atomic number blocks + :type num_basis_functions: int + :param distance_function: Basis function used for distances + :type distance_function: "gaussian", "sigmoid", "linearsigmoid", "silu" + :param basis_width_scalar: Width of distance basis function + :type basis_width_scalar: float + :param distance_resolution: Distance between distance basis functions in Angstroms + :type distance_resolution: float + :param show_timing_info: Show timing and memory info + :type show_timing_info: bool + + .. py:property:: num_params + :type: int + + + .. py:attribute:: energy_fc1 + :type: torch.nn.Linear + + + + .. py:attribute:: energy_fc2 + :type: torch.nn.Linear + + + + .. py:attribute:: energy_fc3 + :type: torch.nn.Linear + + + + .. py:attribute:: force_fc1 + :type: torch.nn.Linear + + + + .. py:attribute:: force_fc2 + :type: torch.nn.Linear + + + + .. 
py:attribute:: force_fc3 + :type: torch.nn.Linear + + + + .. py:method:: forward(data) + + + .. py:method:: _forward_helper(data) + + + .. py:method:: _init_edge_rot_mat(data, edge_index, edge_distance_vec) + + + .. py:method:: _rank_edge_distances(edge_distance, edge_index, max_num_neighbors: int) -> torch.Tensor + + + +.. py:class:: EdgeBlock(num_resolutions: int, sphere_channels_reduce, hidden_channels_list, cutoff_list, sphharm_list, sphere_channels, distance_expansion, max_num_elements: int, num_basis_functions: int, num_gaussians: int, use_grid: bool, act) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x, atomic_numbers, edge_distance, edge_index, cutoff_index) + + + +.. py:class:: MessageBlock(sphere_channels_reduce, hidden_channels, num_basis_functions, sphharm, act) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x, x_edge, edge_index) + + + +.. py:class:: DistanceBlock(in_channels, num_basis_functions: int, distance_expansion, max_num_elements: int, act) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. 
note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(edge_distance, source_element, target_element) + + + diff --git a/_sources/autoapi/core/models/scn/smearing/index.rst b/_sources/autoapi/core/models/scn/smearing/index.rst new file mode 100644 index 000000000..ce325292a --- /dev/null +++ b/_sources/autoapi/core/models/scn/smearing/index.rst @@ -0,0 +1,190 @@ +:py:mod:`core.models.scn.smearing` +================================== + +.. py:module:: core.models.scn.smearing + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.scn.smearing.GaussianSmearing + core.models.scn.smearing.SigmoidSmearing + core.models.scn.smearing.LinearSigmoidSmearing + core.models.scn.smearing.SiLUSmearing + + + + +.. py:class:: GaussianSmearing(start: float = -5.0, stop: float = 5.0, num_gaussians: int = 50, basis_width_scalar: float = 1.0) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(dist) -> torch.Tensor + + + +.. py:class:: SigmoidSmearing(start=-5.0, stop=5.0, num_sigmoid=50, basis_width_scalar=1.0) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(dist) -> torch.Tensor + + + +.. 
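A minimal usage sketch of the smearing modules above, here ``GaussianSmearing``; the import path, the distance range, and the output shape noted in the comment are assumptions::

    import torch
    from fairchem.core.models.scn.smearing import GaussianSmearing  # import path assumed

    smearing = GaussianSmearing(start=0.0, stop=8.0, num_gaussians=50)
    dist = torch.tensor([0.9, 1.4, 2.7])  # pairwise distances in Angstroms
    expanded = smearing(dist)             # expected shape (3, 50): one radial-basis row per distance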
py:class:: LinearSigmoidSmearing(start: float = -5.0, stop: float = 5.0, num_sigmoid: int = 50, basis_width_scalar: float = 1.0) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(dist) -> torch.Tensor + + + +.. py:class:: SiLUSmearing(start: float = -5.0, stop: float = 5.0, num_output: int = 50, basis_width_scalar: float = 1.0) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(dist) + + + diff --git a/_sources/autoapi/core/models/scn/spherical_harmonics/index.rst b/_sources/autoapi/core/models/scn/spherical_harmonics/index.rst new file mode 100644 index 000000000..b19697c1f --- /dev/null +++ b/_sources/autoapi/core/models/scn/spherical_harmonics/index.rst @@ -0,0 +1,102 @@ +:py:mod:`core.models.scn.spherical_harmonics` +============================================= + +.. py:module:: core.models.scn.spherical_harmonics + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.scn.spherical_harmonics.SphericalHarmonicsHelper + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.models.scn.spherical_harmonics.wigner_D + core.models.scn.spherical_harmonics._z_rot_mat + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.models.scn.spherical_harmonics._Jd + + +.. py:data:: _Jd + + + +.. 
py:class:: SphericalHarmonicsHelper(lmax: int, mmax: int, num_taps: int, num_bands: int) + + + Helper functions for spherical harmonics calculations and representations + + :param lmax: Maximum degree of the spherical harmonics + :type lmax: int + :param mmax: Maximum order of the spherical harmonics + :type mmax: int + :param num_taps: Number of taps or rotations (1 or otherwise set automatically based on mmax) + :type num_taps: int + :param num_bands: Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2) + :type num_bands: int + + .. py:method:: InitWignerDMatrix(edge_rot_mat) -> None + + + .. py:method:: InitYRotMapping() + + + .. py:method:: ToGrid(x, channels) -> torch.Tensor + + + .. py:method:: FromGrid(x_grid, channels) -> torch.Tensor + + + .. py:method:: CombineYRotations(x) -> torch.Tensor + + + .. py:method:: Rotate(x) -> torch.Tensor + + + .. py:method:: FlipGrid(grid, num_channels: int) -> torch.Tensor + + + .. py:method:: RotateInv(x) -> torch.Tensor + + + .. py:method:: RotateWigner(x, wigner) -> torch.Tensor + + + .. py:method:: RotationMatrix(rot_x: float, rot_y: float, rot_z: float) -> torch.Tensor + + + .. py:method:: RotationToWignerDMatrix(edge_rot_mat, start_lmax, end_lmax) + + + +.. py:function:: wigner_D(l, alpha, beta, gamma) + + +.. py:function:: _z_rot_mat(angle, l) + + diff --git a/_sources/autoapi/core/models/utils/activations/index.rst b/_sources/autoapi/core/models/utils/activations/index.rst new file mode 100644 index 000000000..23d7855b6 --- /dev/null +++ b/_sources/autoapi/core/models/utils/activations/index.rst @@ -0,0 +1,67 @@ +:py:mod:`core.models.utils.activations` +======================================= + +.. py:module:: core.models.utils.activations + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.utils.activations.Act + + + + +.. py:class:: Act(act: str, slope: float = 0.05) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(input: torch.Tensor) -> torch.Tensor + + + diff --git a/_sources/autoapi/core/models/utils/basis/index.rst b/_sources/autoapi/core/models/utils/basis/index.rst new file mode 100644 index 000000000..f33ce9953 --- /dev/null +++ b/_sources/autoapi/core/models/utils/basis/index.rst @@ -0,0 +1,328 @@ +:py:mod:`core.models.utils.basis` +================================= + +.. py:module:: core.models.utils.basis + +.. 
autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.models.utils.basis.Sine + core.models.utils.basis.SIREN + core.models.utils.basis.SINESmearing + core.models.utils.basis.GaussianSmearing + core.models.utils.basis.FourierSmearing + core.models.utils.basis.Basis + core.models.utils.basis.SphericalSmearing + + + + +.. py:class:: Sine(w0: float = 30.0) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x: torch.Tensor) -> torch.Tensor + + + +.. py:class:: SIREN(layers: list[int], num_in_features: int, out_features: int, w0: float = 30.0, initializer: str | None = 'siren', c: float = 6) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(X: torch.Tensor) -> torch.Tensor + + + +.. py:class:: SINESmearing(num_in_features: int, num_freqs: int = 40, use_cosine: bool = False) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. 
You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x: torch.Tensor) -> torch.Tensor + + + +.. py:class:: GaussianSmearing(num_in_features: int, start: int = 0, end: int = 1, num_freqs: int = 50) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x: torch.Tensor) -> torch.Tensor + + + +.. py:class:: FourierSmearing(num_in_features: int, num_freqs: int = 40, use_cosine: bool = False) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(x: torch.Tensor) -> torch.Tensor + + + +.. py:class:: Basis(num_in_features: int, num_freqs: int = 50, basis_type: str = 'powersine', act: str = 'ssp', sph: SphericalSmearing | None = None) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. 
You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:attribute:: smearing + :type: SINESmearing | FourierSmearing | GaussianSmearing | torch.nn.Sequential + + + + .. py:method:: forward(x: torch.Tensor, edge_attr_sph: torch.Tensor | None = None) + + + +.. py:class:: SphericalSmearing(max_n: int = 10, option: str = 'all') + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:attribute:: m + :type: numpy.typing.NDArray[numpy.int_] + + + + .. py:attribute:: n + :type: numpy.typing.NDArray[numpy.int_] + + + + .. py:method:: forward(xyz: torch.Tensor) -> torch.Tensor + + + diff --git a/_sources/autoapi/core/models/utils/index.rst b/_sources/autoapi/core/models/utils/index.rst new file mode 100644 index 000000000..da195a466 --- /dev/null +++ b/_sources/autoapi/core/models/utils/index.rst @@ -0,0 +1,16 @@ +:py:mod:`core.models.utils` +=========================== + +.. py:module:: core.models.utils + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + activations/index.rst + basis/index.rst + + diff --git a/_sources/autoapi/core/modules/evaluator/index.rst b/_sources/autoapi/core/modules/evaluator/index.rst new file mode 100644 index 000000000..e8cf3abfe --- /dev/null +++ b/_sources/autoapi/core/modules/evaluator/index.rst @@ -0,0 +1,122 @@ +:py:mod:`core.modules.evaluator` +================================ + +.. py:module:: core.modules.evaluator + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.modules.evaluator.Evaluator + + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + core.modules.evaluator.forcesx_mae + core.modules.evaluator.forcesx_mse + core.modules.evaluator.forcesy_mae + core.modules.evaluator.forcesy_mse + core.modules.evaluator.forcesz_mae + core.modules.evaluator.forcesz_mse + core.modules.evaluator.energy_forces_within_threshold + core.modules.evaluator.energy_within_threshold + core.modules.evaluator.average_distance_within_threshold + core.modules.evaluator.min_diff + core.modules.evaluator.cosine_similarity + core.modules.evaluator.mae + core.modules.evaluator.mse + core.modules.evaluator.magnitude_error + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.modules.evaluator.NONE + + +.. py:data:: NONE + + + +.. py:class:: Evaluator(task: str | None = None, eval_metrics: dict | None = None) + + + .. py:attribute:: task_metrics + :type: ClassVar[dict[str, str]] + + + + .. py:attribute:: task_primary_metric + :type: ClassVar[dict[str, str | None]] + + + + .. py:method:: eval(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], prev_metrics=None) + + + .. py:method:: update(key, stat, metrics) + + + +.. py:function:: forcesx_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) + + +.. py:function:: forcesx_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) + + +.. py:function:: forcesy_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) + + +.. py:function:: forcesy_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) + + +.. py:function:: forcesz_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) + + +.. py:function:: forcesz_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) + + +.. py:function:: energy_forces_within_threshold(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) -> dict[str, float | int] + + +.. py:function:: energy_within_threshold(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) -> dict[str, float | int] + + +.. py:function:: average_distance_within_threshold(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) -> dict[str, float | int] + + +.. py:function:: min_diff(pred_pos: torch.Tensor, dft_pos: torch.Tensor, cell: torch.Tensor, pbc: torch.Tensor) + + +.. py:function:: cosine_similarity(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) + + +.. py:function:: mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) -> dict[str, float | int] + + +.. py:function:: mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) -> dict[str, float | int] + + +.. 
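A hedged sketch of driving the ``Evaluator`` above; the task name, the dictionary keys, and the result layout are assumptions about what the metric functions listed here expect::

    import torch
    from fairchem.core.modules.evaluator import Evaluator

    evaluator = Evaluator(task="s2ef")  # task name assumed
    prediction = {
        "energy": torch.randn(4),
        "forces": torch.randn(10, 3),
        "natoms": torch.tensor([3, 3, 2, 2]),  # assumed to be needed by per-system metrics
    }
    target = {
        "energy": torch.randn(4),
        "forces": torch.randn(10, 3),
        "natoms": torch.tensor([3, 3, 2, 2]),
    }
    metrics = evaluator.eval(prediction, target)
    # Each entry is assumed to hold {"metric": ..., "total": ..., "numel": ...}.
    print(metrics["forces_mae"]["metric"])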
py:function:: magnitude_error(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE, p: int = 2) -> dict[str, float | int] + + diff --git a/_sources/autoapi/core/modules/exponential_moving_average/index.rst b/_sources/autoapi/core/modules/exponential_moving_average/index.rst new file mode 100644 index 000000000..2ac2993cd --- /dev/null +++ b/_sources/autoapi/core/modules/exponential_moving_average/index.rst @@ -0,0 +1,100 @@ +:py:mod:`core.modules.exponential_moving_average` +================================================= + +.. py:module:: core.modules.exponential_moving_average + +.. autoapi-nested-parse:: + + Copied (and improved) from: + https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py (MIT license) + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.modules.exponential_moving_average.ExponentialMovingAverage + + + + +.. py:class:: ExponentialMovingAverage(parameters: collections.abc.Iterable[torch.nn.Parameter], decay: float, use_num_updates: bool = False) + + + Maintains (exponential) moving average of a set of parameters. + + :param parameters: Iterable of `torch.nn.Parameter` (typically from + `model.parameters()`). + :param decay: The exponential decay. + :param use_num_updates: Whether to use number of updates when computing + averages. + + .. py:method:: _get_parameters(parameters: collections.abc.Iterable[torch.nn.Parameter] | None) -> collections.abc.Iterable[torch.nn.Parameter] + + + .. py:method:: update(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) -> None + + Update currently maintained parameters. + + Call this every time the parameters are updated, such as the result of + the `optimizer.step()` call. + + :param parameters: Iterable of `torch.nn.Parameter`; usually the same set of + parameters used to initialize this object. If `None`, the + parameters with which this `ExponentialMovingAverage` was + initialized will be used. + + + .. py:method:: copy_to(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) -> None + + Copy current parameters into given collection of parameters. + + :param parameters: Iterable of `torch.nn.Parameter`; the parameters to be + updated with the stored moving averages. If `None`, the + parameters with which this `ExponentialMovingAverage` was + initialized will be used. + + + .. py:method:: store(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) -> None + + Save the current parameters for restoring later. + + :param parameters: Iterable of `torch.nn.Parameter`; the parameters to be + temporarily stored. If `None`, the parameters with which this + `ExponentialMovingAverage` was initialized will be used. + + + .. py:method:: restore(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) -> None + + Restore the parameters stored with the `store` method. + Useful to validate the model with EMA parameters without affecting the + original optimization process. Store the parameters before the + `copy_to` method. After validation (or model saving), use this to + restore the former parameters. + + :param parameters: Iterable of `torch.nn.Parameter`; the parameters to be + updated with the stored parameters. If `None`, the + parameters with which this `ExponentialMovingAverage` was + initialized will be used. + + + .. py:method:: state_dict() -> dict + + Returns the state of the ExponentialMovingAverage as a dict. + + + .. 
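A minimal sketch of the workflow the ``update``/``store``/``copy_to``/``restore`` docstrings above describe: update the moving average after each optimizer step, then temporarily swap the EMA weights in around validation::

    import torch
    from fairchem.core.modules.exponential_moving_average import ExponentialMovingAverage

    model = torch.nn.Linear(8, 1)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    ema = ExponentialMovingAverage(model.parameters(), decay=0.999)

    for _ in range(10):  # dummy training steps
        optimizer.zero_grad()
        loss = model(torch.randn(2, 8)).pow(2).mean()
        loss.backward()
        optimizer.step()
        ema.update()     # fold the new weights into the moving average

    ema.store()          # stash the live parameters
    ema.copy_to()        # evaluate with the EMA parameters
    # ... run validation here ...
    ema.restore()        # put the live parameters back for further training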
py:method:: load_state_dict(state_dict: dict) -> None + + Loads the ExponentialMovingAverage state. + + :param state_dict: EMA state. Should be an object returned + from a call to :meth:`state_dict`. + :type state_dict: dict + + + diff --git a/_sources/autoapi/core/modules/index.rst b/_sources/autoapi/core/modules/index.rst new file mode 100644 index 000000000..ea4adb9a5 --- /dev/null +++ b/_sources/autoapi/core/modules/index.rst @@ -0,0 +1,37 @@ +:py:mod:`core.modules` +====================== + +.. py:module:: core.modules + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + scaling/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + evaluator/index.rst + exponential_moving_average/index.rst + loss/index.rst + normalizer/index.rst + scheduler/index.rst + transforms/index.rst + + diff --git a/_sources/autoapi/core/modules/loss/index.rst b/_sources/autoapi/core/modules/loss/index.rst new file mode 100644 index 000000000..079bb7af3 --- /dev/null +++ b/_sources/autoapi/core/modules/loss/index.rst @@ -0,0 +1,141 @@ +:py:mod:`core.modules.loss` +=========================== + +.. py:module:: core.modules.loss + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.modules.loss.L2MAELoss + core.modules.loss.AtomwiseL2Loss + core.modules.loss.DDPLoss + + + + +.. py:class:: L2MAELoss(reduction: str = 'mean') + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(input: torch.Tensor, target: torch.Tensor) + + + +.. py:class:: AtomwiseL2Loss(reduction: str = 'mean') + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. 
+ + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(input: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor) + + + +.. py:class:: DDPLoss(loss_fn, loss_name: str = 'mae', reduction: str = 'mean') + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:method:: forward(input: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor | None = None, batch_size: int | None = None) + + + diff --git a/_sources/autoapi/core/modules/normalizer/index.rst b/_sources/autoapi/core/modules/normalizer/index.rst new file mode 100644 index 000000000..56cf72000 --- /dev/null +++ b/_sources/autoapi/core/modules/normalizer/index.rst @@ -0,0 +1,48 @@ +:py:mod:`core.modules.normalizer` +================================= + +.. py:module:: core.modules.normalizer + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.modules.normalizer.Normalizer + + + + +.. py:class:: Normalizer(tensor: torch.Tensor | None = None, mean=None, std=None, device=None) + + + Normalize a Tensor and restore it later. + + .. py:method:: to(device) -> None + + + .. py:method:: norm(tensor: torch.Tensor) -> torch.Tensor + + + .. py:method:: denorm(normed_tensor: torch.Tensor) -> torch.Tensor + + + .. py:method:: state_dict() + + + .. py:method:: load_state_dict(state_dict) -> None + + + diff --git a/_sources/autoapi/core/modules/scaling/compat/index.rst b/_sources/autoapi/core/modules/scaling/compat/index.rst new file mode 100644 index 000000000..a3e7f7339 --- /dev/null +++ b/_sources/autoapi/core/modules/scaling/compat/index.rst @@ -0,0 +1,43 @@ +:py:mod:`core.modules.scaling.compat` +===================================== + +.. py:module:: core.modules.scaling.compat + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.modules.scaling.compat._load_scale_dict + core.modules.scaling.compat.load_scales_compat + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.modules.scaling.compat.ScaleDict + + +.. py:data:: ScaleDict + + + +.. 
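Returning to the ``Normalizer`` documented above, a minimal sketch of normalizing and restoring a target tensor; the exact contents of its state dict are an assumption::

    import torch
    from fairchem.core.modules.normalizer import Normalizer

    energies = torch.randn(100)               # e.g. training energies
    normalizer = Normalizer(tensor=energies)  # mean/std inferred from the tensor

    normed = normalizer.norm(energies)
    restored = normalizer.denorm(normed)      # approximately equal to `energies`

    state = normalizer.state_dict()           # assumed to carry the mean/std for checkpointing
    normalizer.load_state_dict(state)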
py:function:: _load_scale_dict(scale_file: str | ScaleDict | None) + + Loads scale factors from either: + - a JSON file mapping scale factor names to scale values + - a python dictionary pickled object (loaded using `torch.load`) mapping scale factor names to scale values + - a dictionary mapping scale factor names to scale values + + +.. py:function:: load_scales_compat(module: torch.nn.Module, scale_file: str | ScaleDict | None) -> None + + diff --git a/_sources/autoapi/core/modules/scaling/fit/index.rst b/_sources/autoapi/core/modules/scaling/fit/index.rst new file mode 100644 index 000000000..b36129e1a --- /dev/null +++ b/_sources/autoapi/core/modules/scaling/fit/index.rst @@ -0,0 +1,30 @@ +:py:mod:`core.modules.scaling.fit` +================================== + +.. py:module:: core.modules.scaling.fit + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.modules.scaling.fit._prefilled_input + core.modules.scaling.fit._train_batch + core.modules.scaling.fit.main + + + +.. py:function:: _prefilled_input(prompt: str, prefill: str = '') -> str + + +.. py:function:: _train_batch(trainer: fairchem.core.trainers.base_trainer.BaseTrainer, batch) -> None + + +.. py:function:: main(*, num_batches: int = 16) -> None + + diff --git a/_sources/autoapi/core/modules/scaling/index.rst b/_sources/autoapi/core/modules/scaling/index.rst new file mode 100644 index 000000000..b04ed10c6 --- /dev/null +++ b/_sources/autoapi/core/modules/scaling/index.rst @@ -0,0 +1,116 @@ +:py:mod:`core.modules.scaling` +============================== + +.. py:module:: core.modules.scaling + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + compat/index.rst + fit/index.rst + scale_factor/index.rst + util/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.modules.scaling.ScaleFactor + + + + +.. py:class:: ScaleFactor(name: str | None = None, enforce_consistency: bool = True) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:property:: fitted + :type: bool + + + .. py:attribute:: scale_factor + :type: torch.Tensor + + + + .. py:attribute:: name + :type: str | None + + + + .. py:attribute:: index_fn + :type: IndexFn | None + + + + .. py:attribute:: stats + :type: _Stats | None + + + + .. py:method:: _enforce_consistency(state_dict, prefix, _local_metadata, _strict, _missing_keys, _unexpected_keys, _error_msgs) -> None + + + .. py:method:: reset_() -> None + + + .. py:method:: set_(scale: float | torch.Tensor) -> None + + + .. py:method:: initialize_(*, index_fn: IndexFn | None = None) -> None + + + .. 
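A hedged sketch of the compat loader documented above; the scale-factor name is a hypothetical placeholder, and the module would normally be a model whose ``ScaleFactor`` submodules match the keys of the mapping::

    import torch
    from fairchem.core.modules.scaling.compat import load_scales_compat

    model = torch.nn.Module()        # stand-in for a model containing ScaleFactor children
    scales = {"int_block_1": 1.23}   # hypothetical scale-factor name -> value
    load_scales_compat(model, scales)

    # Per the formats listed above, a JSON path or a torch.load-able pickle of
    # the same mapping can be passed in place of the in-memory dict.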
py:method:: fit_context_() + + + .. py:method:: fit_() + + + .. py:method:: _observe(x: torch.Tensor, ref: torch.Tensor | None = None) -> None + + + .. py:method:: forward(x: torch.Tensor, *, ref: torch.Tensor | None = None) -> torch.Tensor + + + diff --git a/_sources/autoapi/core/modules/scaling/scale_factor/index.rst b/_sources/autoapi/core/modules/scaling/scale_factor/index.rst new file mode 100644 index 000000000..c72187413 --- /dev/null +++ b/_sources/autoapi/core/modules/scaling/scale_factor/index.rst @@ -0,0 +1,159 @@ +:py:mod:`core.modules.scaling.scale_factor` +=========================================== + +.. py:module:: core.modules.scaling.scale_factor + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.modules.scaling.scale_factor._Stats + core.modules.scaling.scale_factor.ScaleFactor + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.modules.scaling.scale_factor._check_consistency + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.modules.scaling.scale_factor.IndexFn + + +.. py:class:: _Stats + + + Bases: :py:obj:`TypedDict` + + dict() -> new empty dictionary + dict(mapping) -> new dictionary initialized from a mapping object's + (key, value) pairs + dict(iterable) -> new dictionary initialized as if via: + d = {} + for k, v in iterable: + d[k] = v + dict(**kwargs) -> new dictionary initialized with the name=value pairs + in the keyword argument list. For example: dict(one=1, two=2) + + .. py:attribute:: variance_in + :type: float + + + + .. py:attribute:: variance_out + :type: float + + + + .. py:attribute:: n_samples + :type: int + + + + +.. py:data:: IndexFn + + + +.. py:function:: _check_consistency(old: torch.Tensor, new: torch.Tensor, key: str) -> None + + +.. py:class:: ScaleFactor(name: str | None = None, enforce_consistency: bool = True) + + + Bases: :py:obj:`torch.nn.Module` + + Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + + .. py:property:: fitted + :type: bool + + + .. py:attribute:: scale_factor + :type: torch.Tensor + + + + .. py:attribute:: name + :type: str | None + + + + .. py:attribute:: index_fn + :type: IndexFn | None + + + + .. py:attribute:: stats + :type: _Stats | None + + + + .. py:method:: _enforce_consistency(state_dict, prefix, _local_metadata, _strict, _missing_keys, _unexpected_keys, _error_msgs) -> None + + + .. py:method:: reset_() -> None + + + .. py:method:: set_(scale: float | torch.Tensor) -> None + + + .. py:method:: initialize_(*, index_fn: IndexFn | None = None) -> None + + + .. py:method:: fit_context_() + + + .. py:method:: fit_() + + + .. py:method:: _observe(x: torch.Tensor, ref: torch.Tensor | None = None) -> None + + + .. 
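A hedged sketch of the ``ScaleFactor`` module itself; the name is a placeholder and the multiply-by-scale semantics noted in the comment is an assumption::

    import torch
    from fairchem.core.modules.scaling import ScaleFactor

    sf = ScaleFactor(name="edge_embedding")  # hypothetical scale name
    sf.set_(1.5)                             # fix the scale explicitly, marking it as fitted
    x = torch.randn(10, 16)
    y = sf(x)                                # assumed to return x scaled by the stored factor
    print(sf.fitted)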
py:method:: forward(x: torch.Tensor, *, ref: torch.Tensor | None = None) -> torch.Tensor + + + diff --git a/_sources/autoapi/core/modules/scaling/util/index.rst b/_sources/autoapi/core/modules/scaling/util/index.rst new file mode 100644 index 000000000..d159f79c7 --- /dev/null +++ b/_sources/autoapi/core/modules/scaling/util/index.rst @@ -0,0 +1,22 @@ +:py:mod:`core.modules.scaling.util` +=================================== + +.. py:module:: core.modules.scaling.util + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.modules.scaling.util.ensure_fitted + + + +.. py:function:: ensure_fitted(module: torch.nn.Module, warn: bool = False) -> None + + diff --git a/_sources/autoapi/core/modules/scheduler/index.rst b/_sources/autoapi/core/modules/scheduler/index.rst new file mode 100644 index 000000000..3668a763c --- /dev/null +++ b/_sources/autoapi/core/modules/scheduler/index.rst @@ -0,0 +1,46 @@ +:py:mod:`core.modules.scheduler` +================================ + +.. py:module:: core.modules.scheduler + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.modules.scheduler.LRScheduler + + + + +.. py:class:: LRScheduler(optimizer, config) + + + Learning rate scheduler class for torch.optim learning rate schedulers + + .. rubric:: Notes + + If no learning rate scheduler is specified in the config, the default + scheduler is warmup_lr_lambda (fairchem.core.common.utils) rather than no scheduler; + this is for backward-compatibility reasons. To run without an lr scheduler, + specify scheduler: "Null" in the optim section of the config. + + :param optimizer: torch optim object + :type optimizer: obj + :param config: Optim dict from the input config + :type config: dict + + .. py:method:: step(metrics=None, epoch=None) -> None + + + .. py:method:: filter_kwargs(config) + + + .. py:method:: get_lr() + + + diff --git a/_sources/autoapi/core/modules/transforms/index.rst b/_sources/autoapi/core/modules/transforms/index.rst new file mode 100644 index 000000000..b05f54993 --- /dev/null +++ b/_sources/autoapi/core/modules/transforms/index.rst @@ -0,0 +1,37 @@ +:py:mod:`core.modules.transforms` +================================= + +.. py:module:: core.modules.transforms + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.modules.transforms.DataTransforms + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.modules.transforms.decompose_tensor + + + +.. py:class:: DataTransforms(config) + + + .. py:method:: __call__(data_object) + + + +.. py:function:: decompose_tensor(data_object, config) -> torch_geometric.data.Data + + diff --git a/_sources/autoapi/core/preprocessing/atoms_to_graphs/index.rst b/_sources/autoapi/core/preprocessing/atoms_to_graphs/index.rst new file mode 100644 index 000000000..d3cd5b3d7 --- /dev/null +++ b/_sources/autoapi/core/preprocessing/atoms_to_graphs/index.rst @@ -0,0 +1,206 @@ +:py:mod:`core.preprocessing.atoms_to_graphs` +============================================ + +.. py:module:: core.preprocessing.atoms_to_graphs + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.preprocessing.atoms_to_graphs.AtomsToGraphs + + + + +Attributes +~~~~~~~~~~ + +.. 
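Circling back to the ``LRScheduler`` notes above, a hedged sketch of wrapping a torch optimizer; apart from ``scheduler: "Null"``, which the notes document, the config keys shown are assumptions::

    import torch
    from fairchem.core.modules.scheduler import LRScheduler

    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)

    config = {"scheduler": "Null", "lr_initial": 5e-4}  # "lr_initial" is an assumed key
    scheduler = LRScheduler(optimizer, config)
    scheduler.step(metrics=None, epoch=None)            # assumed to be a no-op with "Null"
    print(scheduler.get_lr())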
autoapisummary:: + + core.preprocessing.atoms_to_graphs.AseAtomsAdaptor + core.preprocessing.atoms_to_graphs.shell + + +.. py:data:: AseAtomsAdaptor + + + +.. py:data:: shell + + + +.. py:class:: AtomsToGraphs(max_neigh: int = 200, radius: int = 6, r_energy: bool = False, r_forces: bool = False, r_distances: bool = False, r_edges: bool = True, r_fixed: bool = True, r_pbc: bool = False, r_stress: bool = False, r_data_keys: collections.abc.Sequence[str] | None = None) + + + A class to help convert periodic atomic structures to graphs. + + The AtomsToGraphs class takes in periodic atomic structures in the form of ASE atoms objects and converts + them into graph representations for use in PyTorch. The primary purpose of this class is to determine the + nearest neighbors within some radius around each individual atom, taking into account PBC, and set the + pair index and distance between atom pairs appropriately. Lastly, atomic properties and the graph information + are put into a PyTorch geometric data object for use with PyTorch. + + :param max_neigh: Maximum number of neighbors to consider. + :type max_neigh: int + :param radius: Cutoff radius in Angstroms to search for neighbors. + :type radius: int or float + :param r_energy: Return the energy with other properties. Default is False, so the energy will not be returned. + :type r_energy: bool + :param r_forces: Return the forces with other properties. Default is False, so the forces will not be returned. + :type r_forces: bool + :param r_stress: Return the stress with other properties. Default is False, so the stress will not be returned. + :type r_stress: bool + :param r_distances: Return the distances with other properties. Default is False, so the distances will not be returned. + :type r_distances: bool + :param r_edges: Return interatomic edges with other properties. Default is True, so edges will be returned. + :type r_edges: bool + :param r_fixed: Return a binary vector with flags for fixed (1) vs free (0) atoms. Default is True, so the fixed indices will be returned. + :type r_fixed: bool + :param r_pbc: Return the periodic boundary conditions with other properties. Default is False, so the periodic boundary conditions will not be returned. + :type r_pbc: bool + :param r_data_keys: Return values corresponding to given keys in atoms.info data with other properties. Default is None, so no data will be returned as properties. + :type r_data_keys: sequence of str, optional + + .. attribute:: max_neigh + + Maximum number of neighbors to consider. + + :type: int + + .. attribute:: radius + + Cutoff radius in Angstroms to search for neighbors. + + :type: int or float + + .. attribute:: r_energy + + Return the energy with other properties. Default is False, so the energy will not be returned. + + :type: bool + + .. attribute:: r_forces + + Return the forces with other properties. Default is False, so the forces will not be returned. + + :type: bool + + .. attribute:: r_stress + + Return the stress with other properties. Default is False, so the stress will not be returned. + + :type: bool + + .. attribute:: r_distances + + Return the distances with other properties. Default is False, so the distances will not be returned. + + :type: bool + + .. attribute:: r_edges + + Return interatomic edges with other properties. Default is True, so edges will be returned. + + :type: bool + + .. attribute:: r_fixed + + Return a binary vector with flags for fixed (1) vs free (0) atoms. Default is True, so the fixed indices will be returned. + + :type: bool + + .. attribute:: r_pbc + + Return the periodic boundary conditions with other properties. Default is False, so the periodic boundary conditions will not be returned. + + :type: bool + + .. attribute:: r_data_keys + + Return values corresponding to given keys in atoms.info data with other properties. Default is None, so no data will be returned as properties. + + :type: sequence of str, optional + + .. py:method:: _get_neighbors_pymatgen(atoms: ase.Atoms) + + Performs a nearest neighbor search and returns edge index, distances, + and cell offsets + + + .. py:method:: _reshape_features(c_index, n_index, n_distance, offsets) + + Stacks center and neighbor indices and reshapes distances; + takes in np.arrays and returns torch tensors + + + .. py:method:: convert(atoms: ase.Atoms, sid=None) + + Convert a single atomic structure to a graph. + + :param atoms: An ASE atoms object. + :type atoms: ase.atoms.Atoms + :param sid: An identifier that can be used to track the structure in downstream tasks. Common sids used in OCP datasets include unique strings or integers. + :type sid: uniquely identifying object + + :returns: A torch geometric data object with positions, atomic_numbers, tags, + and optionally, energy, forces, distances, edges, and periodic boundary conditions. + Optional properties can be included by setting r_property=True when constructing the class. + :rtype: data (torch_geometric.data.Data) + + + .. py:method:: convert_all(atoms_collection, processed_file_path: str | None = None, collate_and_save=False, disable_tqdm=False) + + Convert all atoms objects in a list or in an ase.db to graphs. + + :param atoms_collection: Either a list of ASE atoms objects or an ASE database. + :type atoms_collection: list of ase.atoms.Atoms or ase.db.sqlite.SQLite3Database + :param processed_file_path: A string of the path to where the processed file will be written. Default is None. + :type processed_file_path: str + :param collate_and_save: A boolean to collate and save or not. Default is False, so will not write a file. + :type collate_and_save: bool + + :returns: A list of torch geometric data objects containing molecular graph info and properties. + :rtype: data_list (list of torch_geometric.data.Data) + + + diff --git a/_sources/autoapi/core/preprocessing/index.rst b/_sources/autoapi/core/preprocessing/index.rst new file mode 100644 index 000000000..f6acc52e3 --- /dev/null +++ b/_sources/autoapi/core/preprocessing/index.rst @@ -0,0 +1,198 @@ +:py:mod:`core.preprocessing` +============================ + +.. py:module:: core.preprocessing + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + atoms_to_graphs/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.preprocessing.AtomsToGraphs + + + + +.. 
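A minimal usage sketch of ``AtomsToGraphs`` as documented above, converting a single ASE structure; the bulk-Cu example and the attribute names on the returned Data object are assumptions::

    from ase.build import bulk
    from fairchem.core.preprocessing import AtomsToGraphs

    atoms = bulk("Cu", "fcc", a=3.6) * (2, 2, 2)  # small periodic example structure
    a2g = AtomsToGraphs(max_neigh=200, radius=6, r_energy=False, r_forces=False, r_edges=True)

    data = a2g.convert(atoms, sid="cu-example")
    print(data.pos.shape, data.atomic_numbers.shape)  # attribute names assumed from the docstring

    data_list = a2g.convert_all([atoms])  # list of Atoms in, list of Data objects out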
py:class:: AtomsToGraphs(max_neigh: int = 200, radius: int = 6, r_energy: bool = False, r_forces: bool = False, r_distances: bool = False, r_edges: bool = True, r_fixed: bool = True, r_pbc: bool = False, r_stress: bool = False, r_data_keys: collections.abc.Sequence[str] | None = None) + + + A class to help convert periodic atomic structures to graphs. + + The AtomsToGraphs class takes in periodic atomic structures in the form of ASE atoms objects and converts + them into graph representations for use in PyTorch. The primary purpose of this class is to determine the + nearest neighbors within some radius around each individual atom, taking into account PBC, and set the + pair index and distance between atom pairs appropriately. Lastly, atomic properties and the graph information + are put into a PyTorch geometric data object for use with PyTorch. + + :param max_neigh: Maximum number of neighbors to consider. + :type max_neigh: int + :param radius: Cutoff radius in Angstroms to search for neighbors. + :type radius: int or float + :param r_energy: Return the energy with other properties. Default is False, so the energy will not be returned. + :type r_energy: bool + :param r_forces: Return the forces with other properties. Default is False, so the forces will not be returned. + :type r_forces: bool + :param r_stress: Return the stress with other properties. Default is False, so the stress will not be returned. + :type r_stress: bool + :param r_distances: Return the distances with other properties. Default is False, so the distances will not be returned. + :type r_distances: bool + :param r_edges: Return interatomic edges with other properties. Default is True, so edges will be returned. + :type r_edges: bool + :param r_fixed: Return a binary vector with flags for fixed (1) vs free (0) atoms. Default is True, so the fixed indices will be returned. + :type r_fixed: bool + :param r_pbc: Return the periodic boundary conditions with other properties. Default is False, so the periodic boundary conditions will not be returned. + :type r_pbc: bool + :param r_data_keys: Return values corresponding to given keys in atoms.info data with other properties. Default is None, so no data will be returned as properties. + :type r_data_keys: sequence of str, optional + + .. attribute:: max_neigh + + Maximum number of neighbors to consider. + + :type: int + + .. attribute:: radius + + Cutoff radius in Angstroms to search for neighbors. + + :type: int or float + + .. attribute:: r_energy + + Return the energy with other properties. Default is False, so the energy will not be returned. + + :type: bool + + .. attribute:: r_forces + + Return the forces with other properties. Default is False, so the forces will not be returned. + + :type: bool + + .. attribute:: r_stress + + Return the stress with other properties. Default is False, so the stress will not be returned. + + :type: bool + + .. attribute:: r_distances + + Return the distances with other properties. Default is False, so the distances will not be returned. + + :type: bool + + .. attribute:: r_edges + + Return interatomic edges with other properties. Default is True, so edges will be returned. + + :type: bool + + .. attribute:: r_fixed + + Return a binary vector with flags for fixed (1) vs free (0) atoms. Default is True, so the fixed indices will be returned. + + :type: bool + + .. attribute:: r_pbc + + Return the periodic boundary conditions with other properties. Default is False, so the periodic boundary conditions will not be returned. + + :type: bool + + .. attribute:: r_data_keys + + Return values corresponding to given keys in atoms.info data with other properties. Default is None, so no data will be returned as properties. + + :type: sequence of str, optional + + .. py:method:: _get_neighbors_pymatgen(atoms: ase.Atoms) + + Performs a nearest neighbor search and returns edge index, distances, + and cell offsets + + + .. py:method:: _reshape_features(c_index, n_index, n_distance, offsets) + + Stacks center and neighbor indices and reshapes distances; + takes in np.arrays and returns torch tensors + + + .. py:method:: convert(atoms: ase.Atoms, sid=None) + + Convert a single atomic structure to a graph. + + :param atoms: An ASE atoms object. + :type atoms: ase.atoms.Atoms + :param sid: An identifier that can be used to track the structure in downstream tasks. Common sids used in OCP datasets include unique strings or integers. + :type sid: uniquely identifying object + + :returns: A torch geometric data object with positions, atomic_numbers, tags, + and optionally, energy, forces, distances, edges, and periodic boundary conditions. + Optional properties can be included by setting r_property=True when constructing the class. + :rtype: data (torch_geometric.data.Data) + + + .. py:method:: convert_all(atoms_collection, processed_file_path: str | None = None, collate_and_save=False, disable_tqdm=False) + + Convert all atoms objects in a list or in an ase.db to graphs. + + :param atoms_collection: Either a list of ASE atoms objects or an ASE database. + :type atoms_collection: list of ase.atoms.Atoms or ase.db.sqlite.SQLite3Database + :param processed_file_path: A string of the path to where the processed file will be written. Default is None. + :type processed_file_path: str + :param collate_and_save: A boolean to collate and save or not. Default is False, so will not write a file. + :type collate_and_save: bool + + :returns: A list of torch geometric data objects containing molecular graph info and properties. + :rtype: data_list (list of torch_geometric.data.Data) + + + diff --git a/_sources/autoapi/core/scripts/download_data/index.rst b/_sources/autoapi/core/scripts/download_data/index.rst new file mode 100644 index 000000000..6f4d4aea8 --- /dev/null +++ b/_sources/autoapi/core/scripts/download_data/index.rst @@ -0,0 +1,67 @@ +:py:mod:`core.scripts.download_data` +==================================== + +.. py:module:: core.scripts.download_data + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.download_data.get_data + core.scripts.download_data.uncompress_data + core.scripts.download_data.preprocess_data + core.scripts.download_data.verify_count + core.scripts.download_data.cleanup + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.download_data.DOWNLOAD_LINKS_s2ef + core.scripts.download_data.DOWNLOAD_LINKS_is2re + core.scripts.download_data.S2EF_COUNTS + core.scripts.download_data.parser + + +.. py:data:: DOWNLOAD_LINKS_s2ef + :type: dict[str, dict[str, str]] + + + +.. py:data:: DOWNLOAD_LINKS_is2re + :type: dict[str, str] + + + +.. py:data:: S2EF_COUNTS + + + +.. py:function:: get_data(datadir: str, task: str, split: str | None, del_intmd_files: bool) -> None + + +.. 
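A hedged sketch of calling the download helper documented above; the task name is inferred from the ``DOWNLOAD_LINKS_is2re`` attribute, and the other argument values are placeholders::

    from fairchem.core.scripts.download_data import get_data  # normally run as a CLI script

    get_data(
        datadir="data",        # destination directory (placeholder)
        task="is2re",          # task name assumed from DOWNLOAD_LINKS_is2re
        split=None,            # no split selection (assumption)
        del_intmd_files=True,  # remove intermediate downloaded/uncompressed files
    )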
+.. py:function:: uncompress_data(compressed_dir: str) -> str
+
+
+.. py:function:: preprocess_data(uncompressed_dir: str, output_path: str) -> None
+
+
+.. py:function:: verify_count(output_path: str, task: str, split: str) -> None
+
+
+.. py:function:: cleanup(filename: str, dirname: str) -> None
+
+
+.. py:data:: parser
+
+
diff --git a/_sources/autoapi/core/scripts/gif_maker_parallelized/index.rst b/_sources/autoapi/core/scripts/gif_maker_parallelized/index.rst
new file mode 100644
index 000000000..234e708f9
--- /dev/null
+++ b/_sources/autoapi/core/scripts/gif_maker_parallelized/index.rst
@@ -0,0 +1,57 @@
+:py:mod:`core.scripts.gif_maker_parallelized`
+=============================================
+
+.. py:module:: core.scripts.gif_maker_parallelized
+
+.. autoapi-nested-parse::
+
+   Script to generate gifs from traj files.
+
+   Note:
+   This is just a quick way to generate gifs and visualizations from traj files; there are many
+   parameters and settings in the code that people can vary to make the visualizations better.
+   We have chosen these settings as they seem to work fine for most of our systems.
+
+   Requirements:
+
+   povray
+   ffmpeg
+   ase==3.21
+
+
+
+Module Contents
+---------------
+
+
+Functions
+~~~~~~~~~
+
+.. autoapisummary::
+
+   core.scripts.gif_maker_parallelized.pov_from_atoms
+   core.scripts.gif_maker_parallelized.parallelize_generation
+   core.scripts.gif_maker_parallelized.get_parser
+
+
+
+Attributes
+~~~~~~~~~~
+
+.. autoapisummary::
+
+   core.scripts.gif_maker_parallelized.parser
+
+
+.. py:function:: pov_from_atoms(mp_args) -> None
+
+
+.. py:function:: parallelize_generation(traj_path, out_path: str, n_procs) -> None
+
+
+.. py:function:: get_parser() -> argparse.ArgumentParser
+
+
+.. py:data:: parser
+   :type: argparse.ArgumentParser
+
+
+
diff --git a/_sources/autoapi/core/scripts/hpo/index.rst b/_sources/autoapi/core/scripts/hpo/index.rst
new file mode 100644
index 000000000..2a5810b28
--- /dev/null
+++ b/_sources/autoapi/core/scripts/hpo/index.rst
@@ -0,0 +1,24 @@
+:py:mod:`core.scripts.hpo`
+==========================
+
+.. py:module:: core.scripts.hpo
+
+.. autoapi-nested-parse::
+
+   Copyright (c) Facebook, Inc. and its affiliates.
+
+   This source code is licensed under the MIT license found in the
+   LICENSE file in the root directory of this source tree.
+
+
+
+Submodules
+----------
+.. toctree::
+   :titlesonly:
+   :maxdepth: 1
+
+   run_tune/index.rst
+   run_tune_pbt/index.rst
+
+
diff --git a/_sources/autoapi/core/scripts/hpo/run_tune/index.rst b/_sources/autoapi/core/scripts/hpo/run_tune/index.rst
new file mode 100644
index 000000000..c14fbfeb0
--- /dev/null
+++ b/_sources/autoapi/core/scripts/hpo/run_tune/index.rst
@@ -0,0 +1,26 @@
+:py:mod:`core.scripts.hpo.run_tune`
+===================================
+
+.. py:module:: core.scripts.hpo.run_tune
+
+
+Module Contents
+---------------
+
+
+Functions
+~~~~~~~~~
+
+.. autoapisummary::
+
+   core.scripts.hpo.run_tune.ocp_trainable
+   core.scripts.hpo.run_tune.main
+
+
+
+.. py:function:: ocp_trainable(config, checkpoint_dir=None) -> None
+
+
+.. py:function:: main() -> None
+
+
diff --git a/_sources/autoapi/core/scripts/hpo/run_tune_pbt/index.rst b/_sources/autoapi/core/scripts/hpo/run_tune_pbt/index.rst
new file mode 100644
index 000000000..b2a150e80
--- /dev/null
+++ b/_sources/autoapi/core/scripts/hpo/run_tune_pbt/index.rst
@@ -0,0 +1,26 @@
+:py:mod:`core.scripts.hpo.run_tune_pbt`
+=======================================
+
+.. py:module:: core.scripts.hpo.run_tune_pbt
+
+
+Module Contents
+---------------
+
+
+Functions
+~~~~~~~~~
+
+.. 
autoapisummary:: + + core.scripts.hpo.run_tune_pbt.ocp_trainable + core.scripts.hpo.run_tune_pbt.main + + + +.. py:function:: ocp_trainable(config, checkpoint_dir=None) -> None + + +.. py:function:: main() -> None + + diff --git a/_sources/autoapi/core/scripts/index.rst b/_sources/autoapi/core/scripts/index.rst new file mode 100644 index 000000000..690934af7 --- /dev/null +++ b/_sources/autoapi/core/scripts/index.rst @@ -0,0 +1,39 @@ +:py:mod:`core.scripts` +====================== + +.. py:module:: core.scripts + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + hpo/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + download_data/index.rst + gif_maker_parallelized/index.rst + make_challenge_submission_file/index.rst + make_lmdb_sizes/index.rst + make_submission_file/index.rst + preprocess_ef/index.rst + preprocess_relaxed/index.rst + uncompress/index.rst + + diff --git a/_sources/autoapi/core/scripts/make_challenge_submission_file/index.rst b/_sources/autoapi/core/scripts/make_challenge_submission_file/index.rst new file mode 100644 index 000000000..c9787ea4f --- /dev/null +++ b/_sources/autoapi/core/scripts/make_challenge_submission_file/index.rst @@ -0,0 +1,54 @@ +:py:mod:`core.scripts.make_challenge_submission_file` +===================================================== + +.. py:module:: core.scripts.make_challenge_submission_file + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + ONLY for use in the NeurIPS 2021 Open Catalyst Challenge. For all other submissions + please use make_submission_file.py. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.make_challenge_submission_file.write_is2re_relaxations + core.scripts.make_challenge_submission_file.write_predictions + core.scripts.make_challenge_submission_file.main + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.make_challenge_submission_file.parser + + +.. py:function:: write_is2re_relaxations(path: str, filename: str, hybrid) -> None + + +.. py:function:: write_predictions(path: str, filename: str) -> None + + +.. py:function:: main(args: argparse.Namespace) -> None + + +.. py:data:: parser + + + diff --git a/_sources/autoapi/core/scripts/make_lmdb_sizes/index.rst b/_sources/autoapi/core/scripts/make_lmdb_sizes/index.rst new file mode 100644 index 000000000..d900bb153 --- /dev/null +++ b/_sources/autoapi/core/scripts/make_lmdb_sizes/index.rst @@ -0,0 +1,44 @@ +:py:mod:`core.scripts.make_lmdb_sizes` +====================================== + +.. py:module:: core.scripts.make_lmdb_sizes + +.. autoapi-nested-parse:: + + This script provides the functionality to generate metadata.npz files necessary + for load_balancing the DataLoader. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.make_lmdb_sizes.get_data + core.scripts.make_lmdb_sizes.main + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.make_lmdb_sizes.parser + + +.. py:function:: get_data(index) + + +.. py:function:: main(args) -> None + + +.. 
py:data:: parser + + + diff --git a/_sources/autoapi/core/scripts/make_submission_file/index.rst b/_sources/autoapi/core/scripts/make_submission_file/index.rst new file mode 100644 index 000000000..8c89e0864 --- /dev/null +++ b/_sources/autoapi/core/scripts/make_submission_file/index.rst @@ -0,0 +1,55 @@ +:py:mod:`core.scripts.make_submission_file` +=========================================== + +.. py:module:: core.scripts.make_submission_file + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.make_submission_file.write_is2re_relaxations + core.scripts.make_submission_file.write_predictions + core.scripts.make_submission_file.main + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.make_submission_file.SPLITS + core.scripts.make_submission_file.parser + + +.. py:data:: SPLITS + + + +.. py:function:: write_is2re_relaxations(args) -> None + + +.. py:function:: write_predictions(args) -> None + + +.. py:function:: main(args: argparse.Namespace) -> None + + +.. py:data:: parser + + + diff --git a/_sources/autoapi/core/scripts/preprocess_ef/index.rst b/_sources/autoapi/core/scripts/preprocess_ef/index.rst new file mode 100644 index 000000000..84a2afad2 --- /dev/null +++ b/_sources/autoapi/core/scripts/preprocess_ef/index.rst @@ -0,0 +1,49 @@ +:py:mod:`core.scripts.preprocess_ef` +==================================== + +.. py:module:: core.scripts.preprocess_ef + +.. autoapi-nested-parse:: + + Creates LMDB files with extracted graph features from provided *.extxyz files + for the S2EF task. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.preprocess_ef.write_images_to_lmdb + core.scripts.preprocess_ef.main + core.scripts.preprocess_ef.get_parser + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.preprocess_ef.parser + + +.. py:function:: write_images_to_lmdb(mp_arg) + + +.. py:function:: main(args: argparse.Namespace) -> None + + +.. py:function:: get_parser() -> argparse.ArgumentParser + + +.. py:data:: parser + :type: argparse.ArgumentParser + + + diff --git a/_sources/autoapi/core/scripts/preprocess_relaxed/index.rst b/_sources/autoapi/core/scripts/preprocess_relaxed/index.rst new file mode 100644 index 000000000..5e96e685c --- /dev/null +++ b/_sources/autoapi/core/scripts/preprocess_relaxed/index.rst @@ -0,0 +1,44 @@ +:py:mod:`core.scripts.preprocess_relaxed` +========================================= + +.. py:module:: core.scripts.preprocess_relaxed + +.. autoapi-nested-parse:: + + Creates LMDB files with extracted graph features from provided *.extxyz files + for the S2EF task. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.preprocess_relaxed.write_images_to_lmdb + core.scripts.preprocess_relaxed.main + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.preprocess_relaxed.parser + + +.. py:function:: write_images_to_lmdb(mp_arg) -> None + + +.. py:function:: main(args, split) -> None + + +.. 
py:data:: parser + + + diff --git a/_sources/autoapi/core/scripts/uncompress/index.rst b/_sources/autoapi/core/scripts/uncompress/index.rst new file mode 100644 index 000000000..99d9fa193 --- /dev/null +++ b/_sources/autoapi/core/scripts/uncompress/index.rst @@ -0,0 +1,53 @@ +:py:mod:`core.scripts.uncompress` +================================= + +.. py:module:: core.scripts.uncompress + +.. autoapi-nested-parse:: + + Uncompresses downloaded S2EF datasets to be used by the LMDB preprocessing + script - preprocess_ef.py + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.uncompress.read_lzma + core.scripts.uncompress.decompress_list_of_files + core.scripts.uncompress.get_parser + core.scripts.uncompress.main + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.scripts.uncompress.parser + + +.. py:function:: read_lzma(inpfile: str, outfile: str) -> None + + +.. py:function:: decompress_list_of_files(ip_op_pair: tuple[str, str]) -> None + + +.. py:function:: get_parser() -> argparse.ArgumentParser + + +.. py:function:: main(args: argparse.Namespace) -> None + + +.. py:data:: parser + :type: argparse.ArgumentParser + + + diff --git a/_sources/autoapi/core/tasks/index.rst b/_sources/autoapi/core/tasks/index.rst new file mode 100644 index 000000000..d82f71aaf --- /dev/null +++ b/_sources/autoapi/core/tasks/index.rst @@ -0,0 +1,70 @@ +:py:mod:`core.tasks` +==================== + +.. py:module:: core.tasks + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + task/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tasks.PredictTask + core.tasks.RelaxationTask + core.tasks.TrainTask + core.tasks.ValidateTask + + + + +.. py:class:: PredictTask(config) + + + Bases: :py:obj:`BaseTask` + + .. py:method:: run() -> None + + + +.. py:class:: RelaxationTask(config) + + + Bases: :py:obj:`BaseTask` + + .. py:method:: run() -> None + + + +.. py:class:: TrainTask(config) + + + Bases: :py:obj:`BaseTask` + + .. py:method:: _process_error(e: RuntimeError) -> None + + + .. py:method:: run() -> None + + + +.. py:class:: ValidateTask(config) + + + Bases: :py:obj:`BaseTask` + + .. py:method:: run() -> None + + + diff --git a/_sources/autoapi/core/tasks/task/index.rst b/_sources/autoapi/core/tasks/task/index.rst new file mode 100644 index 000000000..d0412859a --- /dev/null +++ b/_sources/autoapi/core/tasks/task/index.rst @@ -0,0 +1,81 @@ +:py:mod:`core.tasks.task` +========================= + +.. py:module:: core.tasks.task + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tasks.task.BaseTask + core.tasks.task.TrainTask + core.tasks.task.PredictTask + core.tasks.task.ValidateTask + core.tasks.task.RelaxationTask + + + + +.. py:class:: BaseTask(config) + + + .. py:method:: setup(trainer) -> None + + + .. py:method:: run() + :abstractmethod: + + + +.. py:class:: TrainTask(config) + + + Bases: :py:obj:`BaseTask` + + .. py:method:: _process_error(e: RuntimeError) -> None + + + .. py:method:: run() -> None + + + +.. py:class:: PredictTask(config) + + + Bases: :py:obj:`BaseTask` + + .. py:method:: run() -> None + + + +.. py:class:: ValidateTask(config) + + + Bases: :py:obj:`BaseTask` + + .. py:method:: run() -> None + + + +.. 
py:class:: RelaxationTask(config) + + + Bases: :py:obj:`BaseTask` + + .. py:method:: run() -> None + + + diff --git a/_sources/autoapi/core/tests/common/test_ase_calculator/index.rst b/_sources/autoapi/core/tests/common/test_ase_calculator/index.rst new file mode 100644 index 000000000..fc32aab12 --- /dev/null +++ b/_sources/autoapi/core/tests/common/test_ase_calculator/index.rst @@ -0,0 +1,46 @@ +:py:mod:`core.tests.common.test_ase_calculator` +=============================================== + +.. py:module:: core.tests.common.test_ase_calculator + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.common.test_ase_calculator.atoms + core.tests.common.test_ase_calculator.checkpoint_path + core.tests.common.test_ase_calculator.test_calculator_setup + core.tests.common.test_ase_calculator.test_relaxation_final_energy + core.tests.common.test_ase_calculator.test_random_seed_final_energy + + + +.. py:function:: atoms() -> ase.Atoms + + +.. py:function:: checkpoint_path(request, tmp_path) + + +.. py:function:: test_calculator_setup(checkpoint_path) + + +.. py:function:: test_relaxation_final_energy(atoms, tmp_path, snapshot) -> None + + +.. py:function:: test_random_seed_final_energy(atoms, tmp_path) + + diff --git a/_sources/autoapi/core/tests/common/test_data_parallel_batch_sampler/index.rst b/_sources/autoapi/core/tests/common/test_data_parallel_batch_sampler/index.rst new file mode 100644 index 000000000..3e3b6f608 --- /dev/null +++ b/_sources/autoapi/core/tests/common/test_data_parallel_batch_sampler/index.rst @@ -0,0 +1,112 @@ +:py:mod:`core.tests.common.test_data_parallel_batch_sampler` +============================================================ + +.. py:module:: core.tests.common.test_data_parallel_batch_sampler + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.common.test_data_parallel_batch_sampler._temp_file + core.tests.common.test_data_parallel_batch_sampler.valid_path_dataset + core.tests.common.test_data_parallel_batch_sampler.invalid_path_dataset + core.tests.common.test_data_parallel_batch_sampler.invalid_dataset + core.tests.common.test_data_parallel_batch_sampler.test_lowercase + core.tests.common.test_data_parallel_batch_sampler.test_invalid_mode + core.tests.common.test_data_parallel_batch_sampler.test_invalid_dataset + core.tests.common.test_data_parallel_batch_sampler.test_invalid_path_dataset + core.tests.common.test_data_parallel_batch_sampler.test_valid_dataset + core.tests.common.test_data_parallel_batch_sampler.test_disabled + core.tests.common.test_data_parallel_batch_sampler.test_single_node + core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_noshuffle + core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_vs_distributed_sampler + core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler + core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_numreplicas + core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_numreplicas_drop_last + + + +Attributes +~~~~~~~~~~ + +.. 
autoapisummary:: + + core.tests.common.test_data_parallel_batch_sampler.DATA + core.tests.common.test_data_parallel_batch_sampler.SIZE_ATOMS + core.tests.common.test_data_parallel_batch_sampler.SIZE_NEIGHBORS + core.tests.common.test_data_parallel_batch_sampler.T_co + + +.. py:data:: DATA + :value: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + + + +.. py:data:: SIZE_ATOMS + :value: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + + + +.. py:data:: SIZE_NEIGHBORS + :value: [4, 4, 4, 4, 4, 4, 4, 4, 4, 4] + + + +.. py:data:: T_co + + + +.. py:function:: _temp_file(name: str) + + +.. py:function:: valid_path_dataset() + + +.. py:function:: invalid_path_dataset() + + +.. py:function:: invalid_dataset() + + +.. py:function:: test_lowercase(invalid_dataset) -> None + + +.. py:function:: test_invalid_mode(invalid_dataset) -> None + + +.. py:function:: test_invalid_dataset(invalid_dataset) -> None + + +.. py:function:: test_invalid_path_dataset(invalid_path_dataset) -> None + + +.. py:function:: test_valid_dataset(valid_path_dataset) -> None + + +.. py:function:: test_disabled(valid_path_dataset) -> None + + +.. py:function:: test_single_node(valid_path_dataset) -> None + + +.. py:function:: test_stateful_distributed_sampler_noshuffle(valid_path_dataset) -> None + + +.. py:function:: test_stateful_distributed_sampler_vs_distributed_sampler(valid_path_dataset) -> None + + +.. py:function:: test_stateful_distributed_sampler(valid_path_dataset) -> None + + +.. py:function:: test_stateful_distributed_sampler_numreplicas(valid_path_dataset) -> None + + +.. py:function:: test_stateful_distributed_sampler_numreplicas_drop_last(valid_path_dataset) -> None + + diff --git a/_sources/autoapi/core/tests/common/test_yaml_loader/index.rst b/_sources/autoapi/core/tests/common/test_yaml_loader/index.rst new file mode 100644 index 000000000..70eb354b9 --- /dev/null +++ b/_sources/autoapi/core/tests/common/test_yaml_loader/index.rst @@ -0,0 +1,34 @@ +:py:mod:`core.tests.common.test_yaml_loader` +============================================ + +.. py:module:: core.tests.common.test_yaml_loader + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.common.test_yaml_loader.invalid_yaml_config + core.tests.common.test_yaml_loader.valid_yaml_config + core.tests.common.test_yaml_loader.test_invalid_config + core.tests.common.test_yaml_loader.test_valid_config + + + +.. py:function:: invalid_yaml_config() + + +.. py:function:: valid_yaml_config() + + +.. py:function:: test_invalid_config(invalid_yaml_config) + + +.. py:function:: test_valid_config(valid_yaml_config) + + diff --git a/_sources/autoapi/core/tests/conftest/index.rst b/_sources/autoapi/core/tests/conftest/index.rst new file mode 100644 index 000000000..5ac643304 --- /dev/null +++ b/_sources/autoapi/core/tests/conftest/index.rst @@ -0,0 +1,110 @@ +:py:mod:`core.tests.conftest` +============================= + +.. py:module:: core.tests.conftest + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tests.conftest.Approx + core.tests.conftest._ApproxNumpyFormatter + core.tests.conftest.ApproxExtension + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.conftest._try_parse_approx + core.tests.conftest.snapshot + + + +Attributes +~~~~~~~~~~ + +.. 
autoapisummary::
+
+   core.tests.conftest.DEFAULT_RTOL
+   core.tests.conftest.DEFAULT_ATOL
+
+
+.. py:data:: DEFAULT_RTOL
+   :value: 0.001
+
+
+
+.. py:data:: DEFAULT_ATOL
+   :value: 0.001
+
+
+
+.. py:class:: Approx(data: numpy.ndarray | list, *, rtol: float | None = None, atol: float | None = None)
+
+   Wrapper object for approximately compared numpy arrays.
+
+   .. py:method:: __repr__() -> str
+
+      Return repr(self).
+
+
+
+.. py:class:: _ApproxNumpyFormatter(data)
+
+   .. py:method:: __repr__() -> str
+
+      Return repr(self).
+
+
+
+.. py:function:: _try_parse_approx(data: syrupy.types.SerializableData) -> Approx | None
+
+   Parse the string representation of an Approx object.
+   We can just use eval here, since we know the string is safe.
+
+
+.. py:class:: ApproxExtension
+
+   Bases: :py:obj:`syrupy.extensions.amber.AmberSnapshotExtension`
+
+   By default, syrupy uses the __repr__ of the expected (snapshot) and actual values
+   to serialize them into strings. Then, it compares the strings to see if they match.
+
+   However, this behavior is not ideal for comparing floats/ndarrays. For example,
+   if we have a snapshot with a float value of 0.1, and the actual value is 0.10000000000000001,
+   then the strings will not match, even though the values are effectively equal.
+
+   To work around this, we override the serialize method to serialize the expected value
+   into a special representation. Then, we override the matches function (which originally does a
+   simple string comparison) to parse the expected and actual values into numpy arrays.
+   Finally, we compare the arrays using np.allclose.
+
+   .. py:method:: matches(*, serialized_data: syrupy.types.SerializableData, snapshot_data: syrupy.types.SerializableData) -> bool
+
+
+   .. py:method:: serialize(data, **kwargs)
+
+
+
+.. py:function:: snapshot(snapshot)
+
+
diff --git a/_sources/autoapi/core/tests/datasets/test_ase_datasets/index.rst b/_sources/autoapi/core/tests/datasets/test_ase_datasets/index.rst
new file mode 100644
index 000000000..05ac8824a
--- /dev/null
+++ b/_sources/autoapi/core/tests/datasets/test_ase_datasets/index.rst
@@ -0,0 +1,63 @@
+:py:mod:`core.tests.datasets.test_ase_datasets`
+===============================================
+
+.. py:module:: core.tests.datasets.test_ase_datasets
+
+
+Module Contents
+---------------
+
+
+Functions
+~~~~~~~~~
+
+.. autoapisummary::
+
+   core.tests.datasets.test_ase_datasets.ase_dataset
+   core.tests.datasets.test_ase_datasets.test_ase_dataset
+   core.tests.datasets.test_ase_datasets.test_ase_read_dataset
+   core.tests.datasets.test_ase_datasets.test_ase_metadata_guesser
+   core.tests.datasets.test_ase_datasets.test_db_add_delete
+   core.tests.datasets.test_ase_datasets.test_ase_multiread_dataset
+   core.tests.datasets.test_ase_datasets.test_empty_dataset
+
+
+
+Attributes
+~~~~~~~~~~
+
+.. autoapisummary::
+
+   core.tests.datasets.test_ase_datasets.structures
+   core.tests.datasets.test_ase_datasets.calc
+
+
+.. py:data:: structures
+
+
+
+.. py:data:: calc
+
+
+
+.. py:function:: ase_dataset(request, tmp_path_factory)
+
+
+.. py:function:: test_ase_dataset(ase_dataset)
+
+
+.. py:function:: test_ase_read_dataset(tmp_path) -> None
+
+
+.. py:function:: test_ase_metadata_guesser(ase_dataset) -> None
+
+
+.. py:function:: test_db_add_delete(tmp_path) -> None
+
+
+.. py:function:: test_ase_multiread_dataset(tmp_path) -> None
+
+
+.. 
py:function:: test_empty_dataset(tmp_path) + + diff --git a/_sources/autoapi/core/tests/datasets/test_ase_lmdb/index.rst b/_sources/autoapi/core/tests/datasets/test_ase_lmdb/index.rst new file mode 100644 index 000000000..9f3c97814 --- /dev/null +++ b/_sources/autoapi/core/tests/datasets/test_ase_lmdb/index.rst @@ -0,0 +1,82 @@ +:py:mod:`core.tests.datasets.test_ase_lmdb` +=========================================== + +.. py:module:: core.tests.datasets.test_ase_lmdb + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.datasets.test_ase_lmdb.generate_random_structure + core.tests.datasets.test_ase_lmdb.ase_lmbd_path + core.tests.datasets.test_ase_lmdb.test_aselmdb_write + core.tests.datasets.test_ase_lmdb.test_aselmdb_count + core.tests.datasets.test_ase_lmdb.test_aselmdb_delete + core.tests.datasets.test_ase_lmdb.test_aselmdb_randomreads + core.tests.datasets.test_ase_lmdb.test_aselmdb_constraintread + core.tests.datasets.test_ase_lmdb.test_update_keyvalue_pair + core.tests.datasets.test_ase_lmdb.test_update_atoms + core.tests.datasets.test_ase_lmdb.test_metadata + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + core.tests.datasets.test_ase_lmdb.N_WRITES + core.tests.datasets.test_ase_lmdb.N_READS + core.tests.datasets.test_ase_lmdb.test_structures + + +.. py:data:: N_WRITES + :value: 100 + + + +.. py:data:: N_READS + :value: 200 + + + +.. py:data:: test_structures + + + +.. py:function:: generate_random_structure() + + +.. py:function:: ase_lmbd_path(tmp_path_factory) + + +.. py:function:: test_aselmdb_write(ase_lmbd_path) -> None + + +.. py:function:: test_aselmdb_count(ase_lmbd_path) -> None + + +.. py:function:: test_aselmdb_delete(ase_lmbd_path) -> None + + +.. py:function:: test_aselmdb_randomreads(ase_lmbd_path) -> None + + +.. py:function:: test_aselmdb_constraintread(ase_lmbd_path) -> None + + +.. py:function:: test_update_keyvalue_pair(ase_lmbd_path) -> None + + +.. py:function:: test_update_atoms(ase_lmbd_path) -> None + + +.. py:function:: test_metadata(ase_lmbd_path) -> None + + diff --git a/_sources/autoapi/core/tests/datasets/test_utils/index.rst b/_sources/autoapi/core/tests/datasets/test_utils/index.rst new file mode 100644 index 000000000..ac66a2c6c --- /dev/null +++ b/_sources/autoapi/core/tests/datasets/test_utils/index.rst @@ -0,0 +1,26 @@ +:py:mod:`core.tests.datasets.test_utils` +======================================== + +.. py:module:: core.tests.datasets.test_utils + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.datasets.test_utils.pyg_data + core.tests.datasets.test_utils.test_rename_data_object_keys + + + +.. py:function:: pyg_data() + + +.. py:function:: test_rename_data_object_keys(pyg_data) + + diff --git a/_sources/autoapi/core/tests/evaluator/test_evaluator/index.rst b/_sources/autoapi/core/tests/evaluator/test_evaluator/index.rst new file mode 100644 index 000000000..1e1754489 --- /dev/null +++ b/_sources/autoapi/core/tests/evaluator/test_evaluator/index.rst @@ -0,0 +1,80 @@ +:py:mod:`core.tests.evaluator.test_evaluator` +============================================= + +.. py:module:: core.tests.evaluator.test_evaluator + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. 
autoapisummary:: + + core.tests.evaluator.test_evaluator.TestMetrics + core.tests.evaluator.test_evaluator.TestS2EFEval + core.tests.evaluator.test_evaluator.TestIS2RSEval + core.tests.evaluator.test_evaluator.TestIS2REEval + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.evaluator.test_evaluator.load_evaluator_s2ef + core.tests.evaluator.test_evaluator.load_evaluator_is2rs + core.tests.evaluator.test_evaluator.load_evaluator_is2re + + + +.. py:function:: load_evaluator_s2ef(request) -> None + + +.. py:function:: load_evaluator_is2rs(request) -> None + + +.. py:function:: load_evaluator_is2re(request) -> None + + +.. py:class:: TestMetrics + + + .. py:method:: test_cosine_similarity() -> None + + + .. py:method:: test_magnitude_error() -> None + + + +.. py:class:: TestS2EFEval + + + .. py:method:: test_metrics_exist() -> None + + + +.. py:class:: TestIS2RSEval + + + .. py:method:: test_metrics_exist() -> None + + + +.. py:class:: TestIS2REEval + + + .. py:method:: test_metrics_exist() -> None + + + diff --git a/_sources/autoapi/core/tests/index.rst b/_sources/autoapi/core/tests/index.rst new file mode 100644 index 000000000..516683842 --- /dev/null +++ b/_sources/autoapi/core/tests/index.rst @@ -0,0 +1,32 @@ +:py:mod:`core.tests` +==================== + +.. py:module:: core.tests + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + preprocessing/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + conftest/index.rst + + diff --git a/_sources/autoapi/core/tests/models/test_dimenetpp/index.rst b/_sources/autoapi/core/tests/models/test_dimenetpp/index.rst new file mode 100644 index 000000000..073988ce7 --- /dev/null +++ b/_sources/autoapi/core/tests/models/test_dimenetpp/index.rst @@ -0,0 +1,52 @@ +:py:mod:`core.tests.models.test_dimenetpp` +========================================== + +.. py:module:: core.tests.models.test_dimenetpp + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tests.models.test_dimenetpp.TestDimeNet + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.models.test_dimenetpp.load_data + core.tests.models.test_dimenetpp.load_model + + + +.. py:function:: load_data(request) -> None + + +.. py:function:: load_model(request) -> None + + +.. py:class:: TestDimeNet + + + .. py:method:: test_rotation_invariance() -> None + + + .. py:method:: test_energy_force_shape(snapshot) -> None + + + diff --git a/_sources/autoapi/core/tests/models/test_equiformer_v2/index.rst b/_sources/autoapi/core/tests/models/test_equiformer_v2/index.rst new file mode 100644 index 000000000..fa8a65e4c --- /dev/null +++ b/_sources/autoapi/core/tests/models/test_equiformer_v2/index.rst @@ -0,0 +1,57 @@ +:py:mod:`core.tests.models.test_equiformer_v2` +============================================== + +.. py:module:: core.tests.models.test_equiformer_v2 + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. 
+ + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tests.models.test_equiformer_v2.TestEquiformerV2 + core.tests.models.test_equiformer_v2.TestMPrimaryLPrimary + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.models.test_equiformer_v2.load_data + core.tests.models.test_equiformer_v2.load_model + + + +.. py:function:: load_data(request) + + +.. py:function:: load_model(request) + + +.. py:class:: TestEquiformerV2 + + + .. py:method:: test_energy_force_shape(snapshot) + + + +.. py:class:: TestMPrimaryLPrimary + + + .. py:method:: test_mprimary_lprimary_mappings() + + + diff --git a/_sources/autoapi/core/tests/models/test_escn/index.rst b/_sources/autoapi/core/tests/models/test_escn/index.rst new file mode 100644 index 000000000..0fcd327a5 --- /dev/null +++ b/_sources/autoapi/core/tests/models/test_escn/index.rst @@ -0,0 +1,26 @@ +:py:mod:`core.tests.models.test_escn` +===================================== + +.. py:module:: core.tests.models.test_escn + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tests.models.test_escn.TestMPrimaryLPrimary + + + + +.. py:class:: TestMPrimaryLPrimary + + + .. py:method:: test_mprimary_lprimary_mappings() + + + diff --git a/_sources/autoapi/core/tests/models/test_gemnet/index.rst b/_sources/autoapi/core/tests/models/test_gemnet/index.rst new file mode 100644 index 000000000..f962a9d23 --- /dev/null +++ b/_sources/autoapi/core/tests/models/test_gemnet/index.rst @@ -0,0 +1,52 @@ +:py:mod:`core.tests.models.test_gemnet` +======================================= + +.. py:module:: core.tests.models.test_gemnet + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tests.models.test_gemnet.TestGemNetT + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.models.test_gemnet.load_data + core.tests.models.test_gemnet.load_model + + + +.. py:function:: load_data(request) -> None + + +.. py:function:: load_model(request) -> None + + +.. py:class:: TestGemNetT + + + .. py:method:: test_rotation_invariance() -> None + + + .. py:method:: test_energy_force_shape(snapshot) -> None + + + diff --git a/_sources/autoapi/core/tests/models/test_gemnet_oc/index.rst b/_sources/autoapi/core/tests/models/test_gemnet_oc/index.rst new file mode 100644 index 000000000..c589bcd51 --- /dev/null +++ b/_sources/autoapi/core/tests/models/test_gemnet_oc/index.rst @@ -0,0 +1,52 @@ +:py:mod:`core.tests.models.test_gemnet_oc` +========================================== + +.. py:module:: core.tests.models.test_gemnet_oc + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tests.models.test_gemnet_oc.TestGemNetOC + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.models.test_gemnet_oc.load_data + core.tests.models.test_gemnet_oc.load_model + + + +.. py:function:: load_data(request) -> None + + +.. py:function:: load_model(request) -> None + + +.. py:class:: TestGemNetOC + + + .. py:method:: test_rotation_invariance() -> None + + + .. 
py:method:: test_energy_force_shape(snapshot) -> None + + + diff --git a/_sources/autoapi/core/tests/models/test_gemnet_oc_scaling_mismatch/index.rst b/_sources/autoapi/core/tests/models/test_gemnet_oc_scaling_mismatch/index.rst new file mode 100644 index 000000000..c28935b5c --- /dev/null +++ b/_sources/autoapi/core/tests/models/test_gemnet_oc_scaling_mismatch/index.rst @@ -0,0 +1,43 @@ +:py:mod:`core.tests.models.test_gemnet_oc_scaling_mismatch` +=========================================================== + +.. py:module:: core.tests.models.test_gemnet_oc_scaling_mismatch + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tests.models.test_gemnet_oc_scaling_mismatch.TestGemNetOC + + + + +.. py:class:: TestGemNetOC + + + .. py:method:: test_no_scaling_mismatch() -> None + + + .. py:method:: test_scaling_mismatch() -> None + + + .. py:method:: test_no_file_exists() -> None + + + .. py:method:: test_not_fitted() -> None + + + diff --git a/_sources/autoapi/core/tests/models/test_schnet/index.rst b/_sources/autoapi/core/tests/models/test_schnet/index.rst new file mode 100644 index 000000000..013f6b081 --- /dev/null +++ b/_sources/autoapi/core/tests/models/test_schnet/index.rst @@ -0,0 +1,52 @@ +:py:mod:`core.tests.models.test_schnet` +======================================= + +.. py:module:: core.tests.models.test_schnet + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tests.models.test_schnet.TestSchNet + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.models.test_schnet.load_data + core.tests.models.test_schnet.load_model + + + +.. py:function:: load_data(request) -> None + + +.. py:function:: load_model(request) -> None + + +.. py:class:: TestSchNet + + + .. py:method:: test_rotation_invariance() -> None + + + .. py:method:: test_energy_force_shape(snapshot) -> None + + + diff --git a/_sources/autoapi/core/tests/preprocessing/index.rst b/_sources/autoapi/core/tests/preprocessing/index.rst new file mode 100644 index 000000000..5dbe1bb9a --- /dev/null +++ b/_sources/autoapi/core/tests/preprocessing/index.rst @@ -0,0 +1,25 @@ +:py:mod:`core.tests.preprocessing` +================================== + +.. py:module:: core.tests.preprocessing + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + test_atoms_to_graphs/index.rst + test_pbc/index.rst + test_radius_graph_pbc/index.rst + + diff --git a/_sources/autoapi/core/tests/preprocessing/test_atoms_to_graphs/index.rst b/_sources/autoapi/core/tests/preprocessing/test_atoms_to_graphs/index.rst new file mode 100644 index 000000000..46546d4ee --- /dev/null +++ b/_sources/autoapi/core/tests/preprocessing/test_atoms_to_graphs/index.rst @@ -0,0 +1,51 @@ +:py:mod:`core.tests.preprocessing.test_atoms_to_graphs` +======================================================= + +.. 
py:module:: core.tests.preprocessing.test_atoms_to_graphs + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tests.preprocessing.test_atoms_to_graphs.TestAtomsToGraphs + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.preprocessing.test_atoms_to_graphs.atoms_to_graphs_internals + + + +.. py:function:: atoms_to_graphs_internals(request) -> None + + +.. py:class:: TestAtomsToGraphs + + + .. py:method:: test_gen_neighbors_pymatgen() -> None + + + .. py:method:: test_convert() -> None + + + .. py:method:: test_convert_all() -> None + + + diff --git a/_sources/autoapi/core/tests/preprocessing/test_pbc/index.rst b/_sources/autoapi/core/tests/preprocessing/test_pbc/index.rst new file mode 100644 index 000000000..3e8ffe2fb --- /dev/null +++ b/_sources/autoapi/core/tests/preprocessing/test_pbc/index.rst @@ -0,0 +1,45 @@ +:py:mod:`core.tests.preprocessing.test_pbc` +=========================================== + +.. py:module:: core.tests.preprocessing.test_pbc + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tests.preprocessing.test_pbc.TestPBC + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.preprocessing.test_pbc.load_data + + + +.. py:function:: load_data(request) -> None + + +.. py:class:: TestPBC + + + .. py:method:: test_pbc_distances() -> None + + + diff --git a/_sources/autoapi/core/tests/preprocessing/test_radius_graph_pbc/index.rst b/_sources/autoapi/core/tests/preprocessing/test_radius_graph_pbc/index.rst new file mode 100644 index 000000000..a754b5af8 --- /dev/null +++ b/_sources/autoapi/core/tests/preprocessing/test_radius_graph_pbc/index.rst @@ -0,0 +1,55 @@ +:py:mod:`core.tests.preprocessing.test_radius_graph_pbc` +======================================================== + +.. py:module:: core.tests.preprocessing.test_radius_graph_pbc + +.. autoapi-nested-parse:: + + Copyright (c) Facebook, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.tests.preprocessing.test_radius_graph_pbc.TestRadiusGraphPBC + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + core.tests.preprocessing.test_radius_graph_pbc.load_data + core.tests.preprocessing.test_radius_graph_pbc.check_features_match + + + +.. py:function:: load_data(request) -> None + + +.. py:function:: check_features_match(edge_index_1, cell_offsets_1, edge_index_2, cell_offsets_2) -> bool + + +.. py:class:: TestRadiusGraphPBC + + + .. py:method:: test_radius_graph_pbc() -> None + + + .. py:method:: test_bulk() -> None + + + .. py:method:: test_molecule() -> None + + + diff --git a/_sources/autoapi/core/trainers/base_trainer/index.rst b/_sources/autoapi/core/trainers/base_trainer/index.rst new file mode 100644 index 000000000..6e8efb4ed --- /dev/null +++ b/_sources/autoapi/core/trainers/base_trainer/index.rst @@ -0,0 +1,103 @@ +:py:mod:`core.trainers.base_trainer` +==================================== + +.. 
py:module:: core.trainers.base_trainer + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.trainers.base_trainer.BaseTrainer + + + + +.. py:class:: BaseTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier: str, timestamp_id: str | None = None, run_dir: str | None = None, is_debug: bool = False, print_every: int = 100, seed: int | None = None, logger: str = 'wandb', local_rank: int = 0, amp: bool = False, cpu: bool = False, name: str = 'ocp', slurm=None, noddp: bool = False) + + + Bases: :py:obj:`abc.ABC` + + Helper class that provides a standard way to create an ABC using + inheritance. + + .. py:property:: _unwrapped_model + + + .. py:method:: train(disable_eval_tqdm: bool = False) -> None + :abstractmethod: + + Run model training iterations. + + + .. py:method:: _get_timestamp(device: torch.device, suffix: str | None) -> str + :staticmethod: + + + .. py:method:: load() -> None + + + .. py:method:: set_seed(seed) -> None + + + .. py:method:: load_seed_from_config() -> None + + + .. py:method:: load_logger() -> None + + + .. py:method:: get_sampler(dataset, batch_size: int, shuffle: bool) -> fairchem.core.common.data_parallel.BalancedBatchSampler + + + .. py:method:: get_dataloader(dataset, sampler) -> torch.utils.data.DataLoader + + + .. py:method:: load_datasets() -> None + + + .. py:method:: load_task() + + + .. py:method:: load_model() -> None + + + .. py:method:: load_checkpoint(checkpoint_path: str, checkpoint: dict | None = None) -> None + + + .. py:method:: load_loss() -> None + + + .. py:method:: load_optimizer() -> None + + + .. py:method:: load_extras() -> None + + + .. py:method:: save(metrics=None, checkpoint_file: str = 'checkpoint.pt', training_state: bool = True) -> str | None + + + .. py:method:: update_best(primary_metric, val_metrics, disable_eval_tqdm: bool = True) -> None + + + .. py:method:: validate(split: str = 'val', disable_tqdm: bool = False) + + + .. py:method:: _backward(loss) -> None + + + .. py:method:: save_results(predictions: dict[str, numpy.typing.NDArray], results_file: str | None, keys: collections.abc.Sequence[str] | None = None) -> None + + + diff --git a/_sources/autoapi/core/trainers/index.rst b/_sources/autoapi/core/trainers/index.rst new file mode 100644 index 000000000..8e9c6fb6d --- /dev/null +++ b/_sources/autoapi/core/trainers/index.rst @@ -0,0 +1,184 @@ +:py:mod:`core.trainers` +======================= + +.. py:module:: core.trainers + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + base_trainer/index.rst + ocp_trainer/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.trainers.BaseTrainer + core.trainers.OCPTrainer + + + + +.. py:class:: BaseTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier: str, timestamp_id: str | None = None, run_dir: str | None = None, is_debug: bool = False, print_every: int = 100, seed: int | None = None, logger: str = 'wandb', local_rank: int = 0, amp: bool = False, cpu: bool = False, name: str = 'ocp', slurm=None, noddp: bool = False) + + + Bases: :py:obj:`abc.ABC` + + Helper class that provides a standard way to create an ABC using + inheritance. + + .. py:property:: _unwrapped_model + + + .. 
py:method:: train(disable_eval_tqdm: bool = False) -> None + :abstractmethod: + + Run model training iterations. + + + .. py:method:: _get_timestamp(device: torch.device, suffix: str | None) -> str + :staticmethod: + + + .. py:method:: load() -> None + + + .. py:method:: set_seed(seed) -> None + + + .. py:method:: load_seed_from_config() -> None + + + .. py:method:: load_logger() -> None + + + .. py:method:: get_sampler(dataset, batch_size: int, shuffle: bool) -> fairchem.core.common.data_parallel.BalancedBatchSampler + + + .. py:method:: get_dataloader(dataset, sampler) -> torch.utils.data.DataLoader + + + .. py:method:: load_datasets() -> None + + + .. py:method:: load_task() + + + .. py:method:: load_model() -> None + + + .. py:method:: load_checkpoint(checkpoint_path: str, checkpoint: dict | None = None) -> None + + + .. py:method:: load_loss() -> None + + + .. py:method:: load_optimizer() -> None + + + .. py:method:: load_extras() -> None + + + .. py:method:: save(metrics=None, checkpoint_file: str = 'checkpoint.pt', training_state: bool = True) -> str | None + + + .. py:method:: update_best(primary_metric, val_metrics, disable_eval_tqdm: bool = True) -> None + + + .. py:method:: validate(split: str = 'val', disable_tqdm: bool = False) + + + .. py:method:: _backward(loss) -> None + + + .. py:method:: save_results(predictions: dict[str, numpy.typing.NDArray], results_file: str | None, keys: collections.abc.Sequence[str] | None = None) -> None + + + +.. py:class:: OCPTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp') + + + Bases: :py:obj:`fairchem.core.trainers.base_trainer.BaseTrainer` + + Trainer class for the Structure to Energy & Force (S2EF) and Initial State to + Relaxed State (IS2RS) tasks. + + .. note:: + + Examples of configurations for task, model, dataset and optimizer + can be found in `configs/ocp_s2ef `_ + and `configs/ocp_is2rs `_. + + :param task: Task configuration. + :type task: dict + :param model: Model configuration. + :type model: dict + :param outputs: Output property configuration. + :type outputs: dict + :param dataset: Dataset configuration. The dataset needs to be a SinglePointLMDB dataset. + :type dataset: dict + :param optimizer: Optimizer configuration. + :type optimizer: dict + :param loss_fns: Loss function configuration. + :type loss_fns: dict + :param eval_metrics: Evaluation metrics configuration. + :type eval_metrics: dict + :param identifier: Experiment identifier that is appended to log directory. + :type identifier: str + :param run_dir: Path to the run directory where logs are to be saved. + (default: :obj:`None`) + :type run_dir: str, optional + :param is_debug: Run in debug mode. + (default: :obj:`False`) + :type is_debug: bool, optional + :param print_every: Frequency of printing logs. + (default: :obj:`100`) + :type print_every: int, optional + :param seed: Random number seed. + (default: :obj:`None`) + :type seed: int, optional + :param logger: Type of logger to be used. + (default: :obj:`wandb`) + :type logger: str, optional + :param local_rank: Local rank of the process, only applicable for distributed training. + (default: :obj:`0`) + :type local_rank: int, optional + :param amp: Run using automatic mixed precision. + (default: :obj:`False`) + :type amp: bool, optional + :param slurm: Slurm configuration. Currently just for keeping track. 
+ (default: :obj:`{}`) + :type slurm: dict + :param noddp: Run model without DDP. + :type noddp: bool, optional + + .. py:method:: train(disable_eval_tqdm: bool = False) -> None + + Run model training iterations. + + + .. py:method:: _forward(batch) + + + .. py:method:: _compute_loss(out, batch) + + + .. py:method:: _compute_metrics(out, batch, evaluator, metrics=None) + + + .. py:method:: predict(data_loader, per_image: bool = True, results_file: str | None = None, disable_tqdm: bool = False) + + + .. py:method:: run_relaxations(split='val') + + + diff --git a/_sources/autoapi/core/trainers/ocp_trainer/index.rst b/_sources/autoapi/core/trainers/ocp_trainer/index.rst new file mode 100644 index 000000000..40d11d33a --- /dev/null +++ b/_sources/autoapi/core/trainers/ocp_trainer/index.rst @@ -0,0 +1,105 @@ +:py:mod:`core.trainers.ocp_trainer` +=================================== + +.. py:module:: core.trainers.ocp_trainer + +.. autoapi-nested-parse:: + + Copyright (c) Meta, Inc. and its affiliates. + + This source code is licensed under the MIT license found in the + LICENSE file in the root directory of this source tree. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + core.trainers.ocp_trainer.OCPTrainer + + + + +.. py:class:: OCPTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp') + + + Bases: :py:obj:`fairchem.core.trainers.base_trainer.BaseTrainer` + + Trainer class for the Structure to Energy & Force (S2EF) and Initial State to + Relaxed State (IS2RS) tasks. + + .. note:: + + Examples of configurations for task, model, dataset and optimizer + can be found in `configs/ocp_s2ef `_ + and `configs/ocp_is2rs `_. + + :param task: Task configuration. + :type task: dict + :param model: Model configuration. + :type model: dict + :param outputs: Output property configuration. + :type outputs: dict + :param dataset: Dataset configuration. The dataset needs to be a SinglePointLMDB dataset. + :type dataset: dict + :param optimizer: Optimizer configuration. + :type optimizer: dict + :param loss_fns: Loss function configuration. + :type loss_fns: dict + :param eval_metrics: Evaluation metrics configuration. + :type eval_metrics: dict + :param identifier: Experiment identifier that is appended to log directory. + :type identifier: str + :param run_dir: Path to the run directory where logs are to be saved. + (default: :obj:`None`) + :type run_dir: str, optional + :param is_debug: Run in debug mode. + (default: :obj:`False`) + :type is_debug: bool, optional + :param print_every: Frequency of printing logs. + (default: :obj:`100`) + :type print_every: int, optional + :param seed: Random number seed. + (default: :obj:`None`) + :type seed: int, optional + :param logger: Type of logger to be used. + (default: :obj:`wandb`) + :type logger: str, optional + :param local_rank: Local rank of the process, only applicable for distributed training. + (default: :obj:`0`) + :type local_rank: int, optional + :param amp: Run using automatic mixed precision. + (default: :obj:`False`) + :type amp: bool, optional + :param slurm: Slurm configuration. Currently just for keeping track. + (default: :obj:`{}`) + :type slurm: dict + :param noddp: Run model without DDP. + :type noddp: bool, optional + + .. 
py:method:: train(disable_eval_tqdm: bool = False) -> None + + Run model training iterations. + + + .. py:method:: _forward(batch) + + + .. py:method:: _compute_loss(out, batch) + + + .. py:method:: _compute_metrics(out, batch, evaluator, metrics=None) + + + .. py:method:: predict(data_loader, per_image: bool = True, results_file: str | None = None, disable_tqdm: bool = False) + + + .. py:method:: run_relaxations(split='val') + + + diff --git a/_sources/autoapi/data/index.rst b/_sources/autoapi/data/index.rst new file mode 100644 index 000000000..d619c9314 --- /dev/null +++ b/_sources/autoapi/data/index.rst @@ -0,0 +1,17 @@ +:py:mod:`data` +============== + +.. py:module:: data + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + oc/index.rst + odac/index.rst + om/index.rst + + diff --git a/_sources/autoapi/data/oc/core/adsorbate/index.rst b/_sources/autoapi/data/oc/core/adsorbate/index.rst new file mode 100644 index 000000000..b0d2cd701 --- /dev/null +++ b/_sources/autoapi/data/oc/core/adsorbate/index.rst @@ -0,0 +1,75 @@ +:py:mod:`data.oc.core.adsorbate` +================================ + +.. py:module:: data.oc.core.adsorbate + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.core.adsorbate.Adsorbate + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.core.adsorbate.randomly_rotate_adsorbate + + + +.. py:class:: Adsorbate(adsorbate_atoms: ase.Atoms = None, adsorbate_id_from_db: int = None, adsorbate_smiles_from_db: str = None, adsorbate_db_path: str = ADSORBATES_PKL_PATH, adsorbate_db: Dict[int, Tuple[Any, Ellipsis]] = None, adsorbate_binding_indices: list = None) + + + Initializes an adsorbate object in one of 4 ways: + - Directly pass in an ase.Atoms object. + For this, you should also provide the index of the binding atom. + - Pass in index of adsorbate to select from adsorbate database. + - Pass in the SMILES string of the adsorbate to select from the database. + - Randomly sample an adsorbate from the adsorbate database. + + :param adsorbate_atoms: Adsorbate structure. + :type adsorbate_atoms: ase.Atoms + :param adsorbate_id_from_db: Index of adsorbate to select. + :type adsorbate_id_from_db: int + :param adsorbate_smiles_from_db: A SMILES string of the desired adsorbate. + :type adsorbate_smiles_from_db: str + :param adsorbate_db_path: Path to adsorbate database. + :type adsorbate_db_path: str + :param adsorbate_binding_indices: The index/indices of the adsorbate atoms which are expected to bind. + :type adsorbate_binding_indices: list + + .. py:method:: __len__() + + + .. py:method:: __str__() + + Return str(self). + + + .. py:method:: __repr__() + + Return repr(self). + + + .. py:method:: _get_adsorbate_from_random(adsorbate_db) + + + .. py:method:: _load_adsorbate(adsorbate: Tuple[Any, Ellipsis]) -> None + + Saves the fields from an adsorbate stored in a database. Fields added + after the first revision are conditionally added for backwards + compatibility with older database files. + + + +.. py:function:: randomly_rotate_adsorbate(adsorbate_atoms: ase.Atoms, mode: str = 'random', binding_idx: int = None) + + diff --git a/_sources/autoapi/data/oc/core/adsorbate_slab_config/index.rst b/_sources/autoapi/data/oc/core/adsorbate_slab_config/index.rst new file mode 100644 index 000000000..76226d3c9 --- /dev/null +++ b/_sources/autoapi/data/oc/core/adsorbate_slab_config/index.rst @@ -0,0 +1,214 @@ +:py:mod:`data.oc.core.adsorbate_slab_config` +============================================ + +.. 
py:module:: data.oc.core.adsorbate_slab_config
+
+
+Module Contents
+---------------
+
+Classes
+~~~~~~~
+
+.. autoapisummary::
+
+   data.oc.core.adsorbate_slab_config.AdsorbateSlabConfig
+
+
+
+Functions
+~~~~~~~~~
+
+.. autoapisummary::
+
+   data.oc.core.adsorbate_slab_config.get_random_sites_on_triangle
+   data.oc.core.adsorbate_slab_config.custom_tile_atoms
+   data.oc.core.adsorbate_slab_config.get_interstitial_distances
+   data.oc.core.adsorbate_slab_config.there_is_overlap
+
+
+
+.. py:class:: AdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbate: fairchem.data.oc.core.Adsorbate, num_sites: int = 100, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1, mode: str = 'random')
+
+   Initializes a list of adsorbate-catalyst systems for a given Adsorbate and Slab.
+
+   :param slab: Slab object.
+   :type slab: Slab
+   :param adsorbate: Adsorbate object.
+   :type adsorbate: Adsorbate
+   :param num_sites: Number of sites to sample.
+   :type num_sites: int
+   :param num_augmentations_per_site: Number of augmentations of the adsorbate per site. The total number of
+                                      generated structures will be `num_sites` * `num_augmentations_per_site`.
+   :type num_augmentations_per_site: int
+   :param interstitial_gap: Minimum distance in Angstroms between adsorbate and slab atoms.
+   :type interstitial_gap: float
+   :param mode: "random", "heuristic", or "random_site_heuristic_placement".
+                This affects surface site sampling and adsorbate placement on each site.
+
+                In "random", we do a Delaunay triangulation of the surface atoms, then
+                sample sites uniformly at random within each triangle. When placing the
+                adsorbate, we randomly rotate it along xyz, and place it such that the
+                center of mass is at the site.
+
+                In "heuristic", we use Pymatgen's AdsorbateSiteFinder to find the most
+                energetically favorable sites, i.e., ontop, bridge, or hollow sites.
+                When placing the adsorbate, we randomly rotate it along z with only
+                slight rotation along x and y, and place it such that the binding atom
+                is at the site.
+
+                In "random_site_heuristic_placement", we do a Delaunay triangulation of
+                the surface atoms, then sample sites uniformly at random within each
+                triangle. When placing the adsorbate, we randomly rotate it along z with
+                only slight rotation along x and y, and place it such that the binding
+                atom is at the site.
+
+                In all cases, the adsorbate is placed at the closest position of no
+                overlap with the slab plus `interstitial_gap` along the surface normal.
+   :type mode: str
+
+   .. py:method:: get_binding_sites(num_sites: int)
+
+      Returns up to `num_sites` sites given the surface atoms' positions.
+
+
+   .. py:method:: place_adsorbate_on_site(adsorbate: fairchem.data.oc.core.Adsorbate, site: numpy.ndarray, interstitial_gap: float = 0.1)
+
+      Place the adsorbate at the given binding site.
+
+
+   .. py:method:: place_adsorbate_on_sites(sites: list, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1)
+
+      Place the adsorbate at the given binding sites.
+
+
+   .. py:method:: _get_scaled_normal(adsorbate_c: ase.Atoms, slab_c: ase.Atoms, site: numpy.ndarray, unit_normal: numpy.ndarray, interstitial_gap: float = 0.1)
+
+      Get the scaled normal that gives a proximate configuration without atomic
+      overlap by:
+      1. Projecting the adsorbate and surface atoms onto the surface plane.
+      2. Identifying all adsorbate atom - surface atom combinations for which
+         an intersection would occur when translating along the normal.
+         This is where the distance between the projected points is less than
+         r_surface_atom + r_adsorbate_atom.
+      3. Explicitly solving for the scaled normal at which the distance between
+         surface atom and adsorbate atom = r_surface_atom + r_adsorbate_atom +
+         interstitial_gap. This exploits the superposition of vectors and the
+         distance formula, so it requires root finding.
+
+      Assumes that the adsorbate's binding atom or center-of-mass (depending
+      on mode) is already placed at the site.
+
+      :param adsorbate_c: A copy of the adsorbate with coordinates at the site
+      :type adsorbate_c: ase.Atoms
+      :param slab_c: A copy of the slab
+      :type slab_c: ase.Atoms
+      :param site: the coordinate of the site
+      :type site: np.ndarray
+      :param adsorbate_atoms: the translated adsorbate
+      :type adsorbate_atoms: ase.Atoms
+      :param unit_normal: the unit vector normal to the surface
+      :type unit_normal: np.ndarray
+      :param interstitial_gap: the desired distance between the covalent radii of the
+                               closest surface and adsorbate atom
+      :type interstitial_gap: float
+
+      :returns: the magnitude of the normal vector for placement
+      :rtype: (float)
+
+
+   .. py:method:: _find_combos_to_check(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray, interstitial_gap: float)
+
+      Find the pairs of surface and adsorbate atoms that would have an intersection event
+      while traversing the normal vector. For each pair, return pertinent information for
+      finding the point of intersection.
+
+      :param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site
+      :type adsorbate_c2: ase.Atoms
+      :param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered
+                      about the site
+      :type slab_c2: ase.Atoms
+      :param unit_normal: the unit vector normal to the surface
+      :type unit_normal: np.ndarray
+      :param interstitial_gap: the desired distance between the covalent radii of the
+                               closest surface and adsorbate atom
+      :type interstitial_gap: float
+
+      :returns: each entry in the list corresponds to one pair to check, with the
+                following information:
+                [(adsorbate_idx, slab_idx), r_adsorbate_atom + r_slab_atom, slab_atom_position]
+      :rtype: (list[lists])
+
+
+   .. py:method:: _get_projected_points(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray)
+
+      Find the x and y coordinates of each atom projected onto the surface plane.
+
+      :param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site
+      :type adsorbate_c2: ase.Atoms
+      :param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered
+                      about the site
+      :type slab_c2: ase.Atoms
+      :param unit_normal: the unit vector normal to the surface
+      :type unit_normal: np.ndarray
+
+      :returns: {"ads": [[x1, y1], [x2, y2], ...], "slab": [[x1, y1], [x2, y2], ...],}
+      :rtype: (dict)
+
+
+   .. py:method:: get_metadata_dict(ind)
+
+      Returns a dict containing the atoms object and metadata for
+      one specified config, used for writing to files.
+
+
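+Putting the parameters above together, a hedged end-to-end sketch (assuming the
+``fairchem.data.oc.core`` import path and the bundled bulk/adsorbate databases; the
+source id "mp-30" and the SMILES string "*OH" are illustrative values only):
+
+.. code-block:: python
+
+   # Illustrative sketch; database identifiers are examples, not guaranteed entries.
+   from fairchem.data.oc.core import Adsorbate, AdsorbateSlabConfig, Bulk
+
+   bulk = Bulk(bulk_src_id_from_db="mp-30")   # select a bulk by its source id
+   slab = bulk.get_slabs(max_miller=1)[0]     # enumerate slabs and pick one
+   adsorbate = Adsorbate(adsorbate_smiles_from_db="*OH")
+
+   configs = AdsorbateSlabConfig(
+       slab=slab,
+       adsorbate=adsorbate,
+       num_sites=10,
+       num_augmentations_per_site=1,
+       interstitial_gap=0.1,
+       mode="random_site_heuristic_placement",
+   )
+
+   # Atoms object plus metadata for the first generated configuration.
+   entry = configs.get_metadata_dict(0)
+
+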
+ This is where the distance between the projected points is less than + r_surface_atom + r_adsorbate_atom + 3. Explicitly solve for the scaled normal at which the distance between + surface atom and adsorbate atom = r_surface_atom + r_adsorbate_atom + + interstitial_gap. This exploits the superposition of vectors and the + distance formula, so it requires root finding. + + Assumes that the adsorbate's binding atom or center-of-mass (depending + on mode) is already placed at the site. + + :param adsorbate_c: A copy of the adsorbate with coordinates at the site + :type adsorbate_c: ase.Atoms + :param slab_c: A copy of the slab + :type slab_c: ase.Atoms + :param site: the coordinate of the site + :type site: np.ndarray + :param adsorbate_atoms: the translated adsorbate + :type adsorbate_atoms: ase.Atoms + :param unit_normal: the unit vector normal to the surface + :type unit_normal: np.ndarray + :param interstitial_gap: the desired distance between the covalent radii of the + closest surface and adsorbate atom + :type interstitial_gap: float + + :returns: the magnitude of the normal vector for placement + :rtype: (float) + + + .. py:method:: _find_combos_to_check(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray, interstitial_gap: float) + + Find the pairs of surface and adsorbate atoms that would have an intersection event + while traversing the normal vector. For each pair, return pertanent information for + finding the point of intersection. + :param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site + :type adsorbate_c2: ase.Atoms + :param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered + about the site + :type slab_c2: ase.Atoms + :param unit_normal: the unit vector normal to the surface + :type unit_normal: np.ndarray + :param interstitial_gap: the desired distance between the covalent radii of the + closest surface and adsorbate atom + :type interstitial_gap: float + + :returns: + + each entry in the list corresponds to one pair to check. With the + following information: + [(adsorbate_idx, slab_idx), r_adsorbate_atom + r_slab_atom, slab_atom_position] + :rtype: (list[lists]) + + + .. py:method:: _get_projected_points(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray) + + Find the x and y coordinates of each atom projected onto the surface plane. + :param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site + :type adsorbate_c2: ase.Atoms + :param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered + about the site + :type slab_c2: ase.Atoms + :param unit_normal: the unit vector normal to the surface + :type unit_normal: np.ndarray + + :returns: {"ads": [[x1, y1], [x2, y2], ...], "slab": [[x1, y1], [x2, y2], ...],} + :rtype: (dict) + + + .. py:method:: get_metadata_dict(ind) + + Returns a dict containing the atoms object and metadata for + one specified config, used for writing to files. + + + +.. py:function:: get_random_sites_on_triangle(vertices: numpy.ndarray, num_sites: int = 10) + + Sample `num_sites` random sites uniformly on a given 3D triangle. + Following Sec. 4.2 from https://www.cs.princeton.edu/~funk/tog02.pdf. + + +.. py:function:: custom_tile_atoms(atoms: ase.Atoms) + + Tile the atoms so that the center tile has the indices and positions of the + untiled structure. 
+ + :param atoms: the atoms object to be tiled + :type atoms: ase.Atoms + + :returns: + + the tiled atoms which has been repeated 3 times in + the x and y directions but maintains the original indices on the central + unit cell. + :rtype: (ase.Atoms) + + +.. py:function:: get_interstitial_distances(adsorbate_slab_config: ase.Atoms) + + Check to see if there is any atomic overlap between surface atoms + and adsorbate atoms. + + :param adsorbate_slab_configuration: an slab atoms object with an + adsorbate placed + :type adsorbate_slab_configuration: ase.Atoms + + :returns: True if there is atomic overlap, otherwise False + :rtype: (bool) + + +.. py:function:: there_is_overlap(adsorbate_slab_config: ase.Atoms) + + Check to see if there is any atomic overlap between surface atoms + and adsorbate atoms. + + :param adsorbate_slab_configuration: an slab atoms object with an + adsorbate placed + :type adsorbate_slab_configuration: ase.Atoms + + :returns: True if there is atomic overlap, otherwise False + :rtype: (bool) + + diff --git a/_sources/autoapi/data/oc/core/bulk/index.rst b/_sources/autoapi/data/oc/core/bulk/index.rst new file mode 100644 index 000000000..5df8803ba --- /dev/null +++ b/_sources/autoapi/data/oc/core/bulk/index.rst @@ -0,0 +1,72 @@ +:py:mod:`data.oc.core.bulk` +=========================== + +.. py:module:: data.oc.core.bulk + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.core.bulk.Bulk + + + + +.. py:class:: Bulk(bulk_atoms: ase.Atoms = None, bulk_id_from_db: int = None, bulk_src_id_from_db: str = None, bulk_db_path: str = BULK_PKL_PATH, bulk_db: List[Dict[str, Any]] = None) + + + Initializes a bulk object in one of 4 ways: + - Directly pass in an ase.Atoms object. + - Pass in index of bulk to select from bulk database. + - Pass in the src_id of the bulk to select from the bulk database. + - Randomly sample a bulk from bulk database if no other option is passed. + + :param bulk_atoms: Bulk structure. + :type bulk_atoms: ase.Atoms + :param bulk_id_from_db: Index of bulk in database pkl to select. + :type bulk_id_from_db: int + :param bulk_src_id_from_db: Src id of bulk to select (e.g. "mp-30"). + :type bulk_src_id_from_db: int + :param bulk_db_path: Path to bulk database. + :type bulk_db_path: str + :param bulk_db: Already-loaded database. + :type bulk_db: List[Dict[str, Any]] + + .. py:method:: _get_bulk_from_random(bulk_db) + + + .. py:method:: set_source_dataset_id(src_id: str) + + + .. py:method:: set_bulk_id_from_db(bulk_id_from_db: int) + + + .. py:method:: get_slabs(max_miller=2, precomputed_slabs_dir=None) + + Returns a list of possible slabs for this bulk instance. + + + .. py:method:: __len__() + + + .. py:method:: __str__() + + Return str(self). + + + .. py:method:: __repr__() + + Return repr(self). + + + .. py:method:: __eq__(other) -> bool + + Return self==value. + + + diff --git a/_sources/autoapi/data/oc/core/index.rst b/_sources/autoapi/data/oc/core/index.rst new file mode 100644 index 000000000..6907da522 --- /dev/null +++ b/_sources/autoapi/data/oc/core/index.rst @@ -0,0 +1,403 @@ +:py:mod:`data.oc.core` +====================== + +.. py:module:: data.oc.core + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + adsorbate/index.rst + adsorbate_slab_config/index.rst + bulk/index.rst + multi_adsorbate_slab_config/index.rst + slab/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. 
autoapisummary:: + + data.oc.core.Bulk + data.oc.core.Slab + data.oc.core.Adsorbate + data.oc.core.AdsorbateSlabConfig + data.oc.core.MultipleAdsorbateSlabConfig + + + + +.. py:class:: Bulk(bulk_atoms: ase.Atoms = None, bulk_id_from_db: int = None, bulk_src_id_from_db: str = None, bulk_db_path: str = BULK_PKL_PATH, bulk_db: List[Dict[str, Any]] = None) + + + Initializes a bulk object in one of 4 ways: + - Directly pass in an ase.Atoms object. + - Pass in index of bulk to select from bulk database. + - Pass in the src_id of the bulk to select from the bulk database. + - Randomly sample a bulk from bulk database if no other option is passed. + + :param bulk_atoms: Bulk structure. + :type bulk_atoms: ase.Atoms + :param bulk_id_from_db: Index of bulk in database pkl to select. + :type bulk_id_from_db: int + :param bulk_src_id_from_db: Src id of bulk to select (e.g. "mp-30"). + :type bulk_src_id_from_db: int + :param bulk_db_path: Path to bulk database. + :type bulk_db_path: str + :param bulk_db: Already-loaded database. + :type bulk_db: List[Dict[str, Any]] + + .. py:method:: _get_bulk_from_random(bulk_db) + + + .. py:method:: set_source_dataset_id(src_id: str) + + + .. py:method:: set_bulk_id_from_db(bulk_id_from_db: int) + + + .. py:method:: get_slabs(max_miller=2, precomputed_slabs_dir=None) + + Returns a list of possible slabs for this bulk instance. + + + .. py:method:: __len__() + + + .. py:method:: __str__() + + Return str(self). + + + .. py:method:: __repr__() + + Return repr(self). + + + .. py:method:: __eq__(other) -> bool + + Return self==value. + + + +.. py:class:: Slab(bulk=None, slab_atoms: ase.Atoms = None, millers: tuple = None, shift: float = None, top: bool = None, oriented_bulk: pymatgen.core.structure.Structure = None, min_ab: float = 0.8) + + + Initializes a slab object, i.e. a particular slab tiled along xyz, in + one of 2 ways: + - Pass in a Bulk object and a slab 5-tuple containing + (atoms, miller, shift, top, oriented bulk). + - Pass in a Bulk object and randomly sample a slab. + + :param bulk: Corresponding Bulk object. + :type bulk: Bulk + :param slab_atoms: Slab atoms, tiled and tagged + :type slab_atoms: ase.Atoms + :param millers: Miller indices of slab. + :type millers: tuple + :param shift: Shift of slab. + :type shift: float + :param top: Whether slab is top or bottom. + :type top: bool + :param min_ab: To confirm that the tiled structure spans this distance + :type min_ab: float + + .. py:method:: from_bulk_get_random_slab(bulk=None, max_miller=2, min_ab=8.0, save_path=None) + :classmethod: + + + .. py:method:: from_bulk_get_specific_millers(specific_millers, bulk=None, min_ab=8.0, save_path=None) + :classmethod: + + + .. py:method:: from_bulk_get_all_slabs(bulk=None, max_miller=2, min_ab=8.0, save_path=None) + :classmethod: + + + .. py:method:: from_precomputed_slabs_pkl(bulk=None, precomputed_slabs_pkl=None, max_miller=2, min_ab=8.0) + :classmethod: + + + .. py:method:: from_atoms(atoms: ase.Atoms = None, bulk=None, **kwargs) + :classmethod: + + + .. py:method:: has_surface_tagged() + + + .. py:method:: get_metadata_dict() + + + .. py:method:: __len__() + + + .. py:method:: __str__() + + Return str(self). + + + .. py:method:: __repr__() + + Return repr(self). + + + .. py:method:: __eq__(other) + + Return self==value. + + + +.. 
py:class:: Adsorbate(adsorbate_atoms: ase.Atoms = None, adsorbate_id_from_db: int = None, adsorbate_smiles_from_db: str = None, adsorbate_db_path: str = ADSORBATES_PKL_PATH, adsorbate_db: Dict[int, Tuple[Any, Ellipsis]] = None, adsorbate_binding_indices: list = None) + + + Initializes an adsorbate object in one of 4 ways: + - Directly pass in an ase.Atoms object. + For this, you should also provide the index of the binding atom. + - Pass in index of adsorbate to select from adsorbate database. + - Pass in the SMILES string of the adsorbate to select from the database. + - Randomly sample an adsorbate from the adsorbate database. + + :param adsorbate_atoms: Adsorbate structure. + :type adsorbate_atoms: ase.Atoms + :param adsorbate_id_from_db: Index of adsorbate to select. + :type adsorbate_id_from_db: int + :param adsorbate_smiles_from_db: A SMILES string of the desired adsorbate. + :type adsorbate_smiles_from_db: str + :param adsorbate_db_path: Path to adsorbate database. + :type adsorbate_db_path: str + :param adsorbate_binding_indices: The index/indices of the adsorbate atoms which are expected to bind. + :type adsorbate_binding_indices: list + + .. py:method:: __len__() + + + .. py:method:: __str__() + + Return str(self). + + + .. py:method:: __repr__() + + Return repr(self). + + + .. py:method:: _get_adsorbate_from_random(adsorbate_db) + + + .. py:method:: _load_adsorbate(adsorbate: Tuple[Any, Ellipsis]) -> None + + Saves the fields from an adsorbate stored in a database. Fields added + after the first revision are conditionally added for backwards + compatibility with older database files. + + + +.. py:class:: AdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbate: fairchem.data.oc.core.Adsorbate, num_sites: int = 100, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1, mode: str = 'random') + + + Initializes a list of adsorbate-catalyst systems for a given Adsorbate and Slab. + + :param slab: Slab object. + :type slab: Slab + :param adsorbate: Adsorbate object. + :type adsorbate: Adsorbate + :param num_sites: Number of sites to sample. + :type num_sites: int + :param num_augmentations_per_site: Number of augmentations of the adsorbate per site. Total number of + generated structures will be `num_sites` * `num_augmentations_per_site`. + :type num_augmentations_per_site: int + :param interstitial_gap: Minimum distance in Angstroms between adsorbate and slab atoms. + :type interstitial_gap: float + :param mode: "random", "heuristic", or "random_site_heuristic_placement". + This affects surface site sampling and adsorbate placement on each site. + + In "random", we do a Delaunay triangulation of the surface atoms, then + sample sites uniformly at random within each triangle. When placing the + adsorbate, we randomly rotate it along xyz, and place it such that the + center of mass is at the site. + + In "heuristic", we use Pymatgen's AdsorbateSiteFinder to find the most + energetically favorable sites, i.e., ontop, bridge, or hollow sites. + When placing the adsorbate, we randomly rotate it along z with only + slight rotation along x and y, and place it such that the binding atom + is at the site. + + In "random_site_heuristic_placement", we do a Delaunay triangulation of + the surface atoms, then sample sites uniformly at random within each + triangle. When placing the adsorbate, we randomly rotate it along z with + only slight rotation along x and y, and place it such that the binding + atom is at the site. 
+ + In all cases, the adsorbate is placed at the closest position of no + overlap with the slab plus `interstitial_gap` along the surface normal. + :type mode: str + + .. py:method:: get_binding_sites(num_sites: int) + + Returns up to `num_sites` sites given the surface atoms' positions. + + + .. py:method:: place_adsorbate_on_site(adsorbate: fairchem.data.oc.core.Adsorbate, site: numpy.ndarray, interstitial_gap: float = 0.1) + + Place the adsorbate at the given binding site. + + + .. py:method:: place_adsorbate_on_sites(sites: list, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1) + + Place the adsorbate at the given binding sites. + + + .. py:method:: _get_scaled_normal(adsorbate_c: ase.Atoms, slab_c: ase.Atoms, site: numpy.ndarray, unit_normal: numpy.ndarray, interstitial_gap: float = 0.1) + + Get the scaled normal that gives a proximate configuration without atomic + overlap by: + 1. Projecting the adsorbate and surface atoms onto the surface plane. + 2. Identify all adsorbate atom - surface atom combinations for which + an itersection when translating along the normal would occur. + This is where the distance between the projected points is less than + r_surface_atom + r_adsorbate_atom + 3. Explicitly solve for the scaled normal at which the distance between + surface atom and adsorbate atom = r_surface_atom + r_adsorbate_atom + + interstitial_gap. This exploits the superposition of vectors and the + distance formula, so it requires root finding. + + Assumes that the adsorbate's binding atom or center-of-mass (depending + on mode) is already placed at the site. + + :param adsorbate_c: A copy of the adsorbate with coordinates at the site + :type adsorbate_c: ase.Atoms + :param slab_c: A copy of the slab + :type slab_c: ase.Atoms + :param site: the coordinate of the site + :type site: np.ndarray + :param adsorbate_atoms: the translated adsorbate + :type adsorbate_atoms: ase.Atoms + :param unit_normal: the unit vector normal to the surface + :type unit_normal: np.ndarray + :param interstitial_gap: the desired distance between the covalent radii of the + closest surface and adsorbate atom + :type interstitial_gap: float + + :returns: the magnitude of the normal vector for placement + :rtype: (float) + + + .. py:method:: _find_combos_to_check(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray, interstitial_gap: float) + + Find the pairs of surface and adsorbate atoms that would have an intersection event + while traversing the normal vector. For each pair, return pertanent information for + finding the point of intersection. + :param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site + :type adsorbate_c2: ase.Atoms + :param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered + about the site + :type slab_c2: ase.Atoms + :param unit_normal: the unit vector normal to the surface + :type unit_normal: np.ndarray + :param interstitial_gap: the desired distance between the covalent radii of the + closest surface and adsorbate atom + :type interstitial_gap: float + + :returns: + + each entry in the list corresponds to one pair to check. With the + following information: + [(adsorbate_idx, slab_idx), r_adsorbate_atom + r_slab_atom, slab_atom_position] + :rtype: (list[lists]) + + + .. py:method:: _get_projected_points(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray) + + Find the x and y coordinates of each atom projected onto the surface plane. 
+ :param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site + :type adsorbate_c2: ase.Atoms + :param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered + about the site + :type slab_c2: ase.Atoms + :param unit_normal: the unit vector normal to the surface + :type unit_normal: np.ndarray + + :returns: {"ads": [[x1, y1], [x2, y2], ...], "slab": [[x1, y1], [x2, y2], ...],} + :rtype: (dict) + + + .. py:method:: get_metadata_dict(ind) + + Returns a dict containing the atoms object and metadata for + one specified config, used for writing to files. + + + +.. py:class:: MultipleAdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbates: List[fairchem.data.oc.core.Adsorbate], num_sites: int = 100, num_configurations: int = 1, interstitial_gap: float = 0.1, mode: str = 'random_site_heuristic_placement') + + + Bases: :py:obj:`fairchem.data.oc.core.AdsorbateSlabConfig` + + Class to represent a slab with multiple adsorbates on it. This class only + returns a fixed combination of adsorbates placed on the surface. Unlike + AdsorbateSlabConfig which enumerates all possible adsorbate placements, this + problem gets combinatorially large. + + :param slab: Slab object. + :type slab: Slab + :param adsorbates: List of adsorbate objects to place on the slab. + :type adsorbates: List[Adsorbate] + :param num_sites: Number of sites to sample. + :type num_sites: int + :param num_configurations: Number of configurations to generate per slab+adsorbate(s) combination. + This corresponds to selecting different site combinations to place + the adsorbates on. + :type num_configurations: int + :param interstitial_gap: Minimum distance, in Angstroms, between adsorbate and slab atoms as + well as the inter-adsorbate distance. + :type interstitial_gap: float + :param mode: "random", "heuristic", or "random_site_heuristic_placement". + This affects surface site sampling and adsorbate placement on each site. + + In "random", we do a Delaunay triangulation of the surface atoms, then + sample sites uniformly at random within each triangle. When placing the + adsorbate, we randomly rotate it along xyz, and place it such that the + center of mass is at the site. + + In "heuristic", we use Pymatgen's AdsorbateSiteFinder to find the most + energetically favorable sites, i.e., ontop, bridge, or hollow sites. + When placing the adsorbate, we randomly rotate it along z with only + slight rotation along x and y, and place it such that the binding atom + is at the site. + + In "random_site_heuristic_placement", we do a Delaunay triangulation of + the surface atoms, then sample sites uniformly at random within each + triangle. When placing the adsorbate, we randomly rotate it along z with + only slight rotation along x and y, and place it such that the binding + atom is at the site. + + In all cases, the adsorbate is placed at the closest position of no + overlap with the slab plus `interstitial_gap` along the surface normal. + :type mode: str + + .. py:method:: place_adsorbates_on_sites(sites: list, num_configurations: int = 1, interstitial_gap: float = 0.1) + + Place the adsorbate at the given binding sites. + + This method generates a fixed number of configurations where sites are + selected to ensure that adsorbate binding indices are at least a fair + distance away from each other (covalent radii + interstitial gap). + While this helps prevent adsorbate overlap it does not gaurantee it + since non-binding adsorbate atoms can overlap if the right combination + of angles is sampled. 
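A minimal usage sketch of the classes documented in this package, assuming the ``fairchem.data.oc.core`` import path implied by the type hints above, that the SMILES strings used here exist in the bundled adsorbate database, and that ``Slab.from_bulk_get_random_slab`` returns a single ``Slab`` as its name suggests:

.. code-block:: python

    from fairchem.data.oc.core import (
        Adsorbate,
        Bulk,
        MultipleAdsorbateSlabConfig,
        Slab,
    )

    # Pick a bulk by its source id, cut a random slab from it, and select
    # two adsorbates from the packaged database by their SMILES strings.
    bulk = Bulk(bulk_src_id_from_db="mp-30")
    slab = Slab.from_bulk_get_random_slab(bulk=bulk, max_miller=2)
    adsorbates = [
        Adsorbate(adsorbate_smiles_from_db="*OH"),
        Adsorbate(adsorbate_smiles_from_db="*CO"),
    ]

    # Place both adsorbates on sampled surface sites. Each configuration
    # corresponds to a different combination of binding sites.
    configs = MultipleAdsorbateSlabConfig(
        slab,
        adsorbates=adsorbates,
        num_sites=100,
        num_configurations=2,
    )

    # Atoms object plus metadata for the first generated configuration.
    metadata = configs.get_metadata_dict(0)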
+ + + .. py:method:: get_metadata_dict(ind) + + Returns a dict containing the atoms object and metadata for + one specified config, used for writing to files. + + + diff --git a/_sources/autoapi/data/oc/core/multi_adsorbate_slab_config/index.rst b/_sources/autoapi/data/oc/core/multi_adsorbate_slab_config/index.rst new file mode 100644 index 000000000..570a74988 --- /dev/null +++ b/_sources/autoapi/data/oc/core/multi_adsorbate_slab_config/index.rst @@ -0,0 +1,101 @@ +:py:mod:`data.oc.core.multi_adsorbate_slab_config` +================================================== + +.. py:module:: data.oc.core.multi_adsorbate_slab_config + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.core.multi_adsorbate_slab_config.MultipleAdsorbateSlabConfig + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.core.multi_adsorbate_slab_config.update_distance_map + + + +.. py:class:: MultipleAdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbates: List[fairchem.data.oc.core.Adsorbate], num_sites: int = 100, num_configurations: int = 1, interstitial_gap: float = 0.1, mode: str = 'random_site_heuristic_placement') + + + Bases: :py:obj:`fairchem.data.oc.core.AdsorbateSlabConfig` + + Class to represent a slab with multiple adsorbates on it. This class only + returns a fixed combination of adsorbates placed on the surface. Unlike + AdsorbateSlabConfig which enumerates all possible adsorbate placements, this + problem gets combinatorially large. + + :param slab: Slab object. + :type slab: Slab + :param adsorbates: List of adsorbate objects to place on the slab. + :type adsorbates: List[Adsorbate] + :param num_sites: Number of sites to sample. + :type num_sites: int + :param num_configurations: Number of configurations to generate per slab+adsorbate(s) combination. + This corresponds to selecting different site combinations to place + the adsorbates on. + :type num_configurations: int + :param interstitial_gap: Minimum distance, in Angstroms, between adsorbate and slab atoms as + well as the inter-adsorbate distance. + :type interstitial_gap: float + :param mode: "random", "heuristic", or "random_site_heuristic_placement". + This affects surface site sampling and adsorbate placement on each site. + + In "random", we do a Delaunay triangulation of the surface atoms, then + sample sites uniformly at random within each triangle. When placing the + adsorbate, we randomly rotate it along xyz, and place it such that the + center of mass is at the site. + + In "heuristic", we use Pymatgen's AdsorbateSiteFinder to find the most + energetically favorable sites, i.e., ontop, bridge, or hollow sites. + When placing the adsorbate, we randomly rotate it along z with only + slight rotation along x and y, and place it such that the binding atom + is at the site. + + In "random_site_heuristic_placement", we do a Delaunay triangulation of + the surface atoms, then sample sites uniformly at random within each + triangle. When placing the adsorbate, we randomly rotate it along z with + only slight rotation along x and y, and place it such that the binding + atom is at the site. + + In all cases, the adsorbate is placed at the closest position of no + overlap with the slab plus `interstitial_gap` along the surface normal. + :type mode: str + + .. py:method:: place_adsorbates_on_sites(sites: list, num_configurations: int = 1, interstitial_gap: float = 0.1) + + Place the adsorbate at the given binding sites. 
+ + This method generates a fixed number of configurations where sites are + selected to ensure that adsorbate binding indices are at least a fair + distance away from each other (covalent radii + interstitial gap). + While this helps prevent adsorbate overlap it does not gaurantee it + since non-binding adsorbate atoms can overlap if the right combination + of angles is sampled. + + + .. py:method:: get_metadata_dict(ind) + + Returns a dict containing the atoms object and metadata for + one specified config, used for writing to files. + + + +.. py:function:: update_distance_map(prev_distance_map, site_idx, adsorbate, pseudo_atoms) + + Given a new site and the adsorbate we plan on placing there, + update the distance mapping to reflect the new distances from sites to nearest adsorbates. + We incorporate the covalent radii of the placed adsorbate binding atom in our distance + calculation to prevent atom overlap. + + diff --git a/_sources/autoapi/data/oc/core/slab/index.rst b/_sources/autoapi/data/oc/core/slab/index.rst new file mode 100644 index 000000000..93f0032c4 --- /dev/null +++ b/_sources/autoapi/data/oc/core/slab/index.rst @@ -0,0 +1,309 @@ +:py:mod:`data.oc.core.slab` +=========================== + +.. py:module:: data.oc.core.slab + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.core.slab.Slab + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.core.slab.tile_and_tag_atoms + data.oc.core.slab.set_fixed_atom_constraints + data.oc.core.slab.tag_surface_atoms + data.oc.core.slab.tile_atoms + data.oc.core.slab.find_surface_atoms_by_height + data.oc.core.slab.find_surface_atoms_with_voronoi_given_height + data.oc.core.slab.calculate_center_of_mass + data.oc.core.slab.calculate_coordination_of_bulk_atoms + data.oc.core.slab.compute_slabs + data.oc.core.slab.flip_struct + data.oc.core.slab.is_structure_invertible + data.oc.core.slab.standardize_bulk + + + +.. py:class:: Slab(bulk=None, slab_atoms: ase.Atoms = None, millers: tuple = None, shift: float = None, top: bool = None, oriented_bulk: pymatgen.core.structure.Structure = None, min_ab: float = 0.8) + + + Initializes a slab object, i.e. a particular slab tiled along xyz, in + one of 2 ways: + - Pass in a Bulk object and a slab 5-tuple containing + (atoms, miller, shift, top, oriented bulk). + - Pass in a Bulk object and randomly sample a slab. + + :param bulk: Corresponding Bulk object. + :type bulk: Bulk + :param slab_atoms: Slab atoms, tiled and tagged + :type slab_atoms: ase.Atoms + :param millers: Miller indices of slab. + :type millers: tuple + :param shift: Shift of slab. + :type shift: float + :param top: Whether slab is top or bottom. + :type top: bool + :param min_ab: To confirm that the tiled structure spans this distance + :type min_ab: float + + .. py:method:: from_bulk_get_random_slab(bulk=None, max_miller=2, min_ab=8.0, save_path=None) + :classmethod: + + + .. py:method:: from_bulk_get_specific_millers(specific_millers, bulk=None, min_ab=8.0, save_path=None) + :classmethod: + + + .. py:method:: from_bulk_get_all_slabs(bulk=None, max_miller=2, min_ab=8.0, save_path=None) + :classmethod: + + + .. py:method:: from_precomputed_slabs_pkl(bulk=None, precomputed_slabs_pkl=None, max_miller=2, min_ab=8.0) + :classmethod: + + + .. py:method:: from_atoms(atoms: ase.Atoms = None, bulk=None, **kwargs) + :classmethod: + + + .. py:method:: has_surface_tagged() + + + .. py:method:: get_metadata_dict() + + + .. py:method:: __len__() + + + .. 
py:method:: __str__() + + Return str(self). + + + .. py:method:: __repr__() + + Return repr(self). + + + .. py:method:: __eq__(other) + + Return self==value. + + + +.. py:function:: tile_and_tag_atoms(unit_slab_struct: pymatgen.core.structure.Structure, bulk_atoms: ase.Atoms, min_ab: float = 8) + + This function combines the next three functions that tile, tag, + and constrain the atoms. + + :param unit_slab_struct: The untiled slab structure + :type unit_slab_struct: Structure + :param bulk_atoms: Atoms of the corresponding bulk structure, used for tagging + :type bulk_atoms: ase.Atoms + :param min_ab: The minimum distance in x and y spanned by the tiled structure. + :type min_ab: float + + :returns: **atoms_tiled** -- A copy of the slab atoms that is tiled, tagged, and constrained + :rtype: ase.Atoms + + +.. py:function:: set_fixed_atom_constraints(atoms) + + This function fixes sub-surface atoms of a surface. Also works on systems + that have surface + adsorbate(s), as long as the bulk atoms are tagged with + `0`, surface atoms are tagged with `1`, and the adsorbate atoms are tagged + with `2` or above. + + This is used for both surface atoms and the combined surface+adsorbate. + + :param atoms: Atoms object of the slab or slab+adsorbate system, with bulk atoms + tagged as `0`, surface atoms tagged as `1`, and adsorbate atoms tagged + as `2` or above. + :type atoms: ase.Atoms + + :returns: **atoms** -- A deep copy of the `atoms` argument, but where the appropriate + atoms are constrained. + :rtype: ase.Atoms + + +.. py:function:: tag_surface_atoms(slab_atoms: ase.Atoms = None, bulk_atoms: ase.Atoms = None) + + Sets the tags of an `ase.Atoms` object. Any atom that we consider a "bulk" + atom will have a tag of 0, and any atom that we consider a "surface" atom + will have a tag of 1. We use a combination of Voronoi neighbor algorithms + (adapted from `pymatgen.core.surface.Slab.get_surface_sites`; see + https://pymatgen.org/pymatgen.core.surface.html) and a distance cutoff. + + :param slab_atoms: The slab where you are trying to find surface sites. + :type slab_atoms: ase.Atoms + :param bulk_atoms: The bulk structure that the surface was cut from. + :type bulk_atoms: ase.Atoms + + :returns: **slab_atoms** -- A copy of the slab atoms with the surface atoms tagged as 1. + :rtype: ase.Atoms + + +.. py:function:: tile_atoms(atoms: ase.Atoms, min_ab: float = 8) + + This function will repeat an atoms structure in the direction of the a and b + lattice vectors such that they are at least as wide as the min_ab constant. + + :param atoms: The structure to tile. + :type atoms: ase.Atoms + :param min_ab: The minimum distance in x and y spanned by the tiled structure. + :type min_ab: float + + :returns: **atoms_tiled** -- The tiled structure. + :rtype: ase.Atoms + + +.. py:function:: find_surface_atoms_by_height(surface_atoms) + + As discussed in the docstring for `find_surface_atoms_with_voronoi`, + sometimes we might accidentally tag a surface atom as a bulk atom if there + are multiple coordination environments for that atom type within the bulk. + One heuristic that we use to address this is to simply figure out if an + atom is close to the surface. This function will figure that out. + + Specifically: We consider an atom a surface atom if it is within 2 + Angstroms of the heighest atom in the z-direction (or more accurately, the + direction of the 3rd unit cell vector). 
+ + :param surface_atoms: + :type surface_atoms: ase.Atoms + + :returns: **tags** -- A list that contains the indices of the surface atoms. + :rtype: list + + +.. py:function:: find_surface_atoms_with_voronoi_given_height(bulk_atoms, slab_atoms, height_tags) + + Labels atoms as surface or bulk atoms according to their coordination + relative to their bulk structure. If an atom's coordination is less than it + normally is in a bulk, then we consider it a surface atom. We calculate the + coordination using pymatgen's Voronoi algorithms. + + Note that if a single element has different sites within a bulk and these + sites have different coordinations, then we consider slab atoms + "under-coordinated" only if they are less coordinated than the most under + undercoordinated bulk atom. For example: Say we have a bulk with two Cu + sites. One site has a coordination of 12 and another a coordination of 9. + If a slab atom has a coordination of 10, we will consider it a bulk atom. + + :param bulk_atoms: The bulk structure that the surface was cut from. + :type bulk_atoms: ase.Atoms + :param slab_atoms: The slab structure. + :type slab_atoms: ase.Atoms + :param height_tags: The tags determined by the `find_surface_atoms_by_height` algo. + :type height_tags: list + + :returns: **tags** -- A list of 0s and 1s whose indices align with the atoms in + `slab_atoms`. 0s indicate a bulk atom and 1 indicates a surface atom. + :rtype: list + + +.. py:function:: calculate_center_of_mass(struct) + + Calculates the center of mass of the slab. + + +.. py:function:: calculate_coordination_of_bulk_atoms(bulk_atoms) + + Finds all unique atoms in a bulk structure and then determines their + coordination number. Then parses these coordination numbers into a + dictionary whose keys are the elements of the atoms and whose values are + their possible coordination numbers. + For example: `bulk_cns = {'Pt': {3., 12.}, 'Pd': {12.}}` + + :param bulk_atoms: The bulk structure. + :type bulk_atoms: ase.Atoms + + :returns: **bulk_cn_dict** -- A dictionary whose keys are the elements of the atoms and whose values + are their possible coordination numbers. + :rtype: dict + + +.. py:function:: compute_slabs(bulk_atoms: ase.Atoms = None, max_miller: int = 2, specific_millers: list = None) + + Enumerates all the symmetrically distinct slabs of a bulk structure. + It will not enumerate slabs with Miller indices above the + `max_miller` argument. Note that we also look at the bottoms of slabs + if they are distinct from the top. If they are distinct, we flip the + surface so the bottom is pointing upwards. + + :param bulk_atoms: The bulk structure. + :type bulk_atoms: ase.Atoms + :param max_miller: The maximum Miller index of the slabs to enumerate. Increasing this + argument will increase the number of slabs, and the slabs will generally + become larger. + :type max_miller: int + :param specific_millers: A list of Miller indices that you want to enumerate. If this argument + is not `None`, then the `max_miller` argument is ignored. + :type specific_millers: list + + :returns: **all_slabs_info** -- A list of 5-tuples containing pymatgen structure objects for enumerated + slabs, the Miller indices, floats for the shifts, booleans for top, and + the oriented bulk structure. + :rtype: list + + +.. py:function:: flip_struct(struct: pymatgen.core.structure.Structure) + + Flips an atoms object upside down. Normally used to flip slabs. 
+ + :param struct: pymatgen structure object of the surface you want to flip + :type struct: Structure + + :returns: **flipped_struct** -- pymatgen structure object of the flipped surface. + :rtype: Structure + + +.. py:function:: is_structure_invertible(struct: pymatgen.core.structure.Structure) + + This function figures out whether or not a `Structure` + object is symmetric in the z-direction. In this function, the affine matrix is a rotation + matrix that is multiplied with the XYZ positions of the crystal. If the z,z + component of that matrix is negative, a symmetry operation exists; it could + be a mirror operation, or one that involves multiple rotations/etc. + Regardless, it means that the top becomes the bottom and vice-versa, and the + structure is symmetric, i.e. structure_XYZ = structure_XYZ*M. + + In short: If this function returns `False`, then the input structure can + be flipped in the z-direction to create a new structure. + + :param struct: pymatgen structure object of the slab. + :type struct: Structure + + :returns: * A boolean indicating whether or not your `ase.Atoms` object is + * *symmetric in z-direction (i.e. symmetric with respect to x-y plane).* + + +.. py:function:: standardize_bulk(atoms: ase.Atoms) + + There are many ways to define a bulk unit cell. If you change the unit + cell itself but also change the locations of the atoms within the unit + cell, you can effectively get the same bulk structure. To address this, + there is a standardization method used to reduce the degrees of freedom + such that each unit cell only has one "true" configuration. This + function will align a unit cell you give it to fit within this + standardization. + + :param atoms: `ase.Atoms` object of the bulk you want to standardize. + :type atoms: ase.Atoms + + :returns: **standardized_struct** -- pymatgen structure object of the standardized bulk. + :rtype: Structure + + diff --git a/_sources/autoapi/data/oc/databases/index.rst b/_sources/autoapi/data/oc/databases/index.rst new file mode 100644 index 000000000..b8f3075c6 --- /dev/null +++ b/_sources/autoapi/data/oc/databases/index.rst @@ -0,0 +1,24 @@ +:py:mod:`data.oc.databases` +=========================== + +.. py:module:: data.oc.databases + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + pkls/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + update/index.rst + + diff --git a/_sources/autoapi/data/oc/databases/pkls/index.rst b/_sources/autoapi/data/oc/databases/pkls/index.rst new file mode 100644 index 000000000..f0e2a6a9d --- /dev/null +++ b/_sources/autoapi/data/oc/databases/pkls/index.rst @@ -0,0 +1,17 @@ +:py:mod:`data.oc.databases.pkls` +================================ + +.. py:module:: data.oc.databases.pkls + + +Package Contents +---------------- + +.. py:data:: BULK_PKL_PATH + + + +.. py:data:: ADSORBATES_PKL_PATH + + + diff --git a/_sources/autoapi/data/oc/databases/update/index.rst b/_sources/autoapi/data/oc/databases/update/index.rst new file mode 100644 index 000000000..8bc39c042 --- /dev/null +++ b/_sources/autoapi/data/oc/databases/update/index.rst @@ -0,0 +1,40 @@ +:py:mod:`data.oc.databases.update` +================================== + +.. py:module:: data.oc.databases.update + +.. autoapi-nested-parse:: + + Script for updating ase pkl and db files from v3.19 to v3.21. + Run it with ase v3.19. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + data.oc.databases.update.pbc_patch + data.oc.databases.update.set_pbc_patch + data.oc.databases.update.update_pkls + data.oc.databases.update.update_dbs + + + +.. py:function:: pbc_patch(self) + + +.. py:function:: set_pbc_patch(self, pbc) + + +.. py:function:: update_pkls() + + +.. py:function:: update_dbs() + + diff --git a/_sources/autoapi/data/oc/experimental/get_energies/index.rst b/_sources/autoapi/data/oc/experimental/get_energies/index.rst new file mode 100644 index 000000000..0b7d64f02 --- /dev/null +++ b/_sources/autoapi/data/oc/experimental/get_energies/index.rst @@ -0,0 +1,39 @@ +:py:mod:`data.oc.experimental.get_energies` +=========================================== + +.. py:module:: data.oc.experimental.get_energies + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.experimental.get_energies.extract_file + data.oc.experimental.get_energies.process_func + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + data.oc.experimental.get_energies.input_folder + + +.. py:function:: extract_file(zipname, file_to_unzip, extract_to) + + +.. py:function:: process_func(indices, dirlist, ans) + + +.. py:data:: input_folder + :value: 'temp_download/' + + + diff --git a/_sources/autoapi/data/oc/experimental/merge_traj/index.rst b/_sources/autoapi/data/oc/experimental/merge_traj/index.rst new file mode 100644 index 000000000..ae550dc65 --- /dev/null +++ b/_sources/autoapi/data/oc/experimental/merge_traj/index.rst @@ -0,0 +1,29 @@ +:py:mod:`data.oc.experimental.merge_traj` +========================================= + +.. py:module:: data.oc.experimental.merge_traj + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.experimental.merge_traj.extract_file + data.oc.experimental.merge_traj.main + + + +.. py:function:: extract_file(zipname, file_to_unzip, extract_to) + + +.. py:function:: main() + + Given a directory containing adsorbate subdirectories, loops through all + runs and merges intermediate checkpoints into a single, full trajectory. + + diff --git a/_sources/autoapi/data/oc/experimental/perturb_systems/index.rst b/_sources/autoapi/data/oc/experimental/perturb_systems/index.rst new file mode 100644 index 000000000..f164929e2 --- /dev/null +++ b/_sources/autoapi/data/oc/experimental/perturb_systems/index.rst @@ -0,0 +1,26 @@ +:py:mod:`data.oc.experimental.perturb_systems` +============================================== + +.. py:module:: data.oc.experimental.perturb_systems + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.experimental.perturb_systems.main + + + +.. py:function:: main() + + Rattles every image along a relaxation pathway at 5 different variances. + Rattled images are then put in their own directory along with the input + files necessary to run VASP calculations. + + diff --git a/_sources/autoapi/data/oc/experimental/rattle_test/index.rst b/_sources/autoapi/data/oc/experimental/rattle_test/index.rst new file mode 100644 index 000000000..4d837262b --- /dev/null +++ b/_sources/autoapi/data/oc/experimental/rattle_test/index.rst @@ -0,0 +1,25 @@ +:py:mod:`data.oc.experimental.rattle_test` +========================================== + +.. py:module:: data.oc.experimental.rattle_test + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.experimental.rattle_test.main + + + +.. py:function:: main() + + Checks whether ASE's rattle modifies fixed atoms. 
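A minimal sketch of the kind of check this script performs, assuming a small copper slab built with ``ase.build.fcc111`` stands in for the real input structures:

.. code-block:: python

    import numpy as np
    from ase.build import fcc111
    from ase.constraints import FixAtoms

    # Build a small slab and fix its two bottom layers, mirroring how
    # sub-surface atoms are constrained elsewhere in this package.
    atoms = fcc111("Cu", size=(2, 2, 4), vacuum=10.0)
    fixed_idx = [atom.index for atom in atoms if atom.tag >= 3]
    atoms.set_constraint(FixAtoms(indices=fixed_idx))

    before = atoms.get_positions().copy()
    atoms.rattle(stdev=0.05, seed=0)
    after = atoms.get_positions()

    # Report whether any constrained atom moved during the rattle.
    max_disp = np.abs(after[fixed_idx] - before[fixed_idx]).max()
    print(f"max displacement of fixed atoms: {max_disp:.3e} A")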
+ + diff --git a/_sources/autoapi/data/oc/experimental/utils/index.rst b/_sources/autoapi/data/oc/experimental/utils/index.rst new file mode 100644 index 000000000..f40677b3c --- /dev/null +++ b/_sources/autoapi/data/oc/experimental/utils/index.rst @@ -0,0 +1,53 @@ +:py:mod:`data.oc.experimental.utils` +==================================== + +.. py:module:: data.oc.experimental.utils + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.experimental.utils.v0_check + data.oc.experimental.utils.restart_bug_check + data.oc.experimental.utils.plot_traj + + + +.. py:function:: v0_check(full_traj, initial) + + Checks whether the initial structure as gathered from the POSCAR input file + is in agreement with the initial image of the full trajectory. If not, the + trajectory comes from the V0 dataset, which failed to save intermediate + checkpoints. + + Args + full_traj (list of Atoms objects): Calculated full trajectory. + initial (Atoms object): Starting image provided by POSCAR. + + +.. py:function:: restart_bug_check(full_traj) + + Observed that some of the trajectories had a strange, identically cyclical + behavior - suggesting that a checkpoint was restarted from an earlier + checkpoint rather than the latest. Checks whether the trajectory provided + falls within that bug. + + Args + full_traj (list of Atoms objects): Calculated full trajectory. + + +.. py:function:: plot_traj(traj, fname) + + Plots the energy profile of a given trajectory. + + Args + traj (list of Atoms objects): Full trajectory to be plotted + fname (str): Filename to be used as the title and to save the figure as. + + diff --git a/_sources/autoapi/data/oc/index.rst b/_sources/autoapi/data/oc/index.rst new file mode 100644 index 000000000..4efeb8a9e --- /dev/null +++ b/_sources/autoapi/data/oc/index.rst @@ -0,0 +1,27 @@ +:py:mod:`data.oc` +================= + +.. py:module:: data.oc + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + core/index.rst + databases/index.rst + tests/index.rst + utils/index.rst + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + structure_generator/index.rst + + diff --git a/_sources/autoapi/data/oc/scripts/precompute_sample_structures/index.rst b/_sources/autoapi/data/oc/scripts/precompute_sample_structures/index.rst new file mode 100644 index 000000000..d0d14b9eb --- /dev/null +++ b/_sources/autoapi/data/oc/scripts/precompute_sample_structures/index.rst @@ -0,0 +1,138 @@ +:py:mod:`data.oc.scripts.precompute_sample_structures` +====================================================== + +.. py:module:: data.oc.scripts.precompute_sample_structures + +.. autoapi-nested-parse:: + + This submodule contains the scripts that we used to sample the adsorption + structures. + + Note that some of these scripts were taken from + [GASpy](https://github.com/ulissigroup/GASpy) with permission of the author. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.scripts.precompute_sample_structures.enumerate_surfaces_for_saving + data.oc.scripts.precompute_sample_structures.standardize_bulk + data.oc.scripts.precompute_sample_structures.is_structure_invertible + data.oc.scripts.precompute_sample_structures.flip_struct + data.oc.scripts.precompute_sample_structures.precompute_enumerate_surface + + + +Attributes +~~~~~~~~~~ + +.. 
autoapisummary:: + + data.oc.scripts.precompute_sample_structures.__authors__ + data.oc.scripts.precompute_sample_structures.__email__ + data.oc.scripts.precompute_sample_structures.BULK_PKL + data.oc.scripts.precompute_sample_structures.MAX_MILLER + data.oc.scripts.precompute_sample_structures.s + + +.. py:data:: __authors__ + :value: ['Kevin Tran', 'Aini Palizhati', 'Siddharth Goyal', 'Zachary Ulissi'] + + + +.. py:data:: __email__ + :value: ['ktran@andrew.cmu.edu'] + + + +.. py:data:: BULK_PKL + :value: '/fill/this/in/with/path/to/bulk/pkl/file' + + + +.. py:data:: MAX_MILLER + :value: 2 + + + +.. py:function:: enumerate_surfaces_for_saving(bulk_atoms, max_miller=MAX_MILLER) + + Enumerate all the symmetrically distinct surfaces of a bulk structure. It + will not enumerate surfaces with Miller indices above the `max_miller` + argument. Note that we also look at the bottoms of surfaces if they are + distinct from the top. If they are distinct, we flip the surface so the bottom + is pointing upwards. + + :param bulk_atoms `ase.Atoms` object of the bulk you want to enumerate: surfaces from. + :param max_miller An integer indicating the maximum Miller index of the surfaces: you are willing to enumerate. Increasing this argument will + increase the number of surfaces, but the surfaces will + generally become larger. + + :returns: + + `pymatgen.Structure` + objects for surfaces we have enumerated, the Miller + indices, floats for the shifts, and Booleans for "top". + :rtype: all_slabs_info A list of 4-tuples containing + + +.. py:function:: standardize_bulk(atoms) + + There are many ways to define a bulk unit cell. If you change the unit cell + itself but also change the locations of the atoms within the unit cell, you + can get effectively the same bulk structure. To address this, there is a + standardization method used to reduce the degrees of freedom such that each + unit cell only has one "true" configuration. This function will align a + unit cell you give it to fit within this standardization. + + Arg: + atoms `ase.Atoms` object of the bulk you want to standardize + :returns: standardized_struct `pymatgen.Structure` of the standardized bulk + + +.. py:function:: is_structure_invertible(structure) + + This function figures out whether or not an `pymatgen.Structure` object has + symmetricity. In this function, the affine matrix is a rotation matrix that + is multiplied with the XYZ positions of the crystal. If the z,z component + of that is negative, it means symmetry operation exist, it could be a + mirror operation, or one that involves multiple rotations/etc. Regardless, + it means that the top becomes the bottom and vice-versa, and the structure + is the symmetric. i.e. structure_XYZ = structure_XYZ*M. + + In short: If this function returns `False`, then the input structure can + be flipped in the z-direction to create a new structure. + + Arg: + structure A `pymatgen.Structure` object. + Returns + A boolean indicating whether or not your `ase.Atoms` object is + symmetric in z-direction (i.e. symmetric with respect to x-y plane). + + +.. py:function:: flip_struct(struct) + + Flips an atoms object upside down. Normally used to flip surfaces. + + Arg: + atoms `pymatgen.Structure` object + :returns: + + flipped_struct The same `ase.Atoms` object that was fed as an + argument, but flipped upside down. + + +.. py:function:: precompute_enumerate_surface(bulk_database, bulk_index, opfile) + + +.. 
py:data:: s + + + diff --git a/_sources/autoapi/data/oc/structure_generator/index.rst b/_sources/autoapi/data/oc/structure_generator/index.rst new file mode 100644 index 000000000..e59926ff1 --- /dev/null +++ b/_sources/autoapi/data/oc/structure_generator/index.rst @@ -0,0 +1,105 @@ +:py:mod:`data.oc.structure_generator` +===================================== + +.. py:module:: data.oc.structure_generator + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.structure_generator.StructureGenerator + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.structure_generator.write_surface + data.oc.structure_generator.parse_args + data.oc.structure_generator.precompute_slabs + data.oc.structure_generator.run_placements + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + data.oc.structure_generator.args + + +.. py:class:: StructureGenerator(args, bulk_index, surface_index, adsorbate_index) + + + A class that creates adsorbate/bulk/slab objects given specified indices, + and writes vasp input files and metadata for multiple placements of the adsorbate + on the slab. You can choose random, heuristic, or both types of placements. + + The output directory structure will have the following nested structure, + where "files" represents the vasp input files and the metadata.pkl: + outputdir/ + bulk0/ + surface0/ + surface/files + ads0/ + heur0/files + heur1/files + rand0/files + ... + ads1/ + ... + surface1/ + ... + bulk1/ + ... + + Precomputed surfaces will be calculated and saved out if they don't + already exist in the provided directory. + + :param args: Contains all command line args + :type args: argparse.Namespace + :param bulk_index: Index of the bulk within the bulk db + :type bulk_index: int + :param surface_index: Index of the surface in the list of all possible surfaces + :type surface_index: int + :param adsorbate_index: Index of the adsorbate within the adsorbate db + :type adsorbate_index: int + + .. py:method:: run() + + Create adsorbate/bulk/surface objects, generate adslab placements, + and write to files. + + + .. py:method:: _write_adslabs(adslab_obj, mode_str) + + Write one set of adslabs (called separately for random and heurstic placements) + + + +.. py:function:: write_surface(args, slab, bulk_index, surface_index) + + Writes vasp inputs and metadata for a specified slab + + +.. py:function:: parse_args() + + +.. py:function:: precompute_slabs(bulk_ind) + + +.. py:function:: run_placements(inputs) + + +.. py:data:: args + + + diff --git a/_sources/autoapi/data/oc/tests/index.rst b/_sources/autoapi/data/oc/tests/index.rst new file mode 100644 index 000000000..a29711958 --- /dev/null +++ b/_sources/autoapi/data/oc/tests/index.rst @@ -0,0 +1,20 @@ +:py:mod:`data.oc.tests` +======================= + +.. py:module:: data.oc.tests + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + test_adsorbate/index.rst + test_adsorbate_slab_config/index.rst + test_bulk/index.rst + test_inputs/index.rst + test_multi_adsorbate_slab_config/index.rst + test_slab/index.rst + + diff --git a/_sources/autoapi/data/oc/tests/old_tests/check_energy_and_forces/index.rst b/_sources/autoapi/data/oc/tests/old_tests/check_energy_and_forces/index.rst new file mode 100644 index 000000000..390f96a66 --- /dev/null +++ b/_sources/autoapi/data/oc/tests/old_tests/check_energy_and_forces/index.rst @@ -0,0 +1,70 @@ +:py:mod:`data.oc.tests.old_tests.check_energy_and_forces` +========================================================= + +.. 
py:module:: data.oc.tests.old_tests.check_energy_and_forces + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.old_tests.check_energy_and_forces.check_relaxed_forces + data.oc.tests.old_tests.check_energy_and_forces.check_adsorption_energy + data.oc.tests.old_tests.check_energy_and_forces.check_DFT_energy + data.oc.tests.old_tests.check_energy_and_forces.check_positions_across_frames_are_different + data.oc.tests.old_tests.check_energy_and_forces.read_pkl + data.oc.tests.old_tests.check_energy_and_forces.run_checks + data.oc.tests.old_tests.check_energy_and_forces.create_parser + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.old_tests.check_energy_and_forces.parser + + +.. py:function:: check_relaxed_forces(sid, path, thres) + + Check that all forces in the final frame of the adslab are less than a threshold. + + +.. py:function:: check_adsorption_energy(sid, path, ref_energy, adsorption_energy) + + +.. py:function:: check_DFT_energy(sid, path, e_tol=0.05) + + Given a relaxation trajectory, check that 1) the final energy is less than the initial + energy, raising an error if not, and 2) the energy decreases throughout the trajectory (small spikes are okay). + 3) If 2) fails, check whether it is just a matter of the tolerance being too strict by + considering only the first quarter of the trajectory and sampling every 10th frame + to check for an _almost_ monotonic decrease in energies. + If any frame(i+1) energy is higher than frame(i) energy, flag it and plot the trajectory. + + +.. py:function:: check_positions_across_frames_are_different(sid, path) + + Given a relaxation trajectory, make sure positions for two consecutive + frames are not identical. + + +.. py:function:: read_pkl(fname) + + +.. py:function:: run_checks(args) + + +.. py:function:: create_parser() + + +.. py:data:: parser + + + diff --git a/_sources/autoapi/data/oc/tests/old_tests/check_inputs/index.rst b/_sources/autoapi/data/oc/tests/old_tests/check_inputs/index.rst new file mode 100644 index 000000000..28a96af20 --- /dev/null +++ b/_sources/autoapi/data/oc/tests/old_tests/check_inputs/index.rst @@ -0,0 +1,96 @@ +:py:mod:`data.oc.tests.old_tests.check_inputs` +============================================== + +.. py:module:: data.oc.tests.old_tests.check_inputs + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.old_tests.check_inputs.obtain_metadata + data.oc.tests.old_tests.check_inputs.create_df + data.oc.tests.old_tests.check_inputs.adslabs_are_unique + data.oc.tests.old_tests.check_inputs.check_commonelems + data.oc.tests.old_tests.check_inputs.is_adsorbate_placed_correct + data.oc.tests.old_tests.check_inputs._get_connectivity + + + +.. py:function:: obtain_metadata(input_dir, split) + + Get the metadata for the provided input directory and data split. + :param input_dir: + :type input_dir: str + :param split: 'val_ood_cat/ads/both', and 'test_ood_cat/ads/both'. + :type split: str + + :returns: + + metadata (tuple) adslab properties. + Ex: ('mp-126', (1,1,1), 0.025, True, '*OH', (0,0,0), 'val_ood_ads') + + +.. py:function:: create_df(metadata_lst, df_name=None) + + Create a df from metadata to be used by the check_dataset.py file. + :param metadata_lst: A list of adslab properties in tuple form. Each tuple should + contain (mpid, miller index, shift, top, adsorbate SMILES string, + adsorption Cartesian coordinates tuple, and which split the data belongs to). + Ex: ('mp-126', (1,1,1), 0.025, True, '*OH', (0,0,0), 'val_ood_ads') + + :returns: df A pandas DataFrame + + +.. py:function:: adslabs_are_unique(df, unique_by=['mpid', 'miller', 'shift', 'top', 'adsorbate', 'adsorption_site']) + + Test if there are duplicate adslabs given a df. If the input is in another + format, convert it to a df first. + :param df: A pd.DataFrame containing metadata of the adslabs being checked. + :param unique_by: df column names that are used to detect duplicates. The default + list is the set of fingerprints that represent a unique adslab. + + +.. py:function:: check_commonelems(df, split1, split2, check='adsorbate') + + Given a df containing all the metadata of the calculations, check to see if there are + any bulk or adsorbate duplicates between train and val/test_ood. The dataframe should + have a "split_tag" column indicating which split (i.e. train, val_ood_ads, etc) the data belongs to. + :param df: A pd.DataFrame containing metadata of the adslabs being checked. + :param split1: One of the splits: 'train', 'val_id', 'val_ood_cat/ads/both', 'test_id', or 'test_ood_cat/ads/both'. + :param split2: One of the splits: 'train', 'val_id', 'val_ood_cat/ads/both', 'test_id', or 'test_ood_cat/ads/both'. + + +.. py:function:: is_adsorbate_placed_correct(adslab_input, atoms_tag) + + Make sure all adsorbate atoms are connected after placement. + False means there is at least one isolated adsorbate atom. + It should be used after input generation but before DFT to avoid + unnecessary computations. + :param adslab_input: `ase.Atoms` of the structure in its initial state + :param atoms_tag: + :type atoms_tag: list + + :returns: + + boolean. If there are any standalone adsorbate atoms after placement, + returns False. + + +.. py:function:: _get_connectivity(atoms) + + Generate the connectivity of an atoms obj. + :param atoms: An `ase.Atoms` object + + :returns: matrix The connectivity matrix of the atoms object. + + diff --git a/_sources/autoapi/data/oc/tests/old_tests/compare_inputs_and_trajectory/index.rst b/_sources/autoapi/data/oc/tests/old_tests/compare_inputs_and_trajectory/index.rst new file mode 100644 index 000000000..a22794b04 --- /dev/null +++ b/_sources/autoapi/data/oc/tests/old_tests/compare_inputs_and_trajectory/index.rst @@ -0,0 +1,53 @@ +:py:mod:`data.oc.tests.old_tests.compare_inputs_and_trajectory` +=============================================================== + +.. py:module:: data.oc.tests.old_tests.compare_inputs_and_trajectory + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.old_tests.compare_inputs_and_trajectory.get_starting_structure_from_input_dir + data.oc.tests.old_tests.compare_inputs_and_trajectory.min_diff + data.oc.tests.old_tests.compare_inputs_and_trajectory.compare + data.oc.tests.old_tests.compare_inputs_and_trajectory.read_pkl + data.oc.tests.old_tests.compare_inputs_and_trajectory.create_parser + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.old_tests.compare_inputs_and_trajectory.parser + + +.. py:function:: get_starting_structure_from_input_dir(input_dir) + + +.. 
py:function:: min_diff(atoms_init, atoms_final) + + Calculate atom wise distances of two atoms object, + taking into account periodic boundary conditions. + + +.. py:function:: compare(args) + + +.. py:function:: read_pkl(fname) + + +.. py:function:: create_parser() + + +.. py:data:: parser + + + diff --git a/_sources/autoapi/data/oc/tests/old_tests/verify_correctness/index.rst b/_sources/autoapi/data/oc/tests/old_tests/verify_correctness/index.rst new file mode 100644 index 000000000..d9b51929b --- /dev/null +++ b/_sources/autoapi/data/oc/tests/old_tests/verify_correctness/index.rst @@ -0,0 +1,42 @@ +:py:mod:`data.oc.tests.old_tests.verify_correctness` +==================================================== + +.. py:module:: data.oc.tests.old_tests.verify_correctness + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.old_tests.verify_correctness.compare_runs + data.oc.tests.old_tests.verify_correctness.create_parser + data.oc.tests.old_tests.verify_correctness.main + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.old_tests.verify_correctness.parser + + +.. py:function:: compare_runs(path1, path2, reference_type, tol) + + +.. py:function:: create_parser() + + +.. py:function:: main(args) + + +.. py:data:: parser + + + diff --git a/_sources/autoapi/data/oc/tests/test_adsorbate/index.rst b/_sources/autoapi/data/oc/tests/test_adsorbate/index.rst new file mode 100644 index 000000000..b405f8c43 --- /dev/null +++ b/_sources/autoapi/data/oc/tests/test_adsorbate/index.rst @@ -0,0 +1,64 @@ +:py:mod:`data.oc.tests.test_adsorbate` +====================================== + +.. py:module:: data.oc.tests.test_adsorbate + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.test_adsorbate.TestAdsorbate + + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.test_adsorbate._test_db + data.oc.tests.test_adsorbate._test_db_old + + +.. py:data:: _test_db + + + +.. py:data:: _test_db_old + + + +.. py:class:: TestAdsorbate + + + .. py:method:: test_adsorbate_init_from_id() + + + .. py:method:: test_adsorbate_init_from_smiles() + + + .. py:method:: test_adsorbate_init_random() + + + .. py:method:: test_adsorbate_init_from_id_with_db() + + + .. py:method:: test_adsorbate_init_from_smiles_with_db() + + + .. py:method:: test_adsorbate_init_random_with_db() + + + .. py:method:: test_adsorbate_init_reaction_string() + + + .. py:method:: test_adsorbate_init_reaction_string_with_old_db() + + + diff --git a/_sources/autoapi/data/oc/tests/test_adsorbate_slab_config/index.rst b/_sources/autoapi/data/oc/tests/test_adsorbate_slab_config/index.rst new file mode 100644 index 000000000..fbe53733a --- /dev/null +++ b/_sources/autoapi/data/oc/tests/test_adsorbate_slab_config/index.rst @@ -0,0 +1,51 @@ +:py:mod:`data.oc.tests.test_adsorbate_slab_config` +================================================== + +.. py:module:: data.oc.tests.test_adsorbate_slab_config + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.test_adsorbate_slab_config.TestAdslab + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.test_adsorbate_slab_config.load_data + + + +.. py:function:: load_data(request) + + +.. py:class:: TestAdslab + + + .. py:method:: test_adslab_init() + + + .. py:method:: test_num_augmentations_per_site() + + + .. py:method:: test_placement_overlap() + + Test that the adsorbate does not overlap with the slab. + + + .. 
py:method:: test_is_adsorbate_com_on_normal() + + + .. py:method:: test_is_adsorbate_binding_atom_on_normal() + + + diff --git a/_sources/autoapi/data/oc/tests/test_bulk/index.rst b/_sources/autoapi/data/oc/tests/test_bulk/index.rst new file mode 100644 index 000000000..b90c9fdcd --- /dev/null +++ b/_sources/autoapi/data/oc/tests/test_bulk/index.rst @@ -0,0 +1,76 @@ +:py:mod:`data.oc.tests.test_bulk` +================================= + +.. py:module:: data.oc.tests.test_bulk + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.test_bulk.TestBulk + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.test_bulk.load_bulk + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.test_bulk._test_db + + +.. py:function:: load_bulk(request) + + +.. py:data:: _test_db + + + +.. py:class:: TestBulk + + + .. py:method:: test_bulk_init_from_id() + + + .. py:method:: test_bulk_init_from_src_id() + + + .. py:method:: test_bulk_init_random() + + + .. py:method:: test_bulk_init_from_id_with_db() + + + .. py:method:: test_bulk_init_from_src_id_with_db() + + + .. py:method:: test_bulk_init_random_with_db() + + + .. py:method:: test_unique_slab_enumeration() + + + .. py:method:: test_precomputed_slab() + + + .. py:method:: test_slab_miller_enumeration() + + + .. py:method:: get_max_miller(slabs) + + + diff --git a/_sources/autoapi/data/oc/tests/test_inputs/index.rst b/_sources/autoapi/data/oc/tests/test_inputs/index.rst new file mode 100644 index 000000000..37b04e847 --- /dev/null +++ b/_sources/autoapi/data/oc/tests/test_inputs/index.rst @@ -0,0 +1,40 @@ +:py:mod:`data.oc.tests.test_inputs` +=================================== + +.. py:module:: data.oc.tests.test_inputs + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.test_inputs.TestVasp + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.test_inputs.load_data + + + +.. py:function:: load_data(request) + + +.. py:class:: TestVasp + + + .. py:method:: test_cleanup() + + + .. py:method:: test_unique_kpts() + + + diff --git a/_sources/autoapi/data/oc/tests/test_multi_adsorbate_slab_config/index.rst b/_sources/autoapi/data/oc/tests/test_multi_adsorbate_slab_config/index.rst new file mode 100644 index 000000000..5f82e34bb --- /dev/null +++ b/_sources/autoapi/data/oc/tests/test_multi_adsorbate_slab_config/index.rst @@ -0,0 +1,47 @@ +:py:mod:`data.oc.tests.test_multi_adsorbate_slab_config` +======================================================== + +.. py:module:: data.oc.tests.test_multi_adsorbate_slab_config + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.test_multi_adsorbate_slab_config.TestMultiAdslab + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.test_multi_adsorbate_slab_config.load_data + + + +.. py:function:: load_data(request) + + +.. py:class:: TestMultiAdslab + + + .. py:method:: test_num_configurations() + + + .. py:method:: test_adsorbate_indices() + + Test that the adsorbate indices correspond to the unique adsorbates. + + + .. py:method:: test_placement_overlap() + + Test that the adsorbate sites do not overlap with each other. 
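For illustration, here is a minimal sketch of the kind of pairwise overlap check these placement tests describe. It is not the repository's actual test: the toy structure, the `adsorbates_overlap` helper, and the `min_separation` threshold are hypothetical, and the only assumption taken from the docs is the OCP-style tagging convention where adsorbate atoms carry tag 2.

.. code-block:: python

   import numpy as np
   from ase.build import fcc111, add_adsorbate

   # Toy adslab: a Cu(111) slab with two O adatoms tagged as adsorbates (tag == 2).
   atoms = fcc111("Cu", size=(3, 3, 3), vacuum=10.0)
   atoms.set_tags([1] * len(atoms))
   add_adsorbate(atoms, "O", height=2.0, position=(2.0, 2.0))
   add_adsorbate(atoms, "O", height=2.0, position=(5.0, 4.0))
   tags = atoms.get_tags()
   tags[-2:] = 2
   atoms.set_tags(tags)

   def adsorbates_overlap(atoms, min_separation=1.0):
       """Return True if any two adsorbate atoms (tag == 2) are closer than min_separation (Angstrom)."""
       ads_idx = [i for i, tag in enumerate(atoms.get_tags()) if tag == 2]
       for n, i in enumerate(ads_idx[:-1]):
           # Minimum-image convention accounts for periodic boundary conditions.
           distances = atoms.get_distances(i, ads_idx[n + 1:], mic=True)
           if np.any(distances < min_separation):
               return True
       return False

   assert not adsorbates_overlap(atoms)  # the two placements above are well separated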
+ + + diff --git a/_sources/autoapi/data/oc/tests/test_slab/index.rst b/_sources/autoapi/data/oc/tests/test_slab/index.rst new file mode 100644 index 000000000..daad7282f --- /dev/null +++ b/_sources/autoapi/data/oc/tests/test_slab/index.rst @@ -0,0 +1,32 @@ +:py:mod:`data.oc.tests.test_slab` +================================= + +.. py:module:: data.oc.tests.test_slab + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.tests.test_slab.TestSlab + + + + +.. py:class:: TestSlab + + + .. py:method:: test_slab_init_from_id() + + + .. py:method:: test_slab_init_from_specific_millers() + + + .. py:method:: test_slab_init_random() + + + diff --git a/_sources/autoapi/data/oc/utils/flag_anomaly/index.rst b/_sources/autoapi/data/oc/utils/flag_anomaly/index.rst new file mode 100644 index 000000000..9e77d6a61 --- /dev/null +++ b/_sources/autoapi/data/oc/utils/flag_anomaly/index.rst @@ -0,0 +1,72 @@ +:py:mod:`data.oc.utils.flag_anomaly` +==================================== + +.. py:module:: data.oc.utils.flag_anomaly + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.utils.flag_anomaly.DetectTrajAnomaly + + + + +.. py:class:: DetectTrajAnomaly(init_atoms, final_atoms, atoms_tag, final_slab_atoms=None, surface_change_cutoff_multiplier=1.5, desorption_cutoff_multiplier=1.5) + + + .. py:method:: is_adsorbate_dissociated() + + Tests if the initial adsorbate connectivity is maintained. + + :returns: True if the connectivity was not maintained, otherwise False + :rtype: (bool) + + + .. py:method:: has_surface_changed() + + Tests bond breaking / forming events within a tolerance on the surface so + that systems with significant adsorbate induces surface changes may be discarded + since the reference to the relaxed slab may no longer be valid. + + :returns: True if the surface is reconstructed, otherwise False + :rtype: (bool) + + + .. py:method:: is_adsorbate_desorbed() + + If the adsorbate binding atoms have no connection with slab atoms, + consider it desorbed. + + :returns: True if there is desorption, otherwise False + :rtype: (bool) + + + .. py:method:: _get_connectivity(atoms, cutoff_multiplier=1.0) + + Generate the connectivity of an atoms obj. + + :param atoms: object which will have its connectivity considered + :type atoms: ase.Atoms + :param cutoff_multiplier: cushion for small atom movements when assessing + atom connectivity + :type cutoff_multiplier: float, optional + + :returns: The connectivity matrix of the atoms object. + :rtype: (np.ndarray) + + + .. py:method:: is_adsorbate_intercalated() + + Ensure the adsorbate isn't interacting with an atom that is not allowed to relax. + + :returns: True if any adsorbate atom neighbors a frozen atom, otherwise False + :rtype: (bool) + + + diff --git a/_sources/autoapi/data/oc/utils/index.rst b/_sources/autoapi/data/oc/utils/index.rst new file mode 100644 index 000000000..55f044ea0 --- /dev/null +++ b/_sources/autoapi/data/oc/utils/index.rst @@ -0,0 +1,82 @@ +:py:mod:`data.oc.utils` +======================= + +.. py:module:: data.oc.utils + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + flag_anomaly/index.rst + vasp/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + data.oc.utils.DetectTrajAnomaly + + + + +.. py:class:: DetectTrajAnomaly(init_atoms, final_atoms, atoms_tag, final_slab_atoms=None, surface_change_cutoff_multiplier=1.5, desorption_cutoff_multiplier=1.5) + + + .. 
py:method:: is_adsorbate_dissociated() + + Tests if the initial adsorbate connectivity is maintained. + + :returns: True if the connectivity was not maintained, otherwise False + :rtype: (bool) + + + .. py:method:: has_surface_changed() + + Tests bond breaking / forming events within a tolerance on the surface so + that systems with significant adsorbate-induced surface changes may be discarded, + since the reference to the relaxed slab may no longer be valid. + + :returns: True if the surface is reconstructed, otherwise False + :rtype: (bool) + + + .. py:method:: is_adsorbate_desorbed() + + If the adsorbate binding atoms have no connection with slab atoms, + consider it desorbed. + + :returns: True if there is desorption, otherwise False + :rtype: (bool) + + + .. py:method:: _get_connectivity(atoms, cutoff_multiplier=1.0) + + Generate the connectivity of an atoms obj. + + :param atoms: object which will have its connectivity considered + :type atoms: ase.Atoms + :param cutoff_multiplier: cushion for small atom movements when assessing + atom connectivity + :type cutoff_multiplier: float, optional + + :returns: The connectivity matrix of the atoms object. + :rtype: (np.ndarray) + + + .. py:method:: is_adsorbate_intercalated() + + Ensure the adsorbate isn't interacting with an atom that is not allowed to relax. + + :returns: True if any adsorbate atom neighbors a frozen atom, otherwise False + :rtype: (bool) + + + diff --git a/_sources/autoapi/data/oc/utils/vasp/index.rst b/_sources/autoapi/data/oc/utils/vasp/index.rst new file mode 100644 index 000000000..9fac178ee --- /dev/null +++ b/_sources/autoapi/data/oc/utils/vasp/index.rst @@ -0,0 +1,93 @@ +:py:mod:`data.oc.utils.vasp` +============================ + +.. py:module:: data.oc.utils.vasp + +.. autoapi-nested-parse:: + + This submodule contains the scripts that we used to run VASP. + + Note that some of these scripts were taken and modified from + [GASpy](https://github.com/ulissigroup/GASpy) with permission of the authors. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.oc.utils.vasp._clean_up_inputs + data.oc.utils.vasp.calculate_surface_k_points + data.oc.utils.vasp.write_vasp_input_files + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + data.oc.utils.vasp.__author__ + data.oc.utils.vasp.__email__ + data.oc.utils.vasp.VASP_FLAGS + data.oc.utils.vasp.BULK_VASP_FLAGS + + +.. py:data:: __author__ + :value: 'Kevin Tran' + + + +.. py:data:: __email__ + :value: 'ktran@andrew.cmu.edu' + + + +.. py:data:: VASP_FLAGS + + + +.. py:data:: BULK_VASP_FLAGS + + + +.. py:function:: _clean_up_inputs(atoms, vasp_flags) + + Parses the inputs and makes sure some things are straightened out. + + :param atoms: `ase.Atoms` object of the structure we want to relax + :param vasp_flags: A dictionary of settings we want to pass to the `Vasp` + calculator + :returns: + + atoms `ase.Atoms` object of the structure we want to relax, but + with the unit vectors fixed (if needed) + vasp_flags A modified version of the 'vasp_flags' argument + + +.. py:function:: calculate_surface_k_points(atoms) + + For surface calculations, it's good practice to choose the k-point + mesh based on the unit cell size. We do that on the spot here. + + :param atoms: `ase.Atoms` object of the structure we want to relax + :returns: k_pts A 3-tuple of integers indicating the k-point mesh to use + + +.. 
py:function:: write_vasp_input_files(atoms, outdir='.', vasp_flags=None) + + Effectively goes through the same motions as the `run_vasp` function, + except it only writes the input files instead of running. + + :param atoms `ase.Atoms` object that we want to relax.: + :param outdir A string indicating where you want to save the input files.: Defaults to '.' + :param vasp_flags A dictionary of settings we want to pass to the `Vasp`: calculator. Defaults to a standerd set of values if `None` + + diff --git a/_sources/autoapi/data/odac/force_field/FF_analysis/index.rst b/_sources/autoapi/data/odac/force_field/FF_analysis/index.rst new file mode 100644 index 000000000..97bfda83e --- /dev/null +++ b/_sources/autoapi/data/odac/force_field/FF_analysis/index.rst @@ -0,0 +1,67 @@ +:py:mod:`data.odac.force_field.FF_analysis` +=========================================== + +.. py:module:: data.odac.force_field.FF_analysis + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.odac.force_field.FF_analysis.get_data + data.odac.force_field.FF_analysis.binned_average + data.odac.force_field.FF_analysis.bin_plot + data.odac.force_field.FF_analysis.get_Fig4a + data.odac.force_field.FF_analysis.get_Fig4b + data.odac.force_field.FF_analysis.get_Fig4c + data.odac.force_field.FF_analysis.get_Fig4d + data.odac.force_field.FF_analysis.phys_err + data.odac.force_field.FF_analysis.chem_err + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + data.odac.force_field.FF_analysis.infile + + +.. py:function:: get_data(infile, limit=2) + + +.. py:function:: binned_average(DFT_ads, pred_err, bins) + + +.. py:function:: bin_plot(ax, bins, heights, **kwargs) + + +.. py:function:: get_Fig4a(raw_error_CO2, raw_error_H2O, b=20, outfile='Fig5a.png') + + +.. py:function:: get_Fig4b(int_DFT_CO2, err_CO2, int_DFT_H2O, err_H2O, outfile='Fig5b.png') + + +.. py:function:: get_Fig4c(DFT_CO2, err_CO2, outfile='Fig5c.png') + + +.. py:function:: get_Fig4d(DFT_H2O, err_H2O, outfile='Fig5d.png') + + +.. py:function:: phys_err(DFT, FF) + + +.. py:function:: chem_err(DFT, FF) + + +.. py:data:: infile + :value: '/storage/home/hcoda1/8/lbrabson3/p-amedford6-0/s2ef/final/data_w_oms.json' + + + diff --git a/_sources/autoapi/data/odac/index.rst b/_sources/autoapi/data/odac/index.rst new file mode 100644 index 000000000..020f66de6 --- /dev/null +++ b/_sources/autoapi/data/odac/index.rst @@ -0,0 +1,6 @@ +:py:mod:`data.odac` +=================== + +.. py:module:: data.odac + + diff --git a/_sources/autoapi/data/odac/promising_mof/promising_mof_energies/energy/index.rst b/_sources/autoapi/data/odac/promising_mof/promising_mof_energies/energy/index.rst new file mode 100644 index 000000000..678b3db9e --- /dev/null +++ b/_sources/autoapi/data/odac/promising_mof/promising_mof_energies/energy/index.rst @@ -0,0 +1,208 @@ +:py:mod:`data.odac.promising_mof.promising_mof_energies.energy` +=============================================================== + +.. py:module:: data.odac.promising_mof.promising_mof_energies.energy + + +Module Contents +--------------- + +.. py:data:: raw_ads_energy_data + + + +.. py:data:: complete_data + + + +.. py:data:: temp_split_string + + + +.. py:data:: complete_data + + + +.. py:data:: complete_data_merged_pristine + + + +.. py:data:: complete_data_merged_pristine + + + +.. py:data:: complete_data_merged_defective + + + +.. py:data:: complete_data_merged_defective + + + +.. py:data:: complete_data_merged_pristine_co2 + + + +.. py:data:: complete_data_merged_pristine_h2o + + + +.. 
py:data:: complete_data_merged_pristine_co_ads + + + +.. py:data:: complete_data_merged_pristine_co_ads_2 + + + +.. py:data:: complete_data_merged_defective_co2 + + + +.. py:data:: complete_data_merged_defective_h2o + + + +.. py:data:: complete_data_merged_defective_co_ads + + + +.. py:data:: complete_data_merged_defective_co_ads_2 + + + +.. py:data:: lowest_energy_data_co2 + + + +.. py:data:: current_entry + + + +.. py:data:: lowest_energy_data_h2o + + + +.. py:data:: current_entry + + + +.. py:data:: lowest_energy_data_co_ads + + + +.. py:data:: current_entry + + + +.. py:data:: lowest_energy_data_co_ads_2 + + + +.. py:data:: current_entry + + + +.. py:data:: adsorption_data + + + +.. py:data:: count + :value: 0 + + + +.. py:data:: lowest_energy_data_co2_defective + + + +.. py:data:: current_entry + + + +.. py:data:: lowest_energy_data_h2o_defective + + + +.. py:data:: current_entry + + + +.. py:data:: lowest_energy_data_co_ads_defective + + + +.. py:data:: current_entry + + + +.. py:data:: lowest_energy_data_co_ads_2_defective + + + +.. py:data:: current_entry + + + +.. py:data:: adsorption_data_defective + + + +.. py:data:: unique_combinations_count + + + +.. py:data:: def_counts_df + + + +.. py:data:: mof_name + + + +.. py:data:: missing_DDEC + + + +.. py:data:: missing_DDEC_pristine + + + +.. py:data:: missing_DDEC_defective + + + +.. py:data:: index_drop_ddec_pristine + :value: [] + + + +.. py:data:: adsorption_data + + + +.. py:data:: index_drop_ddec_defective + :value: [] + + + +.. py:data:: adsorption_data_defective + + + +.. py:data:: adsorption_data + + + +.. py:data:: adsorption_data_defective + + + +.. py:data:: promising_pristine + + + +.. py:data:: promising_defective + + + diff --git a/_sources/autoapi/data/om/biomolecules/geom/sample_geom_drugs/index.rst b/_sources/autoapi/data/om/biomolecules/geom/sample_geom_drugs/index.rst new file mode 100644 index 000000000..5f4233c69 --- /dev/null +++ b/_sources/autoapi/data/om/biomolecules/geom/sample_geom_drugs/index.rst @@ -0,0 +1,30 @@ +:py:mod:`data.om.biomolecules.geom.sample_geom_drugs` +===================================================== + +.. py:module:: data.om.biomolecules.geom.sample_geom_drugs + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.om.biomolecules.geom.sample_geom_drugs.write_pickle + data.om.biomolecules.geom.sample_geom_drugs.parse_args + data.om.biomolecules.geom.sample_geom_drugs.main + + + +.. py:function:: write_pickle(data, path) + + +.. py:function:: parse_args() + + +.. py:function:: main() + + diff --git a/_sources/autoapi/data/om/biomolecules/geom/write_geom_drugs_structures/index.rst b/_sources/autoapi/data/om/biomolecules/geom/write_geom_drugs_structures/index.rst new file mode 100644 index 000000000..15ca16683 --- /dev/null +++ b/_sources/autoapi/data/om/biomolecules/geom/write_geom_drugs_structures/index.rst @@ -0,0 +1,26 @@ +:py:mod:`data.om.biomolecules.geom.write_geom_drugs_structures` +=============================================================== + +.. py:module:: data.om.biomolecules.geom.write_geom_drugs_structures + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.om.biomolecules.geom.write_geom_drugs_structures.parse_args + data.om.biomolecules.geom.write_geom_drugs_structures.main + + + +.. py:function:: parse_args() + + +.. 
py:function:: main() + + diff --git a/_sources/autoapi/data/om/index.rst b/_sources/autoapi/data/om/index.rst new file mode 100644 index 000000000..3c6e2a4ca --- /dev/null +++ b/_sources/autoapi/data/om/index.rst @@ -0,0 +1,6 @@ +:py:mod:`data.om` +================= + +.. py:module:: data.om + + diff --git a/_sources/autoapi/data/om/omdata/orca/calc/index.rst b/_sources/autoapi/data/om/omdata/orca/calc/index.rst new file mode 100644 index 000000000..136395ad8 --- /dev/null +++ b/_sources/autoapi/data/om/omdata/orca/calc/index.rst @@ -0,0 +1,66 @@ +:py:mod:`data.om.omdata.orca.calc` +================================== + +.. py:module:: data.om.omdata.orca.calc + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.om.omdata.orca.calc.write_orca_inputs + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + data.om.omdata.orca.calc.ORCA_FUNCTIONAL + data.om.omdata.orca.calc.ORCA_BASIS + data.om.omdata.orca.calc.ORCA_SIMPLE_INPUT + data.om.omdata.orca.calc.ORCA_BLOCKS + data.om.omdata.orca.calc.ORCA_ASE_SIMPLE_INPUT + data.om.omdata.orca.calc.OPT_PARAMETERS + + +.. py:data:: ORCA_FUNCTIONAL + :value: 'wB97M-V' + + + +.. py:data:: ORCA_BASIS + :value: 'def2-TZVPD' + + + +.. py:data:: ORCA_SIMPLE_INPUT + :value: ['EnGrad', 'RIJCOSX', 'def2/J', 'NoUseSym', 'DIIS', 'NOSOSCF', 'NormalConv', 'DEFGRID3', 'ALLPOP', 'NBO'] + + + +.. py:data:: ORCA_BLOCKS + :value: ['%scf Convergence Tight maxiter 300 end', '%elprop Dipole true Quadrupole true end', '%nbo... + + + +.. py:data:: ORCA_ASE_SIMPLE_INPUT + + + +.. py:data:: OPT_PARAMETERS + + + +.. py:function:: write_orca_inputs(atoms, output_directory, charge=0, mult=1, orcasimpleinput=ORCA_ASE_SIMPLE_INPUT, orcablocks=' '.join(ORCA_BLOCKS)) + + One-off method to be used if you wanted to write inputs for an arbitrary + system. Primarily used for debugging. + + diff --git a/_sources/autoapi/data/om/omdata/orca/index.rst b/_sources/autoapi/data/om/omdata/orca/index.rst new file mode 100644 index 000000000..cf1133587 --- /dev/null +++ b/_sources/autoapi/data/om/omdata/orca/index.rst @@ -0,0 +1,16 @@ +:py:mod:`data.om.omdata.orca` +============================= + +.. py:module:: data.om.omdata.orca + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + calc/index.rst + recipes/index.rst + + diff --git a/_sources/autoapi/data/om/omdata/orca/recipes/index.rst b/_sources/autoapi/data/om/omdata/orca/recipes/index.rst new file mode 100644 index 000000000..4fa66ad21 --- /dev/null +++ b/_sources/autoapi/data/om/omdata/orca/recipes/index.rst @@ -0,0 +1,76 @@ +:py:mod:`data.om.omdata.orca.recipes` +===================================== + +.. py:module:: data.om.omdata.orca.recipes + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + data.om.omdata.orca.recipes.single_point_calculation + data.om.omdata.orca.recipes.ase_relaxation + + + +.. py:function:: single_point_calculation(atoms, charge, spin_multiplicity, xc=ORCA_FUNCTIONAL, basis=ORCA_BASIS, orcasimpleinput=None, orcablocks=None, nprocs=12, outputdir=os.getcwd(), **calc_kwargs) + + Wrapper around QUACC's static job to standardize single-point calculations. + See github.com/Quantum-Accelerators/quacc/blob/main/src/quacc/recipes/orca/core.py#L22 + for more details. 
+ + :param atoms: Atoms object + :type atoms: Atoms + :param charge: Charge of system + :type charge: int + :param spin_multiplicity: Multiplicity of the system + :type spin_multiplicity: int + :param xc: Exchange-correlaction functional + :type xc: str + :param basis: Basis set + :type basis: str + :param orcasimpleinput: List of `orcasimpleinput` settings for the calculator + :type orcasimpleinput: list + :param orcablocks: List of `orcablocks` swaps for the calculator + :type orcablocks: list + :param nprocs: Number of processes to parallelize across + :type nprocs: int + :param outputdir: Directory to move results to upon completion + :type outputdir: str + :param calc_kwargs: Additional kwargs for the custom Orca calculator + + +.. py:function:: ase_relaxation(atoms, charge, spin_multiplicity, xc=ORCA_FUNCTIONAL, basis=ORCA_BASIS, orcasimpleinput=None, orcablocks=None, nprocs=12, opt_params=None, outputdir=os.getcwd(), **calc_kwargs) + + Wrapper around QUACC's ase_relax_job to standardize geometry optimizations. + See github.com/Quantum-Accelerators/quacc/blob/main/src/quacc/recipes/orca/core.py#L22 + for more details. + + :param atoms: Atoms object + :type atoms: Atoms + :param charge: Charge of system + :type charge: int + :param spin_multiplicity: Multiplicity of the system + :type spin_multiplicity: int + :param xc: Exchange-correlaction functional + :type xc: str + :param basis: Basis set + :type basis: str + :param orcasimpleinput: List of `orcasimpleinput` settings for the calculator + :type orcasimpleinput: list + :param orcablocks: List of `orcablocks` swaps for the calculator + :type orcablocks: list + :param nprocs: Number of processes to parallelize across + :type nprocs: int + :param opt_params: Dictionary of optimizer parameters + :type opt_params: dict + :param outputdir: Directory to move results to upon completion + :type outputdir: str + :param calc_kwargs: Additional kwargs for the custom Orca calculator + + diff --git a/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/2023_neurips_challenge/challenge_eval/index.rst b/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/2023_neurips_challenge/challenge_eval/index.rst deleted file mode 100644 index a1374a53b..000000000 --- a/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/2023_neurips_challenge/challenge_eval/index.rst +++ /dev/null @@ -1,83 +0,0 @@ -:py:mod:`fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval` -======================================================================================= - -.. py:module:: fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval.is_successful - fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval.compute_valid_ml_success - fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval.get_dft_data - fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval.process_ml_data - fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval.parse_args - fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval.main - - - -.. py:function:: is_successful(best_pred_energy, best_dft_energy, SUCCESS_THRESHOLD=0.1) - - Computes the success rate given the best predicted energy - and the best ground truth DFT energy. 
- - success_parity: The standard definition for success, where ML needs to be - within the SUCCESS_THRESHOLD, or lower, of the DFT energy. - - Returns: Bool - - -.. py:function:: compute_valid_ml_success(ml_data, dft_data) - - Computes validated ML success rates. - Here, results are generated only from ML. DFT single-points are used to - validate whether the ML energy is within 0.1eV of the DFT energy of the - predicted structure. If valid, the ML energy is compared to the ground - truth DFT energy, otherwise it is discarded. - - Return validated ML success rates. - - -.. py:function:: get_dft_data(targets) - - Organizes the released target mapping for evaluation lookup. - - Returns: Dict: - { - 'system_id 1': {'config_id 1': dft_ads_energy, 'config_id 2': dft_ads_energy}, - 'system_id 2': {'config_id 1': dft_ads_energy, 'config_id 2': dft_ads_energy}, - ... - } - - -.. py:function:: process_ml_data(results_file, model, metadata, ml_dft_targets, dft_data) - - For ML systems in which no configurations made it through the physical - constraint checks, set energies to an arbitrarily high value to ensure - a failure case in evaluation. - - Returns: Dict: - { - 'system_id 1': {'config_id 1': {'ml_energy': predicted energy, 'ml+dft_energy': dft energy of ML structure} ...}, - 'system_id 2': {'config_id 1': {'ml_energy': predicted energy, 'ml+dft_energy': dft energy of ML structure} ...}, - ... - } - - -.. py:function:: parse_args() - - -.. py:function:: main() - - This script takes in your prediction file (npz format) - and the ML model name used for ML relaxations. - Then using a mapping file, dft ground truth energy, - and ML relaxed dft energy returns the success rate of your predictions. - - diff --git a/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/dense_eval/index.rst b/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/dense_eval/index.rst deleted file mode 100644 index 26d1631f6..000000000 --- a/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/dense_eval/index.rst +++ /dev/null @@ -1,177 +0,0 @@ -:py:mod:`fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval` -==================================================================== - -.. py:module:: fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval - -.. autoapi-nested-parse:: - - AdsorbML evaluation script. This script expects the results-file to be - organized in a very specific structure in order to evaluate successfully. - - Results are to be saved out in a dictionary pickle file, where keys are the - `system_id` and the values are energies and compute information for a - specified `config_id`. For each `config_id` that successfully passes the - physical constraints defined in the manuscript, the following information must - be provided: - - ml_energy: The ML predicted adsorption energy on that particular `config_id`. - - ml+dft_energy: The DFT adsorption energy (SP or RX) as evaluated on - the predicted ML `config_id` structure. Do note use raw DFT energies, - ensure these are referenced correctly. None if not available. - - scf_steps: Total number of SCF steps involved in determining the DFT - adsorption energy on the predicted ML `config_id`. For relaxation - methods (ML+RX), sum all SCF steps across all frames. 0 if not - available. - - ionic_steps: Total number of ionic steps in determining the DFT - adsorption energy on the predicted ML `config_id`. This will be 1 for - single-point methods (ML+SP). 0 if not available. 
- - NOTE - It is possible that due to the required filtering of physical - constraints, no configurations are valid for a particular `system_id`. In - this case the system or config id can be excluded entirely from the - results file and will be treated as a failure point at evaluation time. - - e.g. - { - "6_1134_23": - { - "rand11": { - "ml_energy": -1.234, - "ml+dft_energy": -1.456, - "scf_steps": 33, - "ionic_steps": 1, - }, - "rand5": { - "ml_energy": -2.489, - "ml+dft_energy": -2.109, - "scf_steps": 16, - "ionic_steps": 1, - }, - . - . - . - }, - "7_6566_62" : - { - "rand79": { - "ml_energy": -1.234, - "ml+dft_energy": -1.456, - "scf_steps": 33, - "ionic_steps": 1, - }, - . - . - . - - }, - . - . - . - } - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.is_successful - fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.compute_hybrid_success - fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.compute_valid_ml_success - fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.get_dft_data - fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.get_dft_compute - fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.filter_ml_data - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.SUCCESS_THRESHOLD - fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.parser - - -.. py:data:: SUCCESS_THRESHOLD - :value: 0.1 - - - -.. py:function:: is_successful(best_ml_dft_energy, best_dft_energy) - - Computes the success rate given the best ML+DFT energy and the best ground - truth DFT energy. - - - success_parity: The standard definition for success, where ML needs to be - within the SUCCESS_THRESHOLD, or lower, of the DFT energy. - - success_much_better: A system in which the ML energy is predicted to be - much lower (less than the SUCCESS_THRESHOLD) of the DFT energy. - - -.. py:function:: compute_hybrid_success(ml_data, dft_data, k) - - Computes AdsorbML success rates at varying top-k values. - Here, results are generated for the hybrid method, where the top-k ML - energies are used to to run DFT on the corresponding ML structures. The - resulting energies are then compared to the ground truth DFT energies. - - Return success rates and DFT compute usage at varying k. - - -.. py:function:: compute_valid_ml_success(ml_data, dft_data) - - Computes validated ML success rates. - Here, results are generated only from ML. DFT single-points are used to - validate whether the ML energy is within 0.1eV of the DFT energy of the - predicted structure. If valid, the ML energy is compared to the ground - truth DFT energy, otherwise it is discarded. - - Return validated ML success rates. - - -.. py:function:: get_dft_data(targets) - - Organizes the released target mapping for evaluation lookup. - - oc20dense_targets.pkl: - ['system_id 1': [('config_id 1', dft_adsorption_energy), ('config_id 2', dft_adsorption_energy)], `system_id 2] - - Returns: Dict: - { - 'system_id 1': {'config_id 1': dft_ads_energy, 'config_id 2': dft_ads_energy}, - 'system_id 2': {'config_id 1': dft_ads_energy, 'config_id 2': dft_ads_energy}, - ... - } - - -.. py:function:: get_dft_compute(counts) - - Calculates the total DFT compute associated with establishing a ground - truth using the released DFT timings: oc20dense_compute.pkl. - - Compute is measured in the total number of self-consistent steps (SC). 
The - total number of ionic steps is also included for reference. - - -.. py:function:: filter_ml_data(ml_data, dft_data) - - For ML systems in which no configurations made it through the physical - constraint checks, set energies to an arbitrarily high value to ensure - a failure case in evaluation. - - -.. py:data:: parser - - - diff --git a/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/process_mlrs/index.rst b/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/process_mlrs/index.rst deleted file mode 100644 index ebb3362fb..000000000 --- a/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/process_mlrs/index.rst +++ /dev/null @@ -1,72 +0,0 @@ -:py:mod:`fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs` -====================================================================== - -.. py:module:: fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs - -.. autoapi-nested-parse:: - - This script processes ML relaxations and sets it up for the next step. - - Reads final energy and structure for each relaxation - - Filters out anomalies - - Groups together all configurations for one adsorbate-surface system - - Sorts configs by lowest energy first - - The following files are saved out: - - cache_sorted_byE.pkl: dict going from the system ID (bulk, surface, adsorbate) - to a list of configs and their relaxed structures, sorted by lowest energy first. - This is later used by write_top_k_vasp.py. - - anomalies_by_sid.pkl: dict going from integer sid to boolean representing - whether it was an anomaly. Anomalies are already excluded from cache_sorted_byE.pkl - and this file is only used for extra analyses. - - errors_by_sid.pkl: any errors that occurred - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs.parse_args - fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs.min_diff - fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs.process_mlrs - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs.SURFACE_CHANGE_CUTOFF_MULTIPLIER - fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs.DESORPTION_CUTOFF_MULTIPLIER - fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs.args - - -.. py:data:: SURFACE_CHANGE_CUTOFF_MULTIPLIER - :value: 1.5 - - - -.. py:data:: DESORPTION_CUTOFF_MULTIPLIER - :value: 1.5 - - - -.. py:function:: parse_args() - - -.. py:function:: min_diff(atoms_init, atoms_final) - - -.. py:function:: process_mlrs(arg) - - -.. py:data:: args - - - diff --git a/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/utils/index.rst b/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/utils/index.rst deleted file mode 100644 index 5801f7bbe..000000000 --- a/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/utils/index.rst +++ /dev/null @@ -1,46 +0,0 @@ -:py:mod:`fairchem.applications.AdsorbML.adsorbml.scripts.utils` -=============================================================== - -.. py:module:: fairchem.applications.AdsorbML.adsorbml.scripts.utils - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.applications.AdsorbML.adsorbml.scripts.utils.converged_oszicar - fairchem.applications.AdsorbML.adsorbml.scripts.utils.count_scf - - - -.. 
py:function:: converged_oszicar(path, nelm=60, ediff=0.0001, idx=0) - - --- FOR VASP USERS --- - - Given a folder containing DFT outputs, ensures the system has converged - electronically. - - :param path: Path to DFT outputs. - :param nelm: Maximum number of electronic steps used. - :param ediff: Energy difference condition for terminating the electronic loop. - :param idx: Frame to check for electronic convergence. 0 for SP, -1 for RX. - - -.. py:function:: count_scf(path) - - --- FOR VASP USERS --- - - Given a folder containing DFT outputs, compute total ionic and SCF steps - - :param path: Path to DFT outputs. - - :returns: Total number of electronic steps performed. - ionic_steps (int): Total number of ionic steps performed. - :rtype: scf_steps (int) - - diff --git a/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/write_top_k_vasp/index.rst b/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/write_top_k_vasp/index.rst deleted file mode 100644 index 55c17b46e..000000000 --- a/_sources/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/write_top_k_vasp/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -:py:mod:`fairchem.applications.AdsorbML.adsorbml.scripts.write_top_k_vasp` -========================================================================== - -.. py:module:: fairchem.applications.AdsorbML.adsorbml.scripts.write_top_k_vasp - - -Module Contents ---------------- - -.. py:data:: VASP_FLAGS - - - -.. py:data:: parser - - - diff --git a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/core/autoframe/index.rst b/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/core/autoframe/index.rst deleted file mode 100644 index 2b83468fb..000000000 --- a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/core/autoframe/index.rst +++ /dev/null @@ -1,533 +0,0 @@ -:py:mod:`fairchem.applications.CatTSunami.ocpneb.core.autoframe` -================================================================ - -.. py:module:: fairchem.applications.CatTSunami.ocpneb.core.autoframe - -.. autoapi-nested-parse:: - - Home of the AutoFrame classes which facillitate the generation of initial - and final frames for NEB calculations. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.applications.CatTSunami.ocpneb.core.autoframe.AutoFrame - fairchem.applications.CatTSunami.ocpneb.core.autoframe.AutoFrameDissociation - fairchem.applications.CatTSunami.ocpneb.core.autoframe.AutoFrameTransfer - fairchem.applications.CatTSunami.ocpneb.core.autoframe.AutoFrameDesorption - - - -Functions -~~~~~~~~~ - -.. 
autoapisummary:: - - fairchem.applications.CatTSunami.ocpneb.core.autoframe.interpolate_and_correct_frames - fairchem.applications.CatTSunami.ocpneb.core.autoframe.get_shortest_path - fairchem.applications.CatTSunami.ocpneb.core.autoframe.traverse_adsorbate_transfer - fairchem.applications.CatTSunami.ocpneb.core.autoframe.traverse_adsorbate_dissociation - fairchem.applications.CatTSunami.ocpneb.core.autoframe.traverse_adsorbate_desorption - fairchem.applications.CatTSunami.ocpneb.core.autoframe.get_product2_idx - fairchem.applications.CatTSunami.ocpneb.core.autoframe.traverse_adsorbate_general - fairchem.applications.CatTSunami.ocpneb.core.autoframe.unwrap_atoms - fairchem.applications.CatTSunami.ocpneb.core.autoframe.interpolate - fairchem.applications.CatTSunami.ocpneb.core.autoframe.is_edge_list_respected - fairchem.applications.CatTSunami.ocpneb.core.autoframe.reorder_edge_list - fairchem.applications.CatTSunami.ocpneb.core.autoframe.is_adsorbate_adsorbed - - - -.. py:class:: AutoFrame - - - Base class to hold functions that are shared across the reaction types. - - .. py:method:: reorder_adsorbate(frame: ase.Atoms, idx_mapping: dict) - - Given the adsorbate mapping, reorder the adsorbate atoms in the final frame so that - they match the initial frame to facillitate proper interpolation. - - :param frame: the atoms object for which the adsorbate will be reordered - :type frame: ase.Atoms - :param idx_mapping: the index mapping to reorder things - :type idx_mapping: dict - - :returns: the reordered adsorbate-slab configuration - :rtype: ase.Atoms - - - .. py:method:: only_keep_unique_systems(systems, energies) - - Remove duplicate systems from `systems` and `energies`. - - :param systems: the systems to remove duplicates from - :type systems: list[ase.Atoms] - :param energies: the energies to remove duplicates from - :type energies: list[float] - - :returns: the systems with duplicates removed - list[float]: the energies with duplicates removed - :rtype: list[ase.Atoms] - - - .. py:method:: get_most_proximate_symmetric_group(initial: ase.Atoms, frame: ase.Atoms) - - For cases where the adsorbate has symmetry and the leaving group could be different - atoms / sets of atoms, determine which one make the most sense given the geometry of - the initial and final frames. This is done by minimizing the total distance traveled - by all atoms from initial to final frame. - - :param initial: the initial adsorbate-surface configuration - :type initial: ase.Atoms - :param frame: the final adsorbate-surface configuration being considered. - :type frame: ase.Atoms - - :returns: the mapping to be used which specifies the most apt leaving group - int: the index of the mapping to be used - :rtype: dict - - - .. py:method:: are_all_adsorbate_atoms_overlapping(adsorbate1: ase.Atoms, adsorbate2: ase.Atoms) - - Test to see if all the adsorbate atoms are intersecting to find unique structures. - Systems where they are overlapping are considered the same. - - :param adsorbate1: just the adsorbate atoms of a structure that is being - compared - :type adsorbate1: ase.Atoms - :param adsorbate2: just the adsorbate atoms of the other structure that - is being compared - :type adsorbate2: ase.Atoms - - :returns: - - True if all adsorbate atoms are overlapping (structure is a match) - False if one or more of the adsorbate atoms do not overlap - :rtype: (bool) - - - -.. 
py:class:: AutoFrameDissociation(reaction: ocpneb.core.Reaction, reactant_system: ase.Atoms, product1_systems: list, product1_energies: list, product2_systems: list, product2_energies: list, r_product1_max: float = None, r_product2_max: float = None, r_product2_min: float = None) - - - Bases: :py:obj:`AutoFrame` - - Base class to hold functions that are shared across the reaction types. - - .. py:method:: get_neb_frames(calculator, n_frames: int = 5, n_pdt1_sites: int = 5, n_pdt2_sites: int = 5, fmax: float = 0.05, steps: int = 200) - - Propose final frames for NEB calculations. Perform a relaxation on the final - frame using the calculator provided. Interpolate between the initial - and final frames for a proposed reaction trajectory. Correct the trajectory if - there is any atomic overlap. - - :param calculator: an ase compatible calculator to be used to relax the final frame. - :param n_frames: the number of frames per reaction trajectory - :type n_frames: int - :param n_pdt1_sites: The number of product 1 sites to consider - :type n_pdt1_sites: int - :param n_pdt2_sites: The number of product 2 sites to consider. Note this is - multiplicative with `n_pdt1_sites` (i.e. if `n_pdt1_sites` = 2 and - `n_pdt2_sites` = 3 then a total of 6 final frames will be proposed) - :type n_pdt2_sites: int - :param fmax: force convergence criterion for final frame optimization - :type fmax: float - :param steps: step number termination criterion for final frame optimization - :type steps: int - - :returns: the initial reaction coordinates - :rtype: list[lists] - - - .. py:method:: get_best_sites_for_product1(n_sites: int = 5) - - Wrapper to find product 1 placements to be considered for the final frame - of the NEB. - - :param n_sites: The number of sites for product 1 to consider. Notice this is - multiplicative with product 2 sites (i.e. if 2 is specified here and 3 there) - then a total of 6 initial and final frames will be considered. - :type n_sites: int - - :returns: - - the lowest energy, proximate placements of product - 1 to be used in the final NEB frames - :rtype: (list[ase.Atoms]) - - - .. py:method:: get_best_unique_sites_for_product2(product1: ase.Atoms, n_sites: int = 5) - - Wrapper to find product 2 placements to be considered for the final frame - of the NEB. - - :param product1: The atoms object of the product 1 placement that will be - considered in this function to search for product 1 + product 2 combinations - for the final frame. - :type product1: ase.Atoms - :param n_sites: The number of sites for product 1 to consider. Notice this is - multiplicative with product 2 sites (i.e. if 2 is specified here and 3 there) - then a total of 6 initial and final frames will be considered. - :type n_sites: int - - :returns: - - the lowest energy, proximate placements of product - 2 to be used in the final NEB frames - :rtype: (list[ase.Atoms]) - - - .. py:method:: get_sites_within_r(center_coordinate: numpy.ndarray, all_systems: list, all_system_energies: list, all_systems_binding_idx: int, allowed_radius_max: float, allowed_radius_min: float, n_sites: int = 5) - - Get the n lowest energy, sites of the systems within r. For now n is - 5 or < 5 if there are fewer than 5 unique sites within r. - - :param center_coordinate: the coordinate about which r should be - centered. - :type center_coordinate: np.ndarray - :param all_systems: the list of all systems to be assessed for their - uniqueness and proximity to the center coordinate. 
- :type all_systems: list - :param all_systems_binding_idx: the idx of the adsorbate atom that is - bound in `all_systems` - :type all_systems_binding_idx: int - :param allowed_radius_max: the outer radius about `center_coordinate` - in which the adsorbate must lie to be considered. - :type allowed_radius_max: float - :param allowed_radius_min: the inner radius about `center_coordinate` - which the adsorbate must lie outside of to be considered. - :type allowed_radius_min: float - :param n_sites: the number of unique sites in r that will be chosen. - :type n_sites: int - - :returns: list of systems identified as candidates. - :rtype: (list[ase.Atoms]) - - - -.. py:class:: AutoFrameTransfer(reaction: ocpneb.core.Reaction, reactant1_systems: list, reactant2_systems: list, reactant1_energies: list, reactant2_energies: list, product1_systems: list, product1_energies: list, product2_systems: list, product2_energies: list, r_traverse_max: float, r_react_max: float, r_react_min: float) - - - Bases: :py:obj:`AutoFrame` - - Base class to hold functions that are shared across the reaction types. - - .. py:method:: get_neb_frames(calculator, n_frames: int = 10, n_initial_frames: int = 5, n_final_frames_per_initial: int = 5, fmax: float = 0.05, steps: int = 200) - - Propose final frames for NEB calculations. Perform a relaxation on the final - frame using the calculator provided. Linearly interpolate between the initial - and final frames for a proposed reaction trajectory. Correct the trajectory if - there is any atomic overlap. - - :param calculator: an ase compatible calculator to be used to relax the initial and - final frames. - :param n_frames: the number of frames per reaction trajectory - :type n_frames: int - :param n_initial_frames: The number of initial frames to consider - :type n_initial_frames: int - :param n_final_frames_per_initial: The number of final frames per inital frame to consider - :type n_final_frames_per_initial: int - :param fmax: force convergence criterion for final frame optimization - :type fmax: float - :param steps: step number termination criterion for final frame optimization - :type steps: int - - :returns: the initial reaction coordinates - :rtype: list[lists] - - - .. py:method:: get_system_pairs_initial() - - Get the initial frames for the NEB. This is done by finding the closest - pair of systems from `systems1` and `systems2` for which the interstitial distance - between all adsorbate atoms is less than `rmax` and greater than `rmin`. - - :returns: the initial frames for the NEB - list[float]: the pseudo energies of the initial frames (i.e just the sum of the - individual adsorption energies) - :rtype: list[ase.Atoms] - - - .. py:method:: get_system_pairs_final(system1_coord, system2_coord) - - Get the final frames for the NEB. This is done by finding the closest - pair of systems from `systems1` and `systems2` for which the distance - traversed by the adsorbate from the initial frame to the final frame is - less than `rmax` and the minimum interstitial distance between the two - products in greater than `rmin`. - - :returns: the initial frames for the NEB - list[float]: the pseudo energies of the initial frames - :rtype: list[ase.Atoms] - - - -.. py:class:: AutoFrameDesorption(reaction: ocpneb.core.Reaction, reactant_systems: list, reactant_energies: list, z_desorption: float) - - - Bases: :py:obj:`AutoFrame` - - Base class to hold functions that are shared across the reaction types. - - .. 
py:method:: get_neb_frames(calculator, n_frames: int = 5, n_systems: int = 5, fmax: float = 0.05, steps: int = 200) - - Propose final frames for NEB calculations. Perform a relaxation on the final - frame using the calculator provided. Linearly interpolate between the initial - and final frames for a proposed reaction trajectory. Correct the trajectory if - there is any atomic overlap. - - :param calculator: an ase compatible calculator to be used to relax the final frame. - :param n_frames: the number of frames per reaction trajectory - :type n_frames: int - :param n_pdt1_sites: The number of product 1 sites to consider - :type n_pdt1_sites: int - :param n_pdt2_sites: The number of product 2 sites to consider. Note this is - multiplicative with `n_pdt1_sites` (i.e. if `n_pdt1_sites` = 2 and - `n_pdt2_sites` = 3 then a total of 6 final frames will be proposed) - :type n_pdt2_sites: int - :param fmax: force convergence criterion for final frame optimization - :type fmax: float - :param steps: step number termination criterion for final frame optimization - :type steps: int - - :returns: the initial reaction coordinates - :rtype: list[lists] - - - -.. py:function:: interpolate_and_correct_frames(initial: ase.Atoms, final: ase.Atoms, n_frames: int, reaction: ocpneb.core.Reaction, map_idx: int) - - Given the initial and final frames, perform the following: - (1) Unwrap the final frame if it is wrapped around the cell - (2) Interpolate between the initial and final frames - - :param initial: the initial frame of the NEB - :type initial: ase.Atoms - :param final: the proposed final frame of the NEB - :type final: ase.Atoms - :param n_frames: The desired number of frames for the NEB (not including initial and final) - :type n_frames: int - :param reaction: the reaction object which provides pertinent info - :type reaction: ocpneb.core.Reaction - :param map_idx: the index of the mapping to use for the final frame - :type map_idx: int - - -.. py:function:: get_shortest_path(initial: ase.Atoms, final: ase.Atoms) - - Find the shortest path for all atoms about pbc and reorient the final frame so the - atoms align with this shortest path. This allows us to perform a linear interpolation - that does not interpolate jumps across pbc. - - :param initial: the initial frame of the NEB - :type initial: ase.Atoms - :param final: the proposed final frame of the NEB to be corrected - :type final: ase.Atoms - - :returns: the corrected final frame - (ase.Atoms): the initial frame tiled (3,3,1), which is used it later steps - (ase.Atoms): the final frame tiled (3,3,1), which is used it later steps - :rtype: (ase.Atoms) - - -.. py:function:: traverse_adsorbate_transfer(reaction: ocpneb.core.Reaction, initial: ase.Atoms, final: ase.Atoms, initial_tiled: ase.Atoms, final_tiled: ase.Atoms, edge_list_final: list) - - Traverse reactant 1, reactant 2, product 1 and product 2 in a depth first search of - the bond graph. Unwrap the atoms to minimize the distance over the bonds. This ensures - that when we perform the linear interpolation, the adsorbate moves as a single moity - and avoids accidental bond breaking events over pbc. 
- - :param reaction: the reaction object which provides pertinent info - :type reaction: ocpneb.core.Reaction - :param initial: the initial frame of the NEB - :type initial: ase.Atoms - :param final: the proposed final frame of the NEB to be corrected - :type final: ase.Atoms - :param initial_tiled: the initial frame tiled (3,3,1) - :type initial_tiled: ase.Atoms - :param final_tiled: the final frame tiled (3,3,1) - :type final_tiled: ase.Atoms - :param edge_list_final: the edge list of the final frame corrected with mapping - idx changes - :type edge_list_final: list - - :returns: the corrected initial frame - (ase.Atoms): the corrected final frame - :rtype: (ase.Atoms) - - -.. py:function:: traverse_adsorbate_dissociation(reaction: ocpneb.core.Reaction, initial: ase.Atoms, final: ase.Atoms, initial_tiled: ase.Atoms, final_tiled: ase.Atoms, edge_list_final: int) - - Traverse reactant 1, product 1 and product 2 in a depth first search of - the bond graph. Unwrap the atoms to minimize the distance over the bonds. This ensures - that when we perform the linear interpolation, the adsorbate moves as a single moity - and avoids accidental bond breaking events over pbc. - - :param reaction: the reaction object which provides pertinent info - :type reaction: ocpneb.core.Reaction - :param initial: the initial frame of the NEB - :type initial: ase.Atoms - :param final: the proposed final frame of the NEB to be corrected - :type final: ase.Atoms - :param initial_tiled: the initial frame tiled (3,3,1) - :type initial_tiled: ase.Atoms - :param final_tiled: the final frame tiled (3,3,1) - :type final_tiled: ase.Atoms - :param edge_list_final: the edge list of the final frame corrected with mapping - idx changes - :type edge_list_final: list - - :returns: the corrected initial frame - (ase.Atoms): the corrected final frame - :rtype: (ase.Atoms) - - -.. py:function:: traverse_adsorbate_desorption(reaction: ocpneb.core.Reaction, initial: ase.Atoms, final: ase.Atoms, initial_tiled: ase.Atoms, final_tiled: ase.Atoms) - - Traverse reactant 1 and product 1 in a depth first search of - the bond graph. Unwrap the atoms to minimize the distance over the bonds. This ensures - that when we perform the linear interpolation, the adsorbate moves as a single moity - and avoids accidental bond breaking events over pbc. - - :param reaction: the reaction object which provides pertinent info - :type reaction: ocpneb.core.Reaction - :param initial: the initial frame of the NEB - :type initial: ase.Atoms - :param final: the proposed final frame of the NEB to be corrected - :type final: ase.Atoms - :param initial_tiled: the initial frame tiled (3,3,1) - :type initial_tiled: ase.Atoms - :param final_tiled: the final frame tiled (3,3,1) - :type final_tiled: ase.Atoms - :param edge_list_final: the edge list of the final frame corrected with mapping - idx changes - :type edge_list_final: list - - :returns: the corrected initial frame - (ase.Atoms): the corrected final frame - :rtype: (ase.Atoms) - - -.. py:function:: get_product2_idx(reaction: ocpneb.core.Reaction, edge_list_final: list, traversal_rxt1_final: list) - - For dissociation only. Use the information about the initial edge list and final edge - list to determine which atom in product 2 lost a bond in the reaction and use this - as the binding index for traversal in `traverse_adsorbate_dissociation`. 
- - :param reaction: the reaction object which provides pertinent info - :type reaction: ocpneb.core.Reaction - :param edge_list_final: the edge list of the final frame corrected with mapping - idx changes - :type edge_list_final: list - :param traversal_rxt1_final: the traversal of reactant 1 for the final frame - :type traversal_rxt1_final: list - - :returns: the binding index of product 2 - :rtype: (int) - - -.. py:function:: traverse_adsorbate_general(traversal_rxt, slab_len: int, starting_node_idx: int, equivalent_idx_factors: numpy.ndarray, frame: ase.Atoms, frame_tiled: ase.Atoms) - - Perform the traversal to reposition atoms so that the distance along bonds is - minimized. - - :param traversal_rxt: the traversal of the adsorbate to be traversed. It is - the list of edges ordered by depth-first search. - :type traversal_rxt: list - :param slab_len: the number of atoms in the slab - :type slab_len: int - :param starting_node_idx: the index of the atom to start the traversal from - :type starting_node_idx: int - :param equivalent_idx_factors: the values to add to the untiled index - which gives equivalent indices (i.e. copies of that atom in the tiled system) - :type equivalent_idx_factors: np.ndarray - :param frame: the frame to be corrected - :type frame: ase.Atoms - :param frame_tiled: the tiled (3,3,1) version of the frame which will be - corrected - :type frame_tiled: ase.Atoms - - :returns: the corrected frame - :rtype: (ase.Atoms) - - -.. py:function:: unwrap_atoms(initial: ase.Atoms, final: ase.Atoms, reaction: ocpneb.core.Reaction, map_idx: int) - - Make corrections to the final frame so it is no longer wrapped around the cell, - if it has jumped over the pbc. Ensure that for each adsorbate moiety, absolute bond distances - for all edges that exist in the initial and final frames are minimized regardless of cell location. - This enforces that the traversal of the adsorbates happens along the same path, which is not - necessarily the minimum distance path for each atom. Changes are made in place. - - :param initial: the initial atoms object to which the final atoms should - be proximate - :type initial: ase.Atoms - :param final: the final atoms object to be corrected - :type final: ase.Atoms - :param reaction: the reaction object which provides pertinent info - :type reaction: ocpneb.core.Reaction - :param map_idx: the index of the mapping to use for the final frame - :type map_idx: int - - -.. py:function:: interpolate(initial_frame: ase.Atoms, final_frame: ase.Atoms, num_frames: int) - - Interpolate between the initial and final frames starting with a linear interpolation - along the atom-wise vectors from initial to final. Then iteratively correct the - positions so atomic overlap is avoided/reduced. When iteratively updating, the - positions of adjacent frames are considered to avoid large jumps in the trajectory. - - :param initial_frame: the initial frame which will be interpolated from - :type initial_frame: ase.Atoms - :param final_frame: the final frame which will be interpolated to - :type final_frame: ase.Atoms - :param num_frames: the number of frames to be interpolated between the initial - and final frames - :type num_frames: int - - :returns: the interpolated frames - :rtype: (list[ase.Atoms]) - - -.. py:function:: is_edge_list_respected(frame: ase.Atoms, edge_list: list) - - Check to see that the expected adsorbate-adsorbate edges are found and no additional - edges exist between the adsorbate atoms. - - :param frame: the atoms object for which edges will be checked. 
- This must comply with ocp tagging conventions. - :type frame: ase.Atoms - :param edge_list: The expected edges - :type edge_list: list[tuples] - - -.. py:function:: reorder_edge_list(edge_list: list, mapping: dict) - - For the final edge list, apply the mapping so the edges correspond to the correctly - concatenated object. - - :param edge_list: the final edgelist - :type edge_list: list[tuples] - :param mapping: the mapping so the final atoms concatenated have indices that correctly map - to the initial atoms. - - -.. py:function:: is_adsorbate_adsorbed(adsorbate_slab_config: ase.Atoms) - - Check to see if the adsorbate is adsorbed on the surface. - - :param adsorbate_slab_config: the combined adsorbate and slab configuration - with adsorbate atoms tagged as 2s and surface atoms tagged as 1s. - :type adsorbate_slab_config: ase.Atoms - - :returns: True if the adsorbate is adsorbed, False otherwise. - :rtype: (bool) - - diff --git a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/core/index.rst b/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/core/index.rst deleted file mode 100644 index f6e77d5f6..000000000 --- a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/core/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -:py:mod:`fairchem.applications.CatTSunami.ocpneb.core` -====================================================== - -.. py:module:: fairchem.applications.CatTSunami.ocpneb.core - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - autoframe/index.rst - ocpneb/index.rst - reaction/index.rst - - diff --git a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/core/ocpneb/index.rst b/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/core/ocpneb/index.rst deleted file mode 100644 index 480e7c668..000000000 --- a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/core/ocpneb/index.rst +++ /dev/null @@ -1,44 +0,0 @@ -:py:mod:`fairchem.applications.CatTSunami.ocpneb.core.ocpneb` -============================================================= - -.. py:module:: fairchem.applications.CatTSunami.ocpneb.core.ocpneb - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.applications.CatTSunami.ocpneb.core.ocpneb.OCPNEB - - - - -.. py:class:: OCPNEB(images, checkpoint_path, k=0.1, fmax=0.05, climb=False, parallel=False, remove_rotation_and_translation=False, world=None, dynamic_relaxation=True, scale_fmax=0.0, method='aseneb', allow_shared_calculator=False, precon=None, cpu=False, batch_size=4) - - - Bases: :py:obj:`ase.neb.DyNEB` - - .. py:method:: load_checkpoint(checkpoint_path: str) -> None - - Load existing trained model - - :param checkpoint_path: string - Path to trained model - - - .. py:method:: get_forces() - - Evaluate and return the forces. - - - .. py:method:: set_positions(positions) - - - .. py:method:: get_precon_forces(forces, energies, images) - - - diff --git a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/core/reaction/index.rst b/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/core/reaction/index.rst deleted file mode 100644 index 2e19e99d0..000000000 --- a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/core/reaction/index.rst +++ /dev/null @@ -1,30 +0,0 @@ -:py:mod:`fairchem.applications.CatTSunami.ocpneb.core.reaction` -=============================================================== - -.. py:module:: fairchem.applications.CatTSunami.ocpneb.core.reaction - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. 
autoapisummary:: - - fairchem.applications.CatTSunami.ocpneb.core.reaction.Reaction - - - - -.. py:class:: Reaction(reaction_db_path: str, adsorbate_db_path: str, reaction_id_from_db: int = None, reaction_str_from_db: str = None, reaction_type: str = None) - - - Initialize Reaction object - - .. py:method:: get_desorption_mapping(reactant) - - Get mapping for desorption reaction - - - diff --git a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/databases/index.rst b/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/databases/index.rst deleted file mode 100644 index 027a22b0e..000000000 --- a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/databases/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -:py:mod:`fairchem.applications.CatTSunami.ocpneb.databases` -=========================================================== - -.. py:module:: fairchem.applications.CatTSunami.ocpneb.databases - - -Package Contents ----------------- - -.. py:data:: DISSOCIATION_REACTION_DB_PATH - - - -.. py:data:: DESORPTION_REACTION_DB_PATH - - - -.. py:data:: TRANSFER_REACTION_DB_PATH - - - diff --git a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/run_validation/run_validation/index.rst b/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/run_validation/run_validation/index.rst deleted file mode 100644 index 3f0d5f4dd..000000000 --- a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/run_validation/run_validation/index.rst +++ /dev/null @@ -1,165 +0,0 @@ -:py:mod:`fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation` -=============================================================================== - -.. py:module:: fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation - -.. autoapi-nested-parse:: - - A python script to run a validation of the ML NEB model on a set of NEB calculations. - This script has not been written to run in parallel, but should be modified to do so. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.get_results_sp - fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.get_results_ml - fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.all_converged - fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.both_barrierless - fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.both_barriered - fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.barrierless_converged - fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.is_failed_sp - fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.parse_neb_info - fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.get_single_point - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.parser - - -.. py:function:: get_results_sp(df2: pandas.DataFrame) - - Get the % success and % convergence for the model considered with - single points performed on the transition states. - - :param df2: The dataframe containing the results of the - NEB calculations. - :type df2: pd.DataFrame - - :returns: - - a tuple of strings containing the % success and - % convergence - :rtype: (tuple[str]) - - -.. py:function:: get_results_ml(df2) - - Get the % success and % convergence for the model considered with - just ML energy and force calls. 
- - :param df2: The dataframe containing the results of the - NEB calculations. - :type df2: pd.DataFrame - - :returns: - - a tuple of strings containing the % success and - % convergence - :rtype: (tuple[str]) - - -.. py:function:: all_converged(row, ml=True) - - Dataframe function which makes the job of filtering to get % success cleaner. - It assesses the convergence. - - :param row: the dataframe row which the function is applied to - :param ml: boolean value. If `True` just the ML NEB and DFT NEB convergence are - considered. If `False`, the single point convergence is also considered. - - :returns: whether the system is converged - :rtype: bool - - -.. py:function:: both_barrierless(row) - - Dataframe function which makes the job of filtering to get % success cleaner. - It assesses if both DFT and ML find a barrierless transition state. - - :param row: the dataframe row which the function is applied to - - :returns: True if both ML and DFT find a barrierless transition state, False otherwise - :rtype: bool - - -.. py:function:: both_barriered(row) - - Dataframe function which makes the job of filtering to get % success cleaner. - It assesses if both DFT and ML find a barriered transition state. - - :param row: the dataframe row which the function is applied to - - :returns: True if both ML and DFT find a barriered transition state, False otherwise - :rtype: bool - - -.. py:function:: barrierless_converged(row) - - Dataframe function which makes the job of filtering to get % success cleaner. - It assesses if both DFT and ML find a barrierless, converged transition state. - - :param row: the dataframe row which the function is applied to - - :returns: - - True if both ML and DFT find a barrierless converged transition state, - False otherwise - :rtype: bool - - -.. py:function:: is_failed_sp(row) - - Dataframe function which makes the job of filtering to get % success cleaner. - It assesses if the single point failed. - - :param row: the dataframe row which the function is applied to - - :returns: True if the single point failed, otherwise False - :rtype: bool - - -.. py:function:: parse_neb_info(neb_frames: list, calc, conv: bool, entry: dict) - - At the conclusion of the ML NEB, this function processes the important - results and adds them to the entry dictionary. - - :param neb_frames: the ML relaxed NEB frames - :type neb_frames: list[ase.Atoms] - :param calc: the ocp ase Atoms calculator - :param conv: whether or not the NEB achieved forces below the threshold within - the number of allowed steps - :type conv: bool - :param entry: the entry corresponding to the NEB performed - :type entry: dict - - -.. py:function:: get_single_point(atoms: ase.Atoms, vasp_dir: str, vasp_flags: dict, vasp_command: str) - - Gets a single point on the atoms passed. - - :param atoms: the atoms object on which the single point will be performed - :type atoms: ase.Atoms - :param vasp_dir: the path where the vasp files should be written - :type vasp_dir: str - :param vasp_flags: a dictionary of the vasp INCAR flags - :param vasp_command: the command used to run VASP - :type vasp_command: str - - -.. py:data:: parser - - - diff --git a/_sources/autoapi/fairchem/applications/CatTSunami/ocpneb/run_validation/run_validation/index.rst b/_sources/autoapi/fairchem/core/_cli/index.rst deleted file mode 100644 index f5a57dc50..000000000 --- a/_sources/autoapi/fairchem/core/_cli/index.rst +++ /dev/null @@ -1,66 +0,0 @@ -:py:mod:`fairchem.core._cli` -============================ - -.. py:module:: fairchem.core._cli - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. 
and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core._cli.Runner - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core._cli.main - - - -.. py:class:: Runner(distributed: bool = False) - - - Bases: :py:obj:`submitit.helpers.Checkpointable` - - Derived callable classes are requeued after timeout with their current - state dumped at checkpoint. - - __call__ method must be implemented to make your class a callable. - - .. note:: - - The following implementation of the checkpoint method resubmits the full current - state of the callable (self) with the initial argument. You may want to replace the method to - curate the state (dump a neural network to a standard format and remove it from - the state so that not to pickle it) and change/remove the initial parameters. - - .. py:method:: __call__(config: dict) -> None - - - .. py:method:: checkpoint(*args, **kwargs) - - Resubmits the same callable with the same arguments - - - -.. py:function:: main() - - Run the main ocp-models program. - - diff --git a/_sources/autoapi/fairchem/core/common/data_parallel/index.rst b/_sources/autoapi/fairchem/core/common/data_parallel/index.rst deleted file mode 100644 index 14ea5fb18..000000000 --- a/_sources/autoapi/fairchem/core/common/data_parallel/index.rst +++ /dev/null @@ -1,169 +0,0 @@ -:py:mod:`fairchem.core.common.data_parallel` -============================================ - -.. py:module:: fairchem.core.common.data_parallel - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.data_parallel.OCPCollater - fairchem.core.common.data_parallel._HasMetadata - fairchem.core.common.data_parallel.StatefulDistributedSampler - fairchem.core.common.data_parallel.BalancedBatchSampler - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.data_parallel.balanced_partition - - - -.. py:class:: OCPCollater(otf_graph: bool = False) - - - .. py:method:: __call__(data_list: list[torch_geometric.data.Data]) -> torch_geometric.data.Batch - - - -.. py:function:: balanced_partition(sizes: numpy.typing.NDArray[numpy.int_], num_parts: int) - - Greedily partition the given set by always inserting - the largest element into the smallest partition. - - -.. py:class:: _HasMetadata - - - Bases: :py:obj:`Protocol` - - Base class for protocol classes. - - Protocol classes are defined as:: - - class Proto(Protocol): - def meth(self) -> int: - ... - - Such classes are primarily used with static type checkers that recognize - structural subtyping (static duck-typing). - - For example:: - - class C: - def meth(self) -> int: - return 0 - - def func(x: Proto) -> int: - return x.meth() - - func(C()) # Passes static type check - - See PEP 544 for details. Protocol classes decorated with - @typing.runtime_checkable act as simple-minded runtime protocols that check - only the presence of given attributes, ignoring their type signatures. - Protocol classes can be generic, they are defined as:: - - class GenProto(Protocol[T]): - def meth(self) -> T: - ... - - .. py:property:: metadata_path - :type: pathlib.Path - - - -.. 
py:class:: StatefulDistributedSampler(dataset, batch_size, **kwargs) - - - Bases: :py:obj:`torch.utils.data.DistributedSampler` - - More fine-grained state DataSampler that uses training iteration and epoch - both for shuffling data. PyTorch DistributedSampler only uses epoch - for the shuffling and starts sampling data from the start. In case of training - on very large data, we train for one epoch only and when we resume training, - we want to resume the data sampler from the training iteration. - - .. py:method:: __iter__() - - - .. py:method:: set_epoch_and_start_iteration(epoch, start_iter) - - - -.. py:class:: BalancedBatchSampler(dataset, batch_size: int, num_replicas: int, rank: int, device: torch.device, mode: str | bool = 'atoms', shuffle: bool = True, drop_last: bool = False, force_balancing: bool = False, throw_on_error: bool = False) - - - Bases: :py:obj:`torch.utils.data.Sampler` - - Base class for all Samplers. - - Every Sampler subclass has to provide an :meth:`__iter__` method, providing a - way to iterate over indices or lists of indices (batches) of dataset elements, and a :meth:`__len__` method - that returns the length of the returned iterators. - - :param data_source: This argument is not used and will be removed in 2.2.0. - You may still have custom implementation that utilizes it. - :type data_source: Dataset - - .. rubric:: Example - - >>> # xdoctest: +SKIP - >>> class AccedingSequenceLengthSampler(Sampler[int]): - >>> def __init__(self, data: List[str]) -> None: - >>> self.data = data - >>> - >>> def __len__(self) -> int: - >>> return len(self.data) - >>> - >>> def __iter__(self) -> Iterator[int]: - >>> sizes = torch.tensor([len(x) for x in self.data]) - >>> yield from torch.argsort(sizes).tolist() - >>> - >>> class AccedingSequenceLengthBatchSampler(Sampler[List[int]]): - >>> def __init__(self, data: List[str], batch_size: int) -> None: - >>> self.data = data - >>> self.batch_size = batch_size - >>> - >>> def __len__(self) -> int: - >>> return (len(self.data) + self.batch_size - 1) // self.batch_size - >>> - >>> def __iter__(self) -> Iterator[List[int]]: - >>> sizes = torch.tensor([len(x) for x in self.data]) - >>> for batch in torch.chunk(torch.argsort(sizes), len(self)): - >>> yield batch.tolist() - - .. note:: The :meth:`__len__` method isn't strictly required by - :class:`~torch.utils.data.DataLoader`, but is expected in any - calculation involving the length of a :class:`~torch.utils.data.DataLoader`. - - .. py:method:: _load_dataset(dataset, mode: Literal[atoms, neighbors]) - - - .. py:method:: __len__() -> int - - - .. py:method:: set_epoch_and_start_iteration(epoch: int, start_iteration: int) -> None - - - .. py:method:: __iter__() - - - diff --git a/_sources/autoapi/fairchem/core/common/distutils/index.rst b/_sources/autoapi/fairchem/core/common/distutils/index.rst deleted file mode 100644 index 1f72945f9..000000000 --- a/_sources/autoapi/fairchem/core/common/distutils/index.rst +++ /dev/null @@ -1,88 +0,0 @@ -:py:mod:`fairchem.core.common.distutils` -======================================== - -.. py:module:: fairchem.core.common.distutils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. 
autoapisummary:: - - fairchem.core.common.distutils.os_environ_get_or_throw - fairchem.core.common.distutils.setup - fairchem.core.common.distutils.cleanup - fairchem.core.common.distutils.initialized - fairchem.core.common.distutils.get_rank - fairchem.core.common.distutils.get_world_size - fairchem.core.common.distutils.is_master - fairchem.core.common.distutils.synchronize - fairchem.core.common.distutils.broadcast - fairchem.core.common.distutils.all_reduce - fairchem.core.common.distutils.all_gather - fairchem.core.common.distutils.gather_objects - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.distutils.T - - -.. py:data:: T - - - -.. py:function:: os_environ_get_or_throw(x: str) -> str - - -.. py:function:: setup(config) -> None - - -.. py:function:: cleanup() -> None - - -.. py:function:: initialized() -> bool - - -.. py:function:: get_rank() -> int - - -.. py:function:: get_world_size() -> int - - -.. py:function:: is_master() -> bool - - -.. py:function:: synchronize() -> None - - -.. py:function:: broadcast(tensor: torch.Tensor, src, group=dist.group.WORLD, async_op: bool = False) -> None - - -.. py:function:: all_reduce(data, group=dist.group.WORLD, average: bool = False, device=None) -> torch.Tensor - - -.. py:function:: all_gather(data, group=dist.group.WORLD, device=None) -> list[torch.Tensor] - - -.. py:function:: gather_objects(data: T, group: torch.distributed.ProcessGroup = dist.group.WORLD) -> list[T] - - Gather a list of pickleable objects into rank 0 - - diff --git a/_sources/autoapi/fairchem/core/common/flags/index.rst b/_sources/autoapi/fairchem/core/common/flags/index.rst deleted file mode 100644 index c1cc09f54..000000000 --- a/_sources/autoapi/fairchem/core/common/flags/index.rst +++ /dev/null @@ -1,49 +0,0 @@ -:py:mod:`fairchem.core.common.flags` -==================================== - -.. py:module:: fairchem.core.common.flags - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.flags.Flags - - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.flags.flags - - -.. py:class:: Flags - - - .. py:method:: get_parser() -> argparse.ArgumentParser - - - .. py:method:: add_core_args() -> None - - - -.. py:data:: flags - - - diff --git a/_sources/autoapi/fairchem/core/common/gp_utils/index.rst b/_sources/autoapi/fairchem/core/common/gp_utils/index.rst deleted file mode 100644 index 5f50daf10..000000000 --- a/_sources/autoapi/fairchem/core/common/gp_utils/index.rst +++ /dev/null @@ -1,570 +0,0 @@ -:py:mod:`fairchem.core.common.gp_utils` -======================================= - -.. py:module:: fairchem.core.common.gp_utils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.gp_utils.CopyToModelParallelRegion - fairchem.core.common.gp_utils.ReduceFromModelParallelRegion - fairchem.core.common.gp_utils.ScatterToModelParallelRegion - fairchem.core.common.gp_utils.GatherFromModelParallelRegion - - - -Functions -~~~~~~~~~ - -.. 
autoapisummary:: - - fairchem.core.common.gp_utils.ensure_div - fairchem.core.common.gp_utils.divide_and_check_no_remainder - fairchem.core.common.gp_utils.setup_gp - fairchem.core.common.gp_utils.cleanup_gp - fairchem.core.common.gp_utils.initialized - fairchem.core.common.gp_utils.get_dp_group - fairchem.core.common.gp_utils.get_gp_group - fairchem.core.common.gp_utils.get_dp_rank - fairchem.core.common.gp_utils.get_gp_rank - fairchem.core.common.gp_utils.get_dp_world_size - fairchem.core.common.gp_utils.get_gp_world_size - fairchem.core.common.gp_utils.pad_tensor - fairchem.core.common.gp_utils.trim_tensor - fairchem.core.common.gp_utils._split_tensor - fairchem.core.common.gp_utils._reduce - fairchem.core.common.gp_utils._split - fairchem.core.common.gp_utils._gather - fairchem.core.common.gp_utils._gather_with_padding - fairchem.core.common.gp_utils.copy_to_model_parallel_region - fairchem.core.common.gp_utils.reduce_from_model_parallel_region - fairchem.core.common.gp_utils.scatter_to_model_parallel_region - fairchem.core.common.gp_utils.gather_from_model_parallel_region - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.gp_utils._GRAPH_PARALLEL_GROUP - fairchem.core.common.gp_utils._DATA_PARALLEL_GROUP - - -.. py:data:: _GRAPH_PARALLEL_GROUP - - - -.. py:data:: _DATA_PARALLEL_GROUP - - - -.. py:function:: ensure_div(a: int, b: int) -> None - - -.. py:function:: divide_and_check_no_remainder(a: int, b: int) -> int - - -.. py:function:: setup_gp(config) -> None - - -.. py:function:: cleanup_gp() -> None - - -.. py:function:: initialized() -> bool - - -.. py:function:: get_dp_group() - - -.. py:function:: get_gp_group() - - -.. py:function:: get_dp_rank() -> int - - -.. py:function:: get_gp_rank() -> int - - -.. py:function:: get_dp_world_size() -> int - - -.. py:function:: get_gp_world_size() -> int - - -.. py:function:: pad_tensor(tensor: torch.Tensor, dim: int = -1, target_size: int | None = None) -> torch.Tensor - - -.. py:function:: trim_tensor(tensor: torch.Tensor, sizes: torch.Tensor | None = None, dim: int = 0) - - -.. py:function:: _split_tensor(tensor: torch.Tensor, num_parts: int, dim: int = -1, contiguous_chunks: bool = False) - - -.. py:function:: _reduce(ctx: Any, input: torch.Tensor) -> torch.Tensor - - -.. py:function:: _split(input: torch.Tensor, dim: int = -1) -> torch.Tensor - - -.. py:function:: _gather(input: torch.Tensor, dim: int = -1) -> torch.Tensor - - -.. py:function:: _gather_with_padding(input: torch.Tensor, dim: int = -1) -> torch.Tensor - - -.. py:class:: CopyToModelParallelRegion(*args, **kwargs) - - - Bases: :py:obj:`torch.autograd.Function` - - Base class to create custom `autograd.Function`. - - To create a custom `autograd.Function`, subclass this class and implement - the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom - op in the forward pass, call the class method ``apply``. Do not call - :meth:`forward` directly. - - To ensure correctness and best performance, make sure you are calling the - correct methods on ``ctx`` and validating your backward function using - :func:`torch.autograd.gradcheck`. - - See :ref:`extending-autograd` for more details on how to use this class. 
- - Examples:: - - >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) - >>> class Exp(Function): - >>> @staticmethod - >>> def forward(ctx, i): - >>> result = i.exp() - >>> ctx.save_for_backward(result) - >>> return result - >>> - >>> @staticmethod - >>> def backward(ctx, grad_output): - >>> result, = ctx.saved_tensors - >>> return grad_output * result - >>> - >>> # Use it by calling the apply method: - >>> # xdoctest: +SKIP - >>> output = Exp.apply(input) - - .. py:method:: forward(ctx, input: torch.Tensor) -> torch.Tensor - :staticmethod: - - Define the forward of the custom autograd Function. - - This function is to be overridden by all subclasses. - There are two ways to define forward: - - Usage 1 (Combined forward and ctx):: - - @staticmethod - def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any: - pass - - - It must accept a context ctx as the first argument, followed by any - number of arguments (tensors or other types). - - See :ref:`combining-forward-context` for more details - - Usage 2 (Separate forward and ctx):: - - @staticmethod - def forward(*args: Any, **kwargs: Any) -> Any: - pass - - @staticmethod - def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: - pass - - - The forward no longer accepts a ctx argument. - - Instead, you must also override the :meth:`torch.autograd.Function.setup_context` - staticmethod to handle setting up the ``ctx`` object. - ``output`` is the output of the forward, ``inputs`` are a Tuple of inputs - to the forward. - - See :ref:`extending-autograd` for more details - - The context can be used to store arbitrary data that can be then - retrieved during the backward pass. Tensors should not be stored - directly on `ctx` (though this is not currently enforced for - backward compatibility). Instead, tensors should be saved either with - :func:`ctx.save_for_backward` if they are intended to be used in - ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward` - if they are intended to be used for in ``jvp``. - - - .. py:method:: backward(ctx, grad_output: torch.Tensor) -> torch.Tensor - :staticmethod: - - Define a formula for differentiating the operation with backward mode automatic differentiation. - - This function is to be overridden by all subclasses. - (Defining this function is equivalent to defining the ``vjp`` function.) - - It must accept a context :attr:`ctx` as the first argument, followed by - as many outputs as the :func:`forward` returned (None will be passed in - for non tensor outputs of the forward function), - and it should return as many tensors, as there were inputs to - :func:`forward`. Each argument is the gradient w.r.t the given output, - and each returned value should be the gradient w.r.t. the - corresponding input. If an input is not a Tensor or is a Tensor not - requiring grads, you can just pass None as a gradient for that input. - - The context can be used to retrieve tensors saved during the forward - pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple - of booleans representing whether each input needs gradient. E.g., - :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the - first input to :func:`forward` needs gradient computed w.r.t. the - output. - - - -.. py:class:: ReduceFromModelParallelRegion(*args, **kwargs) - - - Bases: :py:obj:`torch.autograd.Function` - - Base class to create custom `autograd.Function`. 
- - To create a custom `autograd.Function`, subclass this class and implement - the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom - op in the forward pass, call the class method ``apply``. Do not call - :meth:`forward` directly. - - To ensure correctness and best performance, make sure you are calling the - correct methods on ``ctx`` and validating your backward function using - :func:`torch.autograd.gradcheck`. - - See :ref:`extending-autograd` for more details on how to use this class. - - Examples:: - - >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) - >>> class Exp(Function): - >>> @staticmethod - >>> def forward(ctx, i): - >>> result = i.exp() - >>> ctx.save_for_backward(result) - >>> return result - >>> - >>> @staticmethod - >>> def backward(ctx, grad_output): - >>> result, = ctx.saved_tensors - >>> return grad_output * result - >>> - >>> # Use it by calling the apply method: - >>> # xdoctest: +SKIP - >>> output = Exp.apply(input) - - .. py:method:: forward(ctx, input: torch.Tensor) -> torch.Tensor - :staticmethod: - - Define the forward of the custom autograd Function. - - This function is to be overridden by all subclasses. - There are two ways to define forward: - - Usage 1 (Combined forward and ctx):: - - @staticmethod - def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any: - pass - - - It must accept a context ctx as the first argument, followed by any - number of arguments (tensors or other types). - - See :ref:`combining-forward-context` for more details - - Usage 2 (Separate forward and ctx):: - - @staticmethod - def forward(*args: Any, **kwargs: Any) -> Any: - pass - - @staticmethod - def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: - pass - - - The forward no longer accepts a ctx argument. - - Instead, you must also override the :meth:`torch.autograd.Function.setup_context` - staticmethod to handle setting up the ``ctx`` object. - ``output`` is the output of the forward, ``inputs`` are a Tuple of inputs - to the forward. - - See :ref:`extending-autograd` for more details - - The context can be used to store arbitrary data that can be then - retrieved during the backward pass. Tensors should not be stored - directly on `ctx` (though this is not currently enforced for - backward compatibility). Instead, tensors should be saved either with - :func:`ctx.save_for_backward` if they are intended to be used in - ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward` - if they are intended to be used for in ``jvp``. - - - .. py:method:: backward(ctx, grad_output: torch.Tensor) -> torch.Tensor - :staticmethod: - - Define a formula for differentiating the operation with backward mode automatic differentiation. - - This function is to be overridden by all subclasses. - (Defining this function is equivalent to defining the ``vjp`` function.) - - It must accept a context :attr:`ctx` as the first argument, followed by - as many outputs as the :func:`forward` returned (None will be passed in - for non tensor outputs of the forward function), - and it should return as many tensors, as there were inputs to - :func:`forward`. Each argument is the gradient w.r.t the given output, - and each returned value should be the gradient w.r.t. the - corresponding input. If an input is not a Tensor or is a Tensor not - requiring grads, you can just pass None as a gradient for that input. - - The context can be used to retrieve tensors saved during the forward - pass. 
It also has an attribute :attr:`ctx.needs_input_grad` as a tuple - of booleans representing whether each input needs gradient. E.g., - :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the - first input to :func:`forward` needs gradient computed w.r.t. the - output. - - - -.. py:class:: ScatterToModelParallelRegion(*args, **kwargs) - - - Bases: :py:obj:`torch.autograd.Function` - - Base class to create custom `autograd.Function`. - - To create a custom `autograd.Function`, subclass this class and implement - the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom - op in the forward pass, call the class method ``apply``. Do not call - :meth:`forward` directly. - - To ensure correctness and best performance, make sure you are calling the - correct methods on ``ctx`` and validating your backward function using - :func:`torch.autograd.gradcheck`. - - See :ref:`extending-autograd` for more details on how to use this class. - - Examples:: - - >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) - >>> class Exp(Function): - >>> @staticmethod - >>> def forward(ctx, i): - >>> result = i.exp() - >>> ctx.save_for_backward(result) - >>> return result - >>> - >>> @staticmethod - >>> def backward(ctx, grad_output): - >>> result, = ctx.saved_tensors - >>> return grad_output * result - >>> - >>> # Use it by calling the apply method: - >>> # xdoctest: +SKIP - >>> output = Exp.apply(input) - - .. py:method:: forward(ctx, input: torch.Tensor, dim: int = -1) -> torch.Tensor - :staticmethod: - - Define the forward of the custom autograd Function. - - This function is to be overridden by all subclasses. - There are two ways to define forward: - - Usage 1 (Combined forward and ctx):: - - @staticmethod - def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any: - pass - - - It must accept a context ctx as the first argument, followed by any - number of arguments (tensors or other types). - - See :ref:`combining-forward-context` for more details - - Usage 2 (Separate forward and ctx):: - - @staticmethod - def forward(*args: Any, **kwargs: Any) -> Any: - pass - - @staticmethod - def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: - pass - - - The forward no longer accepts a ctx argument. - - Instead, you must also override the :meth:`torch.autograd.Function.setup_context` - staticmethod to handle setting up the ``ctx`` object. - ``output`` is the output of the forward, ``inputs`` are a Tuple of inputs - to the forward. - - See :ref:`extending-autograd` for more details - - The context can be used to store arbitrary data that can be then - retrieved during the backward pass. Tensors should not be stored - directly on `ctx` (though this is not currently enforced for - backward compatibility). Instead, tensors should be saved either with - :func:`ctx.save_for_backward` if they are intended to be used in - ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward` - if they are intended to be used for in ``jvp``. - - - .. py:method:: backward(ctx, grad_output: torch.Tensor) - :staticmethod: - - Define a formula for differentiating the operation with backward mode automatic differentiation. - - This function is to be overridden by all subclasses. - (Defining this function is equivalent to defining the ``vjp`` function.) 
- - It must accept a context :attr:`ctx` as the first argument, followed by - as many outputs as the :func:`forward` returned (None will be passed in - for non tensor outputs of the forward function), - and it should return as many tensors, as there were inputs to - :func:`forward`. Each argument is the gradient w.r.t the given output, - and each returned value should be the gradient w.r.t. the - corresponding input. If an input is not a Tensor or is a Tensor not - requiring grads, you can just pass None as a gradient for that input. - - The context can be used to retrieve tensors saved during the forward - pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple - of booleans representing whether each input needs gradient. E.g., - :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the - first input to :func:`forward` needs gradient computed w.r.t. the - output. - - - -.. py:class:: GatherFromModelParallelRegion(*args, **kwargs) - - - Bases: :py:obj:`torch.autograd.Function` - - Base class to create custom `autograd.Function`. - - To create a custom `autograd.Function`, subclass this class and implement - the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom - op in the forward pass, call the class method ``apply``. Do not call - :meth:`forward` directly. - - To ensure correctness and best performance, make sure you are calling the - correct methods on ``ctx`` and validating your backward function using - :func:`torch.autograd.gradcheck`. - - See :ref:`extending-autograd` for more details on how to use this class. - - Examples:: - - >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) - >>> class Exp(Function): - >>> @staticmethod - >>> def forward(ctx, i): - >>> result = i.exp() - >>> ctx.save_for_backward(result) - >>> return result - >>> - >>> @staticmethod - >>> def backward(ctx, grad_output): - >>> result, = ctx.saved_tensors - >>> return grad_output * result - >>> - >>> # Use it by calling the apply method: - >>> # xdoctest: +SKIP - >>> output = Exp.apply(input) - - .. py:method:: forward(ctx, input: torch.Tensor, dim: int = -1) -> torch.Tensor - :staticmethod: - - Define the forward of the custom autograd Function. - - This function is to be overridden by all subclasses. - There are two ways to define forward: - - Usage 1 (Combined forward and ctx):: - - @staticmethod - def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any: - pass - - - It must accept a context ctx as the first argument, followed by any - number of arguments (tensors or other types). - - See :ref:`combining-forward-context` for more details - - Usage 2 (Separate forward and ctx):: - - @staticmethod - def forward(*args: Any, **kwargs: Any) -> Any: - pass - - @staticmethod - def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: - pass - - - The forward no longer accepts a ctx argument. - - Instead, you must also override the :meth:`torch.autograd.Function.setup_context` - staticmethod to handle setting up the ``ctx`` object. - ``output`` is the output of the forward, ``inputs`` are a Tuple of inputs - to the forward. - - See :ref:`extending-autograd` for more details - - The context can be used to store arbitrary data that can be then - retrieved during the backward pass. Tensors should not be stored - directly on `ctx` (though this is not currently enforced for - backward compatibility). 
Instead, tensors should be saved either with - :func:`ctx.save_for_backward` if they are intended to be used in - ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward` - if they are intended to be used for in ``jvp``. - - - .. py:method:: backward(ctx, grad_output: torch.Tensor) - :staticmethod: - - Define a formula for differentiating the operation with backward mode automatic differentiation. - - This function is to be overridden by all subclasses. - (Defining this function is equivalent to defining the ``vjp`` function.) - - It must accept a context :attr:`ctx` as the first argument, followed by - as many outputs as the :func:`forward` returned (None will be passed in - for non tensor outputs of the forward function), - and it should return as many tensors, as there were inputs to - :func:`forward`. Each argument is the gradient w.r.t the given output, - and each returned value should be the gradient w.r.t. the - corresponding input. If an input is not a Tensor or is a Tensor not - requiring grads, you can just pass None as a gradient for that input. - - The context can be used to retrieve tensors saved during the forward - pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple - of booleans representing whether each input needs gradient. E.g., - :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the - first input to :func:`forward` needs gradient computed w.r.t. the - output. - - - -.. py:function:: copy_to_model_parallel_region(input: torch.Tensor) -> torch.Tensor - - -.. py:function:: reduce_from_model_parallel_region(input: torch.Tensor) -> torch.Tensor - - -.. py:function:: scatter_to_model_parallel_region(input: torch.Tensor, dim: int = -1) -> torch.Tensor - - -.. py:function:: gather_from_model_parallel_region(input: torch.Tensor, dim: int = -1) -> torch.Tensor - - diff --git a/_sources/autoapi/fairchem/core/common/hpo_utils/index.rst b/_sources/autoapi/fairchem/core/common/hpo_utils/index.rst deleted file mode 100644 index 534cd87da..000000000 --- a/_sources/autoapi/fairchem/core/common/hpo_utils/index.rst +++ /dev/null @@ -1,49 +0,0 @@ -:py:mod:`fairchem.core.common.hpo_utils` -======================================== - -.. py:module:: fairchem.core.common.hpo_utils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.hpo_utils.tune_reporter - fairchem.core.common.hpo_utils.label_metric_dict - - - -.. py:function:: tune_reporter(iters, train_metrics, val_metrics, test_metrics=None, metric_to_opt: str = 'val_loss', min_max: str = 'min') -> None - - Wrapper function for tune.report() - - :param iters: dict with training iteration info (e.g. steps, epochs) - :type iters: dict - :param train_metrics: train metrics dict - :type train_metrics: dict - :param val_metrics: val metrics dict - :type val_metrics: dict - :param test_metrics: test metrics dict, default is None - :type test_metrics: dict, optional - :param metric_to_opt: str for val metric to optimize, default is val_loss - :type metric_to_opt: str, optional - :param min_max: either "min" or "max", determines whether metric_to_opt is to be minimized or maximized, default is min - :type min_max: str, optional - - -.. 
py:function:: label_metric_dict(metric_dict, split) - - diff --git a/_sources/autoapi/fairchem/core/common/index.rst b/_sources/autoapi/fairchem/core/common/index.rst deleted file mode 100644 index 37ade38b6..000000000 --- a/_sources/autoapi/fairchem/core/common/index.rst +++ /dev/null @@ -1,42 +0,0 @@ -:py:mod:`fairchem.core.common` -============================== - -.. py:module:: fairchem.core.common - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - relaxation/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - data_parallel/index.rst - distutils/index.rst - flags/index.rst - gp_utils/index.rst - hpo_utils/index.rst - logger/index.rst - registry/index.rst - transforms/index.rst - tutorial_utils/index.rst - typing/index.rst - utils/index.rst - - diff --git a/_sources/autoapi/fairchem/core/common/logger/index.rst b/_sources/autoapi/fairchem/core/common/logger/index.rst deleted file mode 100644 index fa5b56ede..000000000 --- a/_sources/autoapi/fairchem/core/common/logger/index.rst +++ /dev/null @@ -1,107 +0,0 @@ -:py:mod:`fairchem.core.common.logger` -===================================== - -.. py:module:: fairchem.core.common.logger - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.logger.Logger - fairchem.core.common.logger.WandBLogger - fairchem.core.common.logger.TensorboardLogger - - - - -.. py:class:: Logger(config) - - - Bases: :py:obj:`abc.ABC` - - Generic class to interface with various logging modules, e.g. wandb, - tensorboard, etc. - - .. py:method:: watch(model) - :abstractmethod: - - Monitor parameters and gradients. - - - .. py:method:: log(update_dict, step: int, split: str = '') - - Log some values. - - - .. py:method:: log_plots(plots) -> None - :abstractmethod: - - - .. py:method:: mark_preempting() -> None - :abstractmethod: - - - -.. py:class:: WandBLogger(config) - - - Bases: :py:obj:`Logger` - - Generic class to interface with various logging modules, e.g. wandb, - tensorboard, etc. - - .. py:method:: watch(model) -> None - - Monitor parameters and gradients. - - - .. py:method:: log(update_dict, step: int, split: str = '') -> None - - Log some values. - - - .. py:method:: log_plots(plots, caption: str = '') -> None - - - .. py:method:: mark_preempting() -> None - - - -.. py:class:: TensorboardLogger(config) - - - Bases: :py:obj:`Logger` - - Generic class to interface with various logging modules, e.g. wandb, - tensorboard, etc. - - .. py:method:: watch(model) -> bool - - Monitor parameters and gradients. - - - .. py:method:: log(update_dict, step: int, split: str = '') - - Log some values. - - - .. py:method:: mark_preempting() -> None - - - .. py:method:: log_plots(plots) -> None - - - diff --git a/_sources/autoapi/fairchem/core/common/registry/index.rst b/_sources/autoapi/fairchem/core/common/registry/index.rst deleted file mode 100644 index b998f7ac6..000000000 --- a/_sources/autoapi/fairchem/core/common/registry/index.rst +++ /dev/null @@ -1,244 +0,0 @@ -:py:mod:`fairchem.core.common.registry` -======================================= - -.. 
py:module:: fairchem.core.common.registry - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - # Copyright (c) Meta, Inc. and its affiliates. - # Borrowed from https://github.com/facebookresearch/pythia/blob/master/pythia/common/registry.py. - - Registry is central source of truth. Inspired from Redux's concept of - global store, Registry maintains mappings of various information to unique - keys. Special functions in registry can be used as decorators to register - different kind of classes. - - Import the global registry object using - - ``from fairchem.core.common.registry import registry`` - - Various decorators for registry different kind of classes with unique keys - - - Register a model: ``@registry.register_model`` - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.registry.Registry - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.registry._get_absolute_mapping - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.registry.R - fairchem.core.common.registry.NestedDict - fairchem.core.common.registry.registry - - -.. py:data:: R - - - -.. py:data:: NestedDict - - - -.. py:function:: _get_absolute_mapping(name: str) - - -.. py:class:: Registry - - - Class for registry object which acts as central source of truth. - - .. py:attribute:: mapping - :type: ClassVar[NestedDict] - - - - .. py:method:: register_task(name: str) - :classmethod: - - Register a new task to registry with key 'name' - :param name: Key with which the task will be registered. - - Usage:: - from fairchem.core.common.registry import registry - from fairchem.core.tasks import BaseTask - @registry.register_task("train") - class TrainTask(BaseTask): - ... - - - .. py:method:: register_dataset(name: str) - :classmethod: - - Register a dataset to registry with key 'name' - - :param name: Key with which the dataset will be registered. - - Usage:: - - from fairchem.core.common.registry import registry - from fairchem.core.datasets import BaseDataset - - @registry.register_dataset("qm9") - class QM9(BaseDataset): - ... - - - .. py:method:: register_model(name: str) - :classmethod: - - Register a model to registry with key 'name' - - :param name: Key with which the model will be registered. - - Usage:: - - from fairchem.core.common.registry import registry - from fairchem.core.modules.layers import CGCNNConv - - @registry.register_model("cgcnn") - class CGCNN(): - ... - - - .. py:method:: register_logger(name: str) - :classmethod: - - Register a logger to registry with key 'name' - - :param name: Key with which the logger will be registered. - - Usage:: - - from fairchem.core.common.registry import registry - - @registry.register_logger("wandb") - class WandBLogger(): - ... - - - .. py:method:: register_trainer(name: str) - :classmethod: - - Register a trainer to registry with key 'name' - - :param name: Key with which the trainer will be registered. - - Usage:: - - from fairchem.core.common.registry import registry - - @registry.register_trainer("active_discovery") - class ActiveDiscoveryTrainer(): - ... - - - .. py:method:: register(name: str, obj) -> None - :classmethod: - - Register an item to registry with key 'name' - - :param name: Key with which the item will be registered. 
- - Usage:: - - from fairchem.core.common.registry import registry - - registry.register("config", {}) - - - .. py:method:: __import_error(name: str, mapping_name: str) -> RuntimeError - :classmethod: - - - .. py:method:: get_class(name: str, mapping_name: str) - :classmethod: - - - .. py:method:: get_task_class(name: str) - :classmethod: - - - .. py:method:: get_dataset_class(name: str) - :classmethod: - - - .. py:method:: get_model_class(name: str) - :classmethod: - - - .. py:method:: get_logger_class(name: str) - :classmethod: - - - .. py:method:: get_trainer_class(name: str) - :classmethod: - - - .. py:method:: get(name: str, default=None, no_warning: bool = False) - :classmethod: - - Get an item from registry with key 'name' - - :param name: Key whose value needs to be retrieved. - :type name: string - :param default: If passed and key is not in registry, default value will - be returned with a warning. Default: None - :param no_warning: If passed as True, warning when key doesn't exist - will not be generated. Useful for cgcnn's - internal operations. Default: False - :type no_warning: bool - - Usage:: - - from fairchem.core.common.registry import registry - - config = registry.get("config") - - - .. py:method:: unregister(name: str) - :classmethod: - - Remove an item from registry with key 'name' - - :param name: Key which needs to be removed. - - Usage:: - - from fairchem.core.common.registry import registry - - config = registry.unregister("config") - - - -.. py:data:: registry - - - diff --git a/_sources/autoapi/fairchem/core/common/relaxation/ase_utils/index.rst b/_sources/autoapi/fairchem/core/common/relaxation/ase_utils/index.rst deleted file mode 100644 index daa30b637..000000000 --- a/_sources/autoapi/fairchem/core/common/relaxation/ase_utils/index.rst +++ /dev/null @@ -1,105 +0,0 @@ -:py:mod:`fairchem.core.common.relaxation.ase_utils` -=================================================== - -.. py:module:: fairchem.core.common.relaxation.ase_utils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - - Utilities to interface OCP models/trainers with the Atomic Simulation - Environment (ASE) - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.relaxation.ase_utils.OCPCalculator - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.relaxation.ase_utils.batch_to_atoms - - - -.. py:function:: batch_to_atoms(batch) - - -.. py:class:: OCPCalculator(config_yml: str | None = None, checkpoint_path: str | None = None, model_name: str | None = None, local_cache: str | None = None, trainer: str | None = None, cutoff: int = 6, max_neighbors: int = 50, cpu: bool = True, seed: int | None = None) - - - Bases: :py:obj:`ase.calculators.calculator.Calculator` - - Base-class for all ASE calculators. - - A calculator must raise PropertyNotImplementedError if asked for a - property that it can't calculate. So, if calculation of the - stress tensor has not been implemented, get_stress(atoms) should - raise PropertyNotImplementedError. This can be achieved simply by not - including the string 'stress' in the list implemented_properties - which is a class member. These are the names of the standard - properties: 'energy', 'forces', 'stress', 'dipole', 'charges', - 'magmom' and 'magmoms'. - - .. 
py:attribute:: implemented_properties - :type: ClassVar[list[str]] - :value: ['energy', 'forces'] - - - - .. py:method:: load_checkpoint(checkpoint_path: str, checkpoint: dict | None = None) -> None - - Load existing trained model - - :param checkpoint_path: string - Path to trained model - - - .. py:method:: calculate(atoms: ase.Atoms, properties, system_changes) -> None - - Do the calculation. - - properties: list of str - List of what needs to be calculated. Can be any combination - of 'energy', 'forces', 'stress', 'dipole', 'charges', 'magmom' - and 'magmoms'. - system_changes: list of str - List of what has changed since last calculation. Can be - any combination of these six: 'positions', 'numbers', 'cell', - 'pbc', 'initial_charges' and 'initial_magmoms'. - - Subclasses need to implement this, but can ignore properties - and system_changes if they want. Calculated properties should - be inserted into results dictionary like shown in this dummy - example:: - - self.results = {'energy': 0.0, - 'forces': np.zeros((len(atoms), 3)), - 'stress': np.zeros(6), - 'dipole': np.zeros(3), - 'charges': np.zeros(len(atoms)), - 'magmom': 0.0, - 'magmoms': np.zeros(len(atoms))} - - The subclass implementation should first call this - implementation to set the atoms attribute and create any missing - directories. - - - diff --git a/_sources/autoapi/fairchem/core/common/relaxation/index.rst b/_sources/autoapi/fairchem/core/common/relaxation/index.rst deleted file mode 100644 index 5fe59d0af..000000000 --- a/_sources/autoapi/fairchem/core/common/relaxation/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -:py:mod:`fairchem.core.common.relaxation` -========================================= - -.. py:module:: fairchem.core.common.relaxation - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - optimizers/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - ase_utils/index.rst - ml_relaxation/index.rst - - diff --git a/_sources/autoapi/fairchem/core/common/relaxation/ml_relaxation/index.rst b/_sources/autoapi/fairchem/core/common/relaxation/ml_relaxation/index.rst deleted file mode 100644 index de9b4350b..000000000 --- a/_sources/autoapi/fairchem/core/common/relaxation/ml_relaxation/index.rst +++ /dev/null @@ -1,43 +0,0 @@ -:py:mod:`fairchem.core.common.relaxation.ml_relaxation` -======================================================= - -.. py:module:: fairchem.core.common.relaxation.ml_relaxation - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.relaxation.ml_relaxation.ml_relax - - - -.. py:function:: ml_relax(batch, model, steps: int, fmax: float, relax_opt, save_full_traj, device: str = 'cuda:0', transform=None, early_stop_batch: bool = False) - - Runs ML-based relaxations. - :param batch: object - :param model: object - :param steps: int - Max number of steps in the structure relaxation. - :param fmax: float - Structure relaxation terminates when the max force - of the system is no bigger than fmax. - :param relax_opt: str - Optimizer and corresponding parameters to be used for structure relaxations. - :param save_full_traj: bool - Whether to save out the full ASE trajectory. If False, only save out initial and final frames. 
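For context, the relaxation utilities documented here are usually driven from ASE. A minimal sketch using the ``OCPCalculator`` described above might look as follows; the checkpoint path and the Cu(111)/O toy system are placeholders, and the ``fmax``/``steps`` values are illustrative only::

    from ase.build import fcc111, add_adsorbate
    from ase.optimize import BFGS
    from fairchem.core.common.relaxation.ase_utils import OCPCalculator

    # Toy Cu(111) slab with an O adsorbate; substitute your own system.
    slab = fcc111("Cu", size=(2, 2, 3), vacuum=10.0)
    add_adsorbate(slab, "O", height=1.2, position="fcc")

    # Placeholder checkpoint path; point this at a trained model file.
    slab.calc = OCPCalculator(checkpoint_path="path/to/checkpoint.pt", cpu=True)

    # Relax with a standard ASE optimizer using ML energies and forces.
    opt = BFGS(slab, trajectory="relax.traj")
    opt.run(fmax=0.05, steps=100)
    print(slab.get_potential_energy())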
- - diff --git a/_sources/autoapi/fairchem/core/common/relaxation/optimizers/index.rst b/_sources/autoapi/fairchem/core/common/relaxation/optimizers/index.rst deleted file mode 100644 index fe1b79cc7..000000000 --- a/_sources/autoapi/fairchem/core/common/relaxation/optimizers/index.rst +++ /dev/null @@ -1,15 +0,0 @@ -:py:mod:`fairchem.core.common.relaxation.optimizers` -==================================================== - -.. py:module:: fairchem.core.common.relaxation.optimizers - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - lbfgs_torch/index.rst - - diff --git a/_sources/autoapi/fairchem/core/common/relaxation/optimizers/lbfgs_torch/index.rst b/_sources/autoapi/fairchem/core/common/relaxation/optimizers/lbfgs_torch/index.rst deleted file mode 100644 index 1d70b89b3..000000000 --- a/_sources/autoapi/fairchem/core/common/relaxation/optimizers/lbfgs_torch/index.rst +++ /dev/null @@ -1,60 +0,0 @@ -:py:mod:`fairchem.core.common.relaxation.optimizers.lbfgs_torch` -================================================================ - -.. py:module:: fairchem.core.common.relaxation.optimizers.lbfgs_torch - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.relaxation.optimizers.lbfgs_torch.LBFGS - fairchem.core.common.relaxation.optimizers.lbfgs_torch.TorchCalc - - - - -.. py:class:: LBFGS(batch: torch_geometric.data.Batch, model: TorchCalc, maxstep: float = 0.01, memory: int = 100, damping: float = 0.25, alpha: float = 100.0, force_consistent=None, device: str = 'cuda:0', save_full_traj: bool = True, traj_dir: pathlib.Path | None = None, traj_names=None, early_stop_batch: bool = False) - - - .. py:method:: get_energy_and_forces(apply_constraint: bool = True) - - - .. py:method:: set_positions(update, update_mask) -> None - - - .. py:method:: check_convergence(iteration, forces=None, energy=None) - - - .. py:method:: run(fmax, steps) - - - .. py:method:: step(iteration: int, forces: torch.Tensor | None, update_mask: torch.Tensor) -> None - - - .. py:method:: write(energy, forces, update_mask) -> None - - - -.. py:class:: TorchCalc(model, transform=None) - - - .. py:method:: get_energy_and_forces(atoms, apply_constraint: bool = True) - - - .. py:method:: update_graph(atoms) - - - diff --git a/_sources/autoapi/fairchem/core/common/transforms/index.rst b/_sources/autoapi/fairchem/core/common/transforms/index.rst deleted file mode 100644 index e99ece550..000000000 --- a/_sources/autoapi/fairchem/core/common/transforms/index.rst +++ /dev/null @@ -1,50 +0,0 @@ -:py:mod:`fairchem.core.common.transforms` -========================================= - -.. py:module:: fairchem.core.common.transforms - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.transforms.RandomRotate - - - - -.. py:class:: RandomRotate(degrees, axes: list[int] | None = None) - - - Rotates node positions around a specific axis by a randomly sampled - factor within a given interval. - - :param degrees: Rotation interval from which the rotation - angle is sampled. 
If `degrees` is a number instead of a - tuple, the interval is given by :math:`[-\mathrm{degrees}, - \mathrm{degrees}]`. - :type degrees: tuple or float - :param axes: The rotation axes. (default: `[0, 1, 2]`) - :type axes: int, optional - - .. py:method:: __call__(data) - - - .. py:method:: __repr__() -> str - - Return repr(self). - - - diff --git a/_sources/autoapi/fairchem/core/common/tutorial_utils/index.rst b/_sources/autoapi/fairchem/core/common/tutorial_utils/index.rst deleted file mode 100644 index 4c744a1df..000000000 --- a/_sources/autoapi/fairchem/core/common/tutorial_utils/index.rst +++ /dev/null @@ -1,65 +0,0 @@ -:py:mod:`fairchem.core.common.tutorial_utils` -============================================= - -.. py:module:: fairchem.core.common.tutorial_utils - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.tutorial_utils.ocp_root - fairchem.core.common.tutorial_utils.ocp_main - fairchem.core.common.tutorial_utils.describe_ocp - fairchem.core.common.tutorial_utils.train_test_val_split - fairchem.core.common.tutorial_utils.generate_yml_config - - - -.. py:function:: ocp_root() - - Return the root directory of the installed ocp package. - - -.. py:function:: ocp_main() - - Return the path to ocp main.py - - -.. py:function:: describe_ocp() - - Print some system information that could be useful in debugging. - - -.. py:function:: train_test_val_split(ase_db, ttv=(0.8, 0.1, 0.1), files=('train.db', 'test.db', 'val.db'), seed=42) - - Split an ase db into train, test and validation dbs. - - ase_db: path to an ase db containing all the data. - ttv: a tuple containing the fraction of train, test and val data. This will be normalized. - files: a tuple of filenames to write the splits into. An exception is raised if these exist. - You should delete them first. - seed: an integer for the random number generator seed - - Returns the absolute path to files. - - -.. py:function:: generate_yml_config(checkpoint_path, yml='run.yml', delete=(), update=()) - - Generate a yml config file from an existing checkpoint file. - - checkpoint_path: string to path of an existing checkpoint - yml: name of file to write to. - pop: list of keys to remove from the config - update: dictionary of key:values to update - - Use a dot notation in update. - - Returns an absolute path to the generated yml file. - - diff --git a/_sources/autoapi/fairchem/core/common/typing/index.rst b/_sources/autoapi/fairchem/core/common/typing/index.rst deleted file mode 100644 index ad85b2489..000000000 --- a/_sources/autoapi/fairchem/core/common/typing/index.rst +++ /dev/null @@ -1,38 +0,0 @@ -:py:mod:`fairchem.core.common.typing` -===================================== - -.. py:module:: fairchem.core.common.typing - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.typing.assert_is_instance - fairchem.core.common.typing.none_throws - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.typing._T - - -.. py:data:: _T - - - -.. py:function:: assert_is_instance(obj: object, cls: type[_T]) -> _T - - -.. 
py:function:: none_throws(x: _T | None, msg: str | None = None) -> _T - - diff --git a/_sources/autoapi/fairchem/core/common/utils/index.rst b/_sources/autoapi/fairchem/core/common/utils/index.rst deleted file mode 100644 index e2f623143..000000000 --- a/_sources/autoapi/fairchem/core/common/utils/index.rst +++ /dev/null @@ -1,290 +0,0 @@ -:py:mod:`fairchem.core.common.utils` -==================================== - -.. py:module:: fairchem.core.common.utils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.utils.UniqueKeyLoader - fairchem.core.common.utils.Complete - fairchem.core.common.utils.SeverityLevelBetween - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.common.utils.pyg2_data_transform - fairchem.core.common.utils.save_checkpoint - fairchem.core.common.utils.warmup_lr_lambda - fairchem.core.common.utils.print_cuda_usage - fairchem.core.common.utils.conditional_grad - fairchem.core.common.utils.plot_histogram - fairchem.core.common.utils.collate - fairchem.core.common.utils.add_edge_distance_to_graph - fairchem.core.common.utils._import_local_file - fairchem.core.common.utils.setup_experimental_imports - fairchem.core.common.utils._get_project_root - fairchem.core.common.utils.setup_imports - fairchem.core.common.utils.dict_set_recursively - fairchem.core.common.utils.parse_value - fairchem.core.common.utils.create_dict_from_args - fairchem.core.common.utils.load_config - fairchem.core.common.utils.build_config - fairchem.core.common.utils.create_grid - fairchem.core.common.utils.save_experiment_log - fairchem.core.common.utils.get_pbc_distances - fairchem.core.common.utils.radius_graph_pbc - fairchem.core.common.utils.get_max_neighbors_mask - fairchem.core.common.utils.get_pruned_edge_idx - fairchem.core.common.utils.merge_dicts - fairchem.core.common.utils.setup_logging - fairchem.core.common.utils.compute_neighbors - fairchem.core.common.utils.check_traj_files - fairchem.core.common.utils.new_trainer_context - fairchem.core.common.utils._resolve_scale_factor_submodule - fairchem.core.common.utils._report_incompat_keys - fairchem.core.common.utils.load_state_dict - fairchem.core.common.utils.scatter_det - fairchem.core.common.utils.get_commit_hash - fairchem.core.common.utils.cg_change_mat - fairchem.core.common.utils.irreps_sum - fairchem.core.common.utils.update_config - fairchem.core.common.utils.get_loss_module - - - -.. py:class:: UniqueKeyLoader(stream) - - - Bases: :py:obj:`yaml.SafeLoader` - - .. py:method:: construct_mapping(node, deep=False) - - - -.. py:function:: pyg2_data_transform(data: torch_geometric.data.Data) - - if we're on the new pyg (2.0 or later) and if the Data stored is in older format - we need to convert the data to the new format - - -.. py:function:: save_checkpoint(state, checkpoint_dir: str = 'checkpoints/', checkpoint_file: str = 'checkpoint.pt') -> str - - -.. py:class:: Complete - - - .. py:method:: __call__(data) - - - -.. py:function:: warmup_lr_lambda(current_step: int, optim_config) - - Returns a learning rate multiplier. - Till `warmup_steps`, learning rate linearly increases to `initial_lr`, - and then gets multiplied by `lr_gamma` every time a milestone is crossed. - - -.. py:function:: print_cuda_usage() -> None - - -.. 
py:function:: conditional_grad(dec) - - Decorator to enable/disable grad depending on whether force/energy predictions are being made - - -.. py:function:: plot_histogram(data, xlabel: str = '', ylabel: str = '', title: str = '') - - -.. py:function:: collate(data_list) - - -.. py:function:: add_edge_distance_to_graph(batch, device='cpu', dmin: float = 0.0, dmax: float = 6.0, num_gaussians: int = 50) - - -.. py:function:: _import_local_file(path: pathlib.Path, *, project_root: pathlib.Path) -> None - - Imports a Python file as a module - - :param path: The path to the file to import - :type path: Path - :param project_root: The root directory of the project (i.e., the "ocp" folder) - :type project_root: Path - - -.. py:function:: setup_experimental_imports(project_root: pathlib.Path) -> None - - Import selected directories of modules from the "experimental" subdirectory. - - If a file named ".include" is present in the "experimental" subdirectory, - this will be read as a list of experimental subdirectories whose module - (including in any subsubdirectories) should be imported. - - :param project_root: The root directory of the project (i.e., the "ocp" folder) - - -.. py:function:: _get_project_root() -> pathlib.Path - - Gets the root folder of the project (the "ocp" folder) - :return: The absolute path to the project root. - - -.. py:function:: setup_imports(config: dict | None = None) -> None - - -.. py:function:: dict_set_recursively(dictionary, key_sequence, val) -> None - - -.. py:function:: parse_value(value) - - Parse string as Python literal if possible and fallback to string. - - -.. py:function:: create_dict_from_args(args: list, sep: str = '.') - - Create a (nested) dictionary from console arguments. - Keys in different dictionary levels are separated by sep. - - -.. py:function:: load_config(path: str, previous_includes: list | None = None) - - -.. py:function:: build_config(args, args_override) - - -.. py:function:: create_grid(base_config, sweep_file: str) - - -.. py:function:: save_experiment_log(args, jobs, configs) - - -.. py:function:: get_pbc_distances(pos, edge_index, cell, cell_offsets, neighbors, return_offsets: bool = False, return_distance_vec: bool = False) - - -.. py:function:: radius_graph_pbc(data, radius, max_num_neighbors_threshold, enforce_max_neighbors_strictly: bool = False, pbc=None) - - -.. py:function:: get_max_neighbors_mask(natoms, index, atom_distance, max_num_neighbors_threshold, degeneracy_tolerance: float = 0.01, enforce_max_strictly: bool = False) - - Give a mask that filters out edges so that each atom has at most - `max_num_neighbors_threshold` neighbors. - Assumes that `index` is sorted. - - Enforcing the max strictly can force the arbitrary choice between - degenerate edges. This can lead to undesired behaviors; for - example, bulk formation energies which are not invariant to - unit cell choice. - - A degeneracy tolerance can help prevent sudden changes in edge - existence from small changes in atom position, for example, - rounding errors, slab relaxation, temperature, etc. - - -.. py:function:: get_pruned_edge_idx(edge_index, num_atoms: int, max_neigh: float = 1000000000.0) -> torch.Tensor - - -.. py:function:: merge_dicts(dict1: dict, dict2: dict) - - Recursively merge two dictionaries. - Values in dict2 override values in dict1. If dict1 and dict2 contain a dictionary as a - value, this will call itself recursively to merge these dictionaries. - This does not modify the input dictionaries (creates an internal copy). 
- Additionally returns a list of detected duplicates. - Adapted from https://github.com/TUM-DAML/seml/blob/master/seml/utils.py - - :param dict1: First dict. - :type dict1: dict - :param dict2: Second dict. Values in dict2 will override values from dict1 in case they share the same key. - :type dict2: dict - - :returns: **return_dict** -- Merged dictionaries. - :rtype: dict - - -.. py:class:: SeverityLevelBetween(min_level: int, max_level: int) - - - Bases: :py:obj:`logging.Filter` - - Filter instances are used to perform arbitrary filtering of LogRecords. - - Loggers and Handlers can optionally use Filter instances to filter - records as desired. The base filter class only allows events which are - below a certain point in the logger hierarchy. For example, a filter - initialized with "A.B" will allow events logged by loggers "A.B", - "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If - initialized with the empty string, all events are passed. - - .. py:method:: filter(record) -> bool - - Determine if the specified record is to be logged. - - Returns True if the record should be logged, or False otherwise. - If deemed appropriate, the record may be modified in-place. - - - -.. py:function:: setup_logging() -> None - - -.. py:function:: compute_neighbors(data, edge_index) - - -.. py:function:: check_traj_files(batch, traj_dir) -> bool - - -.. py:function:: new_trainer_context(*, config: dict[str, Any], distributed: bool = False) - - -.. py:function:: _resolve_scale_factor_submodule(model: torch.nn.Module, name: str) - - -.. py:function:: _report_incompat_keys(model: torch.nn.Module, keys: torch.nn.modules.module._IncompatibleKeys, strict: bool = False) -> tuple[list[str], list[str]] - - -.. py:function:: load_state_dict(module: torch.nn.Module, state_dict: collections.abc.Mapping[str, torch.Tensor], strict: bool = True) -> tuple[list[str], list[str]] - - -.. py:function:: scatter_det(*args, **kwargs) - - -.. py:function:: get_commit_hash() - - -.. py:function:: cg_change_mat(ang_mom: int, device: str = 'cpu') -> torch.tensor - - -.. py:function:: irreps_sum(ang_mom: int) -> int - - Returns the sum of the dimensions of the irreps up to the specified angular momentum. - - :param ang_mom: max angular momenttum to sum up dimensions of irreps - - -.. py:function:: update_config(base_config) - - Configs created prior to OCP 2.0 are organized a little different than they - are now. Update old configs to fit the new expected structure. - - -.. py:function:: get_loss_module(loss_name) - - diff --git a/_sources/autoapi/fairchem/core/datasets/_utils/index.rst b/_sources/autoapi/fairchem/core/datasets/_utils/index.rst deleted file mode 100644 index 414ac1b76..000000000 --- a/_sources/autoapi/fairchem/core/datasets/_utils/index.rst +++ /dev/null @@ -1,35 +0,0 @@ -:py:mod:`fairchem.core.datasets._utils` -======================================= - -.. py:module:: fairchem.core.datasets._utils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.datasets._utils.rename_data_object_keys - - - -.. 
py:function:: rename_data_object_keys(data_object: torch_geometric.data.Data, key_mapping: dict[str, str]) -> torch_geometric.data.Data - - Rename data object keys - - :param data_object: data object - :param key_mapping: dictionary specifying keys to rename and new names {prev_key: new_key} - - diff --git a/_sources/autoapi/fairchem/core/datasets/ase_datasets/index.rst b/_sources/autoapi/fairchem/core/datasets/ase_datasets/index.rst deleted file mode 100644 index 3985790af..000000000 --- a/_sources/autoapi/fairchem/core/datasets/ase_datasets/index.rst +++ /dev/null @@ -1,319 +0,0 @@ -:py:mod:`fairchem.core.datasets.ase_datasets` -============================================= - -.. py:module:: fairchem.core.datasets.ase_datasets - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.datasets.ase_datasets.AseAtomsDataset - fairchem.core.datasets.ase_datasets.AseReadDataset - fairchem.core.datasets.ase_datasets.AseReadMultiStructureDataset - fairchem.core.datasets.ase_datasets.AseDBDataset - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.datasets.ase_datasets.apply_one_tags - - - -.. py:function:: apply_one_tags(atoms: ase.Atoms, skip_if_nonzero: bool = True, skip_always: bool = False) - - This function will apply tags of 1 to an ASE atoms object. - It is used as an atoms_transform in the datasets contained in this file. - - Certain models will treat atoms differently depending on their tags. - For example, GemNet-OC by default will only compute triplet and quadruplet interactions - for atoms with non-zero tags. This model throws an error if there are no tagged atoms. - For this reason, the default behavior is to tag atoms in structures with no tags. - - :param skip_if_nonzero: If at least one atom has a nonzero tag, do not tag any atoms - :type skip_if_nonzero: bool - :param skip_always: Do not apply any tags. This arg exists so that this function can be disabled - without needing to pass a callable (which is currently difficult to do with main.py) - :type skip_always: bool - - -.. py:class:: AseAtomsDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) - - - Bases: :py:obj:`torch.utils.data.Dataset`, :py:obj:`abc.ABC` - - This is an abstract Dataset that includes helpful utilities for turning - ASE atoms objects into OCP-usable data objects. This should not be instantiated directly - as get_atoms_object and load_dataset_get_ids are not implemented in this base class. - - Derived classes must add at least two things: - self.get_atoms_object(id): a function that takes an identifier and returns a corresponding atoms object - - self.load_dataset_get_ids(config: dict): This function is responsible for any initialization/loads - of the dataset and importantly must return a list of all possible identifiers that can be passed into - self.get_atoms_object(id) - - Identifiers need not be any particular type. - - .. py:method:: __len__() -> int - - - .. py:method:: __getitem__(idx) - - - .. py:method:: get_atoms(idx: str | int) -> ase.Atoms - :abstractmethod: - - - .. py:method:: _load_dataset_get_ids(config) - :abstractmethod: - - - .. py:method:: get_relaxed_energy(identifier) - :abstractmethod: - - - .. py:method:: close_db() -> None - - - .. 
py:method:: get_metadata(num_samples: int = 100) -> dict - - - -.. py:class:: AseReadDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) - - - Bases: :py:obj:`AseAtomsDataset` - - This Dataset uses ase.io.read to load data from a directory on disk. - This is intended for small-scale testing and demonstrations of OCP. - Larger datasets are better served by the efficiency of other dataset types - such as LMDB. - - For a full list of ASE-readable filetypes, see - https://wiki.fysik.dtu.dk/ase/ase/io/io.html - - :param config: src (str): The source folder that contains your ASE-readable files - - pattern (str): Filepath matching each file you want to read - ex. "*/POSCAR", "*.cif", "*.xyz" - search recursively with two wildcards: "**/POSCAR" or "**/*.cif" - - a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs() - default options will work for most users - - If you are using this for a training dataset, set - "r_energy":True, "r_forces":True, and/or "r_stress":True as appropriate - In that case, energy/forces must be in the files you read (ex. OUTCAR) - - ase_read_args (dict): Keyword arguments for ase.io.read() - - keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need - to iterate over a dataset many times (e.g. training for many epochs). - Not recommended for large datasets. - - include_relaxed_energy (bool): Include the relaxed energy in the resulting data object. - The relaxed structure is assumed to be the final structure in the file - (e.g. the last frame of a .traj). - - atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable - - transform_args (dict): Additional keyword arguments for the transform callable - - key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used - in the model with the corresponding property as it was named in the dataset. Only need to use if - the name is different. - :type config: dict - :param atoms_transform: Additional preprocessing function applied to the Atoms - object. Useful for applying tags, for example. - :type atoms_transform: callable, optional - - .. py:method:: _load_dataset_get_ids(config) -> list[pathlib.Path] - - - .. py:method:: get_atoms(idx: str | int) -> ase.Atoms - - - .. py:method:: get_relaxed_energy(identifier) -> float - - - -.. py:class:: AseReadMultiStructureDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) - - - Bases: :py:obj:`AseAtomsDataset` - - This Dataset can read multiple structures from each file using ase.io.read. - The disadvantage is that all files must be read at startup. - This is a significant cost for large datasets. - - This is intended for small-scale testing and demonstrations of OCP. - Larger datasets are better served by the efficiency of other dataset types - such as LMDB. - - For a full list of ASE-readable filetypes, see - https://wiki.fysik.dtu.dk/ase/ase/io/io.html - - :param config: src (str): The source folder that contains your ASE-readable files - - pattern (str): Filepath matching each file you want to read - ex. "*.traj", "*.xyz" - search recursively with two wildcards: "**/POSCAR" or "**/*.cif" - - index_file (str): Filepath to an indexing file, which contains each filename - and the number of structures contained in each file. 
For instance: - - /path/to/relaxation1.traj 200 - /path/to/relaxation2.traj 150 - - This will overrule the src and pattern that you specify! - - a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs() - default options will work for most users - - If you are using this for a training dataset, set - "r_energy":True, "r_forces":True, and/or "r_stress":True as appropriate - In that case, energy/forces must be in the files you read (ex. OUTCAR) - - ase_read_args (dict): Keyword arguments for ase.io.read() - - keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need - to iterate over a dataset many times (e.g. training for many epochs). - Not recommended for large datasets. - - include_relaxed_energy (bool): Include the relaxed energy in the resulting data object. - The relaxed structure is assumed to be the final structure in the file - (e.g. the last frame of a .traj). - - use_tqdm (bool): Use TQDM progress bar when initializing dataset - - atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable - - transform_args (dict): Additional keyword arguments for the transform callable - - key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used - in the model with the corresponding property as it was named in the dataset. Only need to use if - the name is different. - :type config: dict - :param atoms_transform: Additional preprocessing function applied to the Atoms - object. Useful for applying tags, for example. - :type atoms_transform: callable, optional - :param transform: Additional preprocessing function for the Data object - :type transform: callable, optional - - .. py:method:: _load_dataset_get_ids(config) -> list[str] - - - .. py:method:: get_atoms(idx: str) -> ase.Atoms - - - .. py:method:: get_metadata(num_samples: int = 100) -> dict - - - .. py:method:: get_relaxed_energy(identifier) -> float - - - -.. py:class:: AseDBDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) - - - Bases: :py:obj:`AseAtomsDataset` - - This Dataset connects to an ASE Database, allowing the storage of atoms objects - with a variety of backends including JSON, SQLite, and database server options. - - For more information, see: - https://databases.fysik.dtu.dk/ase/ase/db/db.html - - :param config: - src (str): Either - - the path an ASE DB, - - the connection address of an ASE DB, - - a folder with multiple ASE DBs, - - a list of folders with ASE DBs - - a glob string to use to find ASE DBs, or - - a list of ASE db paths/addresses. - If a folder, every file will be attempted as an ASE DB, and warnings - are raised for any files that can't connect cleanly - - Note that for large datasets, ID loading can be slow and there can be many - ids, so it's advised to make loading the id list as easy as possible. There is not - an obvious way to get a full list of ids from most ASE dbs besides simply looping - through the entire dataset. See the AseLMDBDataset which was written with this usecase - in mind. 
- - connect_args (dict): Keyword arguments for ase.db.connect() - - select_args (dict): Keyword arguments for ase.db.select() - You can use this to query/filter your database - - a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs() - default options will work for most users - - If you are using this for a training dataset, set - "r_energy":True, "r_forces":True, and/or "r_stress":True as appropriate - In that case, energy/forces must be in the database - - keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need - to iterate over a dataset many times (e.g. training for many epochs). - Not recommended for large datasets. - - atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable - - transforms (dict[str, dict]): Dictionary specifying data transforms as {transform_function: config} - where config is a dictionary specifying arguments to the transform_function - - key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used - in the model with the corresponding property as it was named in the dataset. Only need to use if - the name is different. - :type config: dict - :param atoms_transform: Additional preprocessing function applied to the Atoms - object. Useful for applying tags, for example. - :type atoms_transform: callable, optional - :param transform: deprecated? - :type transform: callable, optional - - .. py:method:: _load_dataset_get_ids(config: dict) -> list[int] - - - .. py:method:: get_atoms(idx: int) -> ase.Atoms - - Get atoms object corresponding to datapoint idx. Useful to read other properties not in data object. - :param idx: index in dataset - :type idx: int - - :returns: ASE atoms corresponding to datapoint idx - :rtype: atoms - - - .. py:method:: connect_db(address: str | pathlib.Path, connect_args: dict | None = None) -> ase.db.core.Database - :staticmethod: - - - .. py:method:: close_db() -> None - - - .. py:method:: get_metadata(num_samples: int = 100) -> dict - - - .. py:method:: get_relaxed_energy(identifier) - :abstractmethod: - - - diff --git a/_sources/autoapi/fairchem/core/datasets/embeddings/atomic_radii/index.rst b/_sources/autoapi/fairchem/core/datasets/embeddings/atomic_radii/index.rst deleted file mode 100644 index db8a19cbb..000000000 --- a/_sources/autoapi/fairchem/core/datasets/embeddings/atomic_radii/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -:py:mod:`fairchem.core.datasets.embeddings.atomic_radii` -======================================================== - -.. py:module:: fairchem.core.datasets.embeddings.atomic_radii - -.. autoapi-nested-parse:: - - Atomic radii in picometers - - NaN stored for unavailable parameters. - - - -Module Contents ---------------- - -.. py:data:: ATOMIC_RADII - - - diff --git a/_sources/autoapi/fairchem/core/datasets/embeddings/continuous_embeddings/index.rst b/_sources/autoapi/fairchem/core/datasets/embeddings/continuous_embeddings/index.rst deleted file mode 100644 index acec8a51a..000000000 --- a/_sources/autoapi/fairchem/core/datasets/embeddings/continuous_embeddings/index.rst +++ /dev/null @@ -1,31 +0,0 @@ -:py:mod:`fairchem.core.datasets.embeddings.continuous_embeddings` -================================================================= - -.. py:module:: fairchem.core.datasets.embeddings.continuous_embeddings - -.. autoapi-nested-parse:: - - CGCNN-like embeddings using continuous values instead of original k-hot. 
- - Properties: - Group number - Period number - Electronegativity - Covalent radius - Valence electrons - First ionization energy - Electron affinity - Block - Atomic Volume - - NaN stored for unavaialable parameters. - - - -Module Contents ---------------- - -.. py:data:: CONTINUOUS_EMBEDDINGS - - - diff --git a/_sources/autoapi/fairchem/core/datasets/embeddings/index.rst b/_sources/autoapi/fairchem/core/datasets/embeddings/index.rst deleted file mode 100644 index ddb829125..000000000 --- a/_sources/autoapi/fairchem/core/datasets/embeddings/index.rst +++ /dev/null @@ -1,37 +0,0 @@ -:py:mod:`fairchem.core.datasets.embeddings` -=========================================== - -.. py:module:: fairchem.core.datasets.embeddings - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - atomic_radii/index.rst - continuous_embeddings/index.rst - khot_embeddings/index.rst - qmof_khot_embeddings/index.rst - - -Package Contents ----------------- - -.. py:data:: ATOMIC_RADII - - - -.. py:data:: CONTINUOUS_EMBEDDINGS - - - -.. py:data:: KHOT_EMBEDDINGS - - - -.. py:data:: QMOF_KHOT_EMBEDDINGS - - - diff --git a/_sources/autoapi/fairchem/core/datasets/embeddings/khot_embeddings/index.rst b/_sources/autoapi/fairchem/core/datasets/embeddings/khot_embeddings/index.rst deleted file mode 100644 index 123b707ad..000000000 --- a/_sources/autoapi/fairchem/core/datasets/embeddings/khot_embeddings/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -:py:mod:`fairchem.core.datasets.embeddings.khot_embeddings` -=========================================================== - -.. py:module:: fairchem.core.datasets.embeddings.khot_embeddings - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - Original CGCNN k-hot elemental embeddings. - - - -Module Contents ---------------- - -.. py:data:: KHOT_EMBEDDINGS - - - diff --git a/_sources/autoapi/fairchem/core/datasets/embeddings/qmof_khot_embeddings/index.rst b/_sources/autoapi/fairchem/core/datasets/embeddings/qmof_khot_embeddings/index.rst deleted file mode 100644 index 37b8c99c0..000000000 --- a/_sources/autoapi/fairchem/core/datasets/embeddings/qmof_khot_embeddings/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -:py:mod:`fairchem.core.datasets.embeddings.qmof_khot_embeddings` -================================================================ - -.. py:module:: fairchem.core.datasets.embeddings.qmof_khot_embeddings - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - k-hot elemental embeddings from QMOF, motivated by the following Github Issue threads: - https://github.com/txie-93/cgcnn/issues/2 - https://github.com/arosen93/QMOF/issues/18 - - - -Module Contents ---------------- - -.. py:data:: QMOF_KHOT_EMBEDDINGS - - - diff --git a/_sources/autoapi/fairchem/core/datasets/index.rst b/_sources/autoapi/fairchem/core/datasets/index.rst deleted file mode 100644 index 4d47947ed..000000000 --- a/_sources/autoapi/fairchem/core/datasets/index.rst +++ /dev/null @@ -1,487 +0,0 @@ -:py:mod:`fairchem.core.datasets` -================================ - -.. py:module:: fairchem.core.datasets - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - embeddings/index.rst - - -Submodules ----------- -.. 
toctree:: - :titlesonly: - :maxdepth: 1 - - _utils/index.rst - ase_datasets/index.rst - lmdb_database/index.rst - lmdb_dataset/index.rst - oc22_lmdb_dataset/index.rst - target_metadata_guesser/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.datasets.AseDBDataset - fairchem.core.datasets.AseReadDataset - fairchem.core.datasets.AseReadMultiStructureDataset - fairchem.core.datasets.LMDBDatabase - fairchem.core.datasets.LmdbDataset - fairchem.core.datasets.SinglePointLmdbDataset - fairchem.core.datasets.TrajectoryLmdbDataset - fairchem.core.datasets.OC22LmdbDataset - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.datasets.data_list_collater - - - -.. py:class:: AseDBDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) - - - Bases: :py:obj:`AseAtomsDataset` - - This Dataset connects to an ASE Database, allowing the storage of atoms objects - with a variety of backends including JSON, SQLite, and database server options. - - For more information, see: - https://databases.fysik.dtu.dk/ase/ase/db/db.html - - :param config: - src (str): Either - - the path an ASE DB, - - the connection address of an ASE DB, - - a folder with multiple ASE DBs, - - a list of folders with ASE DBs - - a glob string to use to find ASE DBs, or - - a list of ASE db paths/addresses. - If a folder, every file will be attempted as an ASE DB, and warnings - are raised for any files that can't connect cleanly - - Note that for large datasets, ID loading can be slow and there can be many - ids, so it's advised to make loading the id list as easy as possible. There is not - an obvious way to get a full list of ids from most ASE dbs besides simply looping - through the entire dataset. See the AseLMDBDataset which was written with this usecase - in mind. - - connect_args (dict): Keyword arguments for ase.db.connect() - - select_args (dict): Keyword arguments for ase.db.select() - You can use this to query/filter your database - - a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs() - default options will work for most users - - If you are using this for a training dataset, set - "r_energy":True, "r_forces":True, and/or "r_stress":True as appropriate - In that case, energy/forces must be in the database - - keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need - to iterate over a dataset many times (e.g. training for many epochs). - Not recommended for large datasets. - - atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable - - transforms (dict[str, dict]): Dictionary specifying data transforms as {transform_function: config} - where config is a dictionary specifying arguments to the transform_function - - key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used - in the model with the corresponding property as it was named in the dataset. Only need to use if - the name is different. - :type config: dict - :param atoms_transform: Additional preprocessing function applied to the Atoms - object. Useful for applying tags, for example. - :type atoms_transform: callable, optional - :param transform: deprecated? - :type transform: callable, optional - - .. py:method:: _load_dataset_get_ids(config: dict) -> list[int] - - - .. py:method:: get_atoms(idx: int) -> ase.Atoms - - Get atoms object corresponding to datapoint idx. 
Useful to read other properties not in data object. - :param idx: index in dataset - :type idx: int - - :returns: ASE atoms corresponding to datapoint idx - :rtype: atoms - - - .. py:method:: connect_db(address: str | pathlib.Path, connect_args: dict | None = None) -> ase.db.core.Database - :staticmethod: - - - .. py:method:: close_db() -> None - - - .. py:method:: get_metadata(num_samples: int = 100) -> dict - - - .. py:method:: get_relaxed_energy(identifier) - :abstractmethod: - - - -.. py:class:: AseReadDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) - - - Bases: :py:obj:`AseAtomsDataset` - - This Dataset uses ase.io.read to load data from a directory on disk. - This is intended for small-scale testing and demonstrations of OCP. - Larger datasets are better served by the efficiency of other dataset types - such as LMDB. - - For a full list of ASE-readable filetypes, see - https://wiki.fysik.dtu.dk/ase/ase/io/io.html - - :param config: src (str): The source folder that contains your ASE-readable files - - pattern (str): Filepath matching each file you want to read - ex. "*/POSCAR", "*.cif", "*.xyz" - search recursively with two wildcards: "**/POSCAR" or "**/*.cif" - - a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs() - default options will work for most users - - If you are using this for a training dataset, set - "r_energy":True, "r_forces":True, and/or "r_stress":True as appropriate - In that case, energy/forces must be in the files you read (ex. OUTCAR) - - ase_read_args (dict): Keyword arguments for ase.io.read() - - keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need - to iterate over a dataset many times (e.g. training for many epochs). - Not recommended for large datasets. - - include_relaxed_energy (bool): Include the relaxed energy in the resulting data object. - The relaxed structure is assumed to be the final structure in the file - (e.g. the last frame of a .traj). - - atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable - - transform_args (dict): Additional keyword arguments for the transform callable - - key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used - in the model with the corresponding property as it was named in the dataset. Only need to use if - the name is different. - :type config: dict - :param atoms_transform: Additional preprocessing function applied to the Atoms - object. Useful for applying tags, for example. - :type atoms_transform: callable, optional - - .. py:method:: _load_dataset_get_ids(config) -> list[pathlib.Path] - - - .. py:method:: get_atoms(idx: str | int) -> ase.Atoms - - - .. py:method:: get_relaxed_energy(identifier) -> float - - - -.. py:class:: AseReadMultiStructureDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags) - - - Bases: :py:obj:`AseAtomsDataset` - - This Dataset can read multiple structures from each file using ase.io.read. - The disadvantage is that all files must be read at startup. - This is a significant cost for large datasets. - - This is intended for small-scale testing and demonstrations of OCP. - Larger datasets are better served by the efficiency of other dataset types - such as LMDB. 
- - For a full list of ASE-readable filetypes, see - https://wiki.fysik.dtu.dk/ase/ase/io/io.html - - :param config: src (str): The source folder that contains your ASE-readable files - - pattern (str): Filepath matching each file you want to read - ex. "*.traj", "*.xyz" - search recursively with two wildcards: "**/POSCAR" or "**/*.cif" - - index_file (str): Filepath to an indexing file, which contains each filename - and the number of structures contained in each file. For instance: - - /path/to/relaxation1.traj 200 - /path/to/relaxation2.traj 150 - - This will overrule the src and pattern that you specify! - - a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs() - default options will work for most users - - If you are using this for a training dataset, set - "r_energy":True, "r_forces":True, and/or "r_stress":True as appropriate - In that case, energy/forces must be in the files you read (ex. OUTCAR) - - ase_read_args (dict): Keyword arguments for ase.io.read() - - keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need - to iterate over a dataset many times (e.g. training for many epochs). - Not recommended for large datasets. - - include_relaxed_energy (bool): Include the relaxed energy in the resulting data object. - The relaxed structure is assumed to be the final structure in the file - (e.g. the last frame of a .traj). - - use_tqdm (bool): Use TQDM progress bar when initializing dataset - - atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable - - transform_args (dict): Additional keyword arguments for the transform callable - - key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used - in the model with the corresponding property as it was named in the dataset. Only need to use if - the name is different. - :type config: dict - :param atoms_transform: Additional preprocessing function applied to the Atoms - object. Useful for applying tags, for example. - :type atoms_transform: callable, optional - :param transform: Additional preprocessing function for the Data object - :type transform: callable, optional - - .. py:method:: _load_dataset_get_ids(config) -> list[str] - - - .. py:method:: get_atoms(idx: str) -> ase.Atoms - - - .. py:method:: get_metadata(num_samples: int = 100) -> dict - - - .. py:method:: get_relaxed_energy(identifier) -> float - - - -.. py:class:: LMDBDatabase(filename: str | pathlib.Path | None = None, create_indices: bool = True, use_lock_file: bool = False, serial: bool = False, readonly: bool = False, *args, **kwargs) - - - Bases: :py:obj:`ase.db.core.Database` - - Base class for all databases. - - .. py:property:: metadata - - Load the metadata from the DB if present - - .. py:property:: _nextid - - Get the id of the next row to be written - - .. py:method:: __enter__() -> typing_extensions.Self - - - .. py:method:: __exit__(exc_type, exc_value, tb) -> None - - - .. py:method:: close() -> None - - - .. py:method:: _write(atoms: ase.Atoms | ase.db.row.AtomsRow, key_value_pairs: dict, data: dict | None, idx: int | None = None) -> None - - - .. py:method:: _update(idx: int, key_value_pairs: dict | None = None, data: dict | None = None) - - - .. py:method:: _write_deleted_ids() - - - .. py:method:: delete(ids: list[int]) -> None - - Delete rows. - - - .. py:method:: _get_row(idx: int, include_data: bool = True) - - - .. 
py:method:: _get_row_by_index(index: int, include_data: bool = True) - - Auxiliary function to get the ith entry, rather than a specific id - - - .. py:method:: _select(keys, cmps: list[tuple[str, str, str]], explain: bool = False, verbosity: int = 0, limit: int | None = None, offset: int = 0, sort: str | None = None, include_data: bool = True, columns: str = 'all') - - - .. py:method:: count(selection=None, **kwargs) -> int - - Count rows. - - See the select() method for the selection syntax. Use db.count() or - len(db) to count all rows. - - - .. py:method:: _load_ids() -> None - - Load ids from the DB - - Since ASE db ids are mostly 1-N integers, but can be missing entries - if ids have been deleted. To save space and operating under the assumption - that there will probably not be many deletions in most OCP datasets, - we just store the deleted ids. - - - -.. py:class:: LmdbDataset(config) - - - Bases: :py:obj:`torch.utils.data.Dataset`\ [\ :py:obj:`T_co`\ ] - - An abstract class representing a :class:`Dataset`. - - All datasets that represent a map from keys to data samples should subclass - it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a - data sample for a given key. Subclasses could also optionally overwrite - :meth:`__len__`, which is expected to return the size of the dataset by many - :class:`~torch.utils.data.Sampler` implementations and the default options - of :class:`~torch.utils.data.DataLoader`. Subclasses could also - optionally implement :meth:`__getitems__`, for speedup batched samples - loading. This method accepts list of indices of samples of batch and returns - list of samples. - - .. note:: - :class:`~torch.utils.data.DataLoader` by default constructs an index - sampler that yields integral indices. To make it work with a map-style - dataset with non-integral indices/keys, a custom sampler must be provided. - - .. py:attribute:: metadata_path - :type: pathlib.Path - - - - .. py:attribute:: sharded - :type: bool - - Dataset class to load from LMDB files containing relaxation - trajectories or single point computations. - Useful for Structure to Energy & Force (S2EF), Initial State to - Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks. - The keys in the LMDB must be integers (stored as ascii objects) starting - from 0 through the length of the LMDB. For historical reasons any key named - "length" is ignored since that was used to infer length of many lmdbs in the same - folder, but lmdb lengths are now calculated directly from the number of keys. - :param config: Dataset configuration - :type config: dict - - .. py:method:: __len__() -> int - - - .. py:method:: __getitem__(idx: int) -> T_co - - - .. py:method:: connect_db(lmdb_path: pathlib.Path | None = None) -> lmdb.Environment - - - .. py:method:: close_db() -> None - - - .. py:method:: get_metadata(num_samples: int = 100) - - - -.. py:class:: SinglePointLmdbDataset(config, transform=None) - - - Bases: :py:obj:`LmdbDataset`\ [\ :py:obj:`torch_geometric.data.data.BaseData`\ ] - - An abstract class representing a :class:`Dataset`. - - All datasets that represent a map from keys to data samples should subclass - it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a - data sample for a given key. Subclasses could also optionally overwrite - :meth:`__len__`, which is expected to return the size of the dataset by many - :class:`~torch.utils.data.Sampler` implementations and the default options - of :class:`~torch.utils.data.DataLoader`. 
Subclasses could also - optionally implement :meth:`__getitems__`, for speedup batched samples - loading. This method accepts list of indices of samples of batch and returns - list of samples. - - .. note:: - :class:`~torch.utils.data.DataLoader` by default constructs an index - sampler that yields integral indices. To make it work with a map-style - dataset with non-integral indices/keys, a custom sampler must be provided. - - -.. py:class:: TrajectoryLmdbDataset(config, transform=None) - - - Bases: :py:obj:`LmdbDataset`\ [\ :py:obj:`torch_geometric.data.data.BaseData`\ ] - - An abstract class representing a :class:`Dataset`. - - All datasets that represent a map from keys to data samples should subclass - it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a - data sample for a given key. Subclasses could also optionally overwrite - :meth:`__len__`, which is expected to return the size of the dataset by many - :class:`~torch.utils.data.Sampler` implementations and the default options - of :class:`~torch.utils.data.DataLoader`. Subclasses could also - optionally implement :meth:`__getitems__`, for speedup batched samples - loading. This method accepts list of indices of samples of batch and returns - list of samples. - - .. note:: - :class:`~torch.utils.data.DataLoader` by default constructs an index - sampler that yields integral indices. To make it work with a map-style - dataset with non-integral indices/keys, a custom sampler must be provided. - - -.. py:function:: data_list_collater(data_list: list[torch_geometric.data.data.BaseData], otf_graph: bool = False) -> torch_geometric.data.data.BaseData - - -.. py:class:: OC22LmdbDataset(config, transform=None) - - - Bases: :py:obj:`torch.utils.data.Dataset` - - Dataset class to load from LMDB files containing relaxation - trajectories or single point computations. - - Useful for Structure to Energy & Force (S2EF), Initial State to - Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks. - - The keys in the LMDB must be integers (stored as ascii objects) starting - from 0 through the length of the LMDB. For historical reasons any key named - "length" is ignored since that was used to infer length of many lmdbs in the same - folder, but lmdb lengths are now calculated directly from the number of keys. - - :param config: Dataset configuration - :type config: dict - :param transform: Data transform function. - (default: :obj:`None`) - :type transform: callable, optional - - .. py:method:: __len__() -> int - - - .. py:method:: __getitem__(idx) - - - .. py:method:: connect_db(lmdb_path=None) - - - .. py:method:: close_db() -> None - - - diff --git a/_sources/autoapi/fairchem/core/datasets/lmdb_database/index.rst b/_sources/autoapi/fairchem/core/datasets/lmdb_database/index.rst deleted file mode 100644 index 1def5ab2e..000000000 --- a/_sources/autoapi/fairchem/core/datasets/lmdb_database/index.rst +++ /dev/null @@ -1,111 +0,0 @@ -:py:mod:`fairchem.core.datasets.lmdb_database` -============================================== - -.. py:module:: fairchem.core.datasets.lmdb_database - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is modified from the ASE db json backend - and is thus licensed under the corresponding LGPL2.1 license - - The ASE notice for the LGPL2.1 license is available here: - https://gitlab.com/ase/ase/-/blob/master/LICENSE - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. 
autoapisummary:: - - fairchem.core.datasets.lmdb_database.LMDBDatabase - - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.datasets.lmdb_database.RESERVED_KEYS - - -.. py:data:: RESERVED_KEYS - :value: ['nextid', 'metadata', 'deleted_ids'] - - - -.. py:class:: LMDBDatabase(filename: str | pathlib.Path | None = None, create_indices: bool = True, use_lock_file: bool = False, serial: bool = False, readonly: bool = False, *args, **kwargs) - - - Bases: :py:obj:`ase.db.core.Database` - - Base class for all databases. - - .. py:property:: metadata - - Load the metadata from the DB if present - - .. py:property:: _nextid - - Get the id of the next row to be written - - .. py:method:: __enter__() -> typing_extensions.Self - - - .. py:method:: __exit__(exc_type, exc_value, tb) -> None - - - .. py:method:: close() -> None - - - .. py:method:: _write(atoms: ase.Atoms | ase.db.row.AtomsRow, key_value_pairs: dict, data: dict | None, idx: int | None = None) -> None - - - .. py:method:: _update(idx: int, key_value_pairs: dict | None = None, data: dict | None = None) - - - .. py:method:: _write_deleted_ids() - - - .. py:method:: delete(ids: list[int]) -> None - - Delete rows. - - - .. py:method:: _get_row(idx: int, include_data: bool = True) - - - .. py:method:: _get_row_by_index(index: int, include_data: bool = True) - - Auxiliary function to get the ith entry, rather than a specific id - - - .. py:method:: _select(keys, cmps: list[tuple[str, str, str]], explain: bool = False, verbosity: int = 0, limit: int | None = None, offset: int = 0, sort: str | None = None, include_data: bool = True, columns: str = 'all') - - - .. py:method:: count(selection=None, **kwargs) -> int - - Count rows. - - See the select() method for the selection syntax. Use db.count() or - len(db) to count all rows. - - - .. py:method:: _load_ids() -> None - - Load ids from the DB - - Since ASE db ids are mostly 1-N integers, but can be missing entries - if ids have been deleted. To save space and operating under the assumption - that there will probably not be many deletions in most OCP datasets, - we just store the deleted ids. - - - diff --git a/_sources/autoapi/fairchem/core/datasets/lmdb_dataset/index.rst b/_sources/autoapi/fairchem/core/datasets/lmdb_dataset/index.rst deleted file mode 100644 index 516cbc1d8..000000000 --- a/_sources/autoapi/fairchem/core/datasets/lmdb_dataset/index.rst +++ /dev/null @@ -1,154 +0,0 @@ -:py:mod:`fairchem.core.datasets.lmdb_dataset` -============================================= - -.. py:module:: fairchem.core.datasets.lmdb_dataset - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.datasets.lmdb_dataset.LmdbDataset - fairchem.core.datasets.lmdb_dataset.SinglePointLmdbDataset - fairchem.core.datasets.lmdb_dataset.TrajectoryLmdbDataset - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.datasets.lmdb_dataset.data_list_collater - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.datasets.lmdb_dataset.T_co - - -.. py:data:: T_co - - - -.. py:class:: LmdbDataset(config) - - - Bases: :py:obj:`torch.utils.data.Dataset`\ [\ :py:obj:`T_co`\ ] - - An abstract class representing a :class:`Dataset`. - - All datasets that represent a map from keys to data samples should subclass - it. 
All subclasses should overwrite :meth:`__getitem__`, supporting fetching a - data sample for a given key. Subclasses could also optionally overwrite - :meth:`__len__`, which is expected to return the size of the dataset by many - :class:`~torch.utils.data.Sampler` implementations and the default options - of :class:`~torch.utils.data.DataLoader`. Subclasses could also - optionally implement :meth:`__getitems__`, for speedup batched samples - loading. This method accepts list of indices of samples of batch and returns - list of samples. - - .. note:: - :class:`~torch.utils.data.DataLoader` by default constructs an index - sampler that yields integral indices. To make it work with a map-style - dataset with non-integral indices/keys, a custom sampler must be provided. - - .. py:attribute:: metadata_path - :type: pathlib.Path - - - - .. py:attribute:: sharded - :type: bool - - Dataset class to load from LMDB files containing relaxation - trajectories or single point computations. - Useful for Structure to Energy & Force (S2EF), Initial State to - Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks. - The keys in the LMDB must be integers (stored as ascii objects) starting - from 0 through the length of the LMDB. For historical reasons any key named - "length" is ignored since that was used to infer length of many lmdbs in the same - folder, but lmdb lengths are now calculated directly from the number of keys. - :param config: Dataset configuration - :type config: dict - - .. py:method:: __len__() -> int - - - .. py:method:: __getitem__(idx: int) -> T_co - - - .. py:method:: connect_db(lmdb_path: pathlib.Path | None = None) -> lmdb.Environment - - - .. py:method:: close_db() -> None - - - .. py:method:: get_metadata(num_samples: int = 100) - - - -.. py:class:: SinglePointLmdbDataset(config, transform=None) - - - Bases: :py:obj:`LmdbDataset`\ [\ :py:obj:`torch_geometric.data.data.BaseData`\ ] - - An abstract class representing a :class:`Dataset`. - - All datasets that represent a map from keys to data samples should subclass - it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a - data sample for a given key. Subclasses could also optionally overwrite - :meth:`__len__`, which is expected to return the size of the dataset by many - :class:`~torch.utils.data.Sampler` implementations and the default options - of :class:`~torch.utils.data.DataLoader`. Subclasses could also - optionally implement :meth:`__getitems__`, for speedup batched samples - loading. This method accepts list of indices of samples of batch and returns - list of samples. - - .. note:: - :class:`~torch.utils.data.DataLoader` by default constructs an index - sampler that yields integral indices. To make it work with a map-style - dataset with non-integral indices/keys, a custom sampler must be provided. - - -.. py:class:: TrajectoryLmdbDataset(config, transform=None) - - - Bases: :py:obj:`LmdbDataset`\ [\ :py:obj:`torch_geometric.data.data.BaseData`\ ] - - An abstract class representing a :class:`Dataset`. - - All datasets that represent a map from keys to data samples should subclass - it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a - data sample for a given key. Subclasses could also optionally overwrite - :meth:`__len__`, which is expected to return the size of the dataset by many - :class:`~torch.utils.data.Sampler` implementations and the default options - of :class:`~torch.utils.data.DataLoader`. 
Subclasses could also - optionally implement :meth:`__getitems__`, for speedup batched samples - loading. This method accepts list of indices of samples of batch and returns - list of samples. - - .. note:: - :class:`~torch.utils.data.DataLoader` by default constructs an index - sampler that yields integral indices. To make it work with a map-style - dataset with non-integral indices/keys, a custom sampler must be provided. - - -.. py:function:: data_list_collater(data_list: list[torch_geometric.data.data.BaseData], otf_graph: bool = False) -> torch_geometric.data.data.BaseData - - diff --git a/_sources/autoapi/fairchem/core/datasets/oc22_lmdb_dataset/index.rst b/_sources/autoapi/fairchem/core/datasets/oc22_lmdb_dataset/index.rst deleted file mode 100644 index 6fef390dd..000000000 --- a/_sources/autoapi/fairchem/core/datasets/oc22_lmdb_dataset/index.rst +++ /dev/null @@ -1,62 +0,0 @@ -:py:mod:`fairchem.core.datasets.oc22_lmdb_dataset` -================================================== - -.. py:module:: fairchem.core.datasets.oc22_lmdb_dataset - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.datasets.oc22_lmdb_dataset.OC22LmdbDataset - - - - -.. py:class:: OC22LmdbDataset(config, transform=None) - - - Bases: :py:obj:`torch.utils.data.Dataset` - - Dataset class to load from LMDB files containing relaxation - trajectories or single point computations. - - Useful for Structure to Energy & Force (S2EF), Initial State to - Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks. - - The keys in the LMDB must be integers (stored as ascii objects) starting - from 0 through the length of the LMDB. For historical reasons any key named - "length" is ignored since that was used to infer length of many lmdbs in the same - folder, but lmdb lengths are now calculated directly from the number of keys. - - :param config: Dataset configuration - :type config: dict - :param transform: Data transform function. - (default: :obj:`None`) - :type transform: callable, optional - - .. py:method:: __len__() -> int - - - .. py:method:: __getitem__(idx) - - - .. py:method:: connect_db(lmdb_path=None) - - - .. py:method:: close_db() -> None - - - diff --git a/_sources/autoapi/fairchem/core/datasets/target_metadata_guesser/index.rst b/_sources/autoapi/fairchem/core/datasets/target_metadata_guesser/index.rst deleted file mode 100644 index 7a4065b1f..000000000 --- a/_sources/autoapi/fairchem/core/datasets/target_metadata_guesser/index.rst +++ /dev/null @@ -1,42 +0,0 @@ -:py:mod:`fairchem.core.datasets.target_metadata_guesser` -======================================================== - -.. py:module:: fairchem.core.datasets.target_metadata_guesser - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.datasets.target_metadata_guesser.uniform_atoms_lengths - fairchem.core.datasets.target_metadata_guesser.target_constant_shape - fairchem.core.datasets.target_metadata_guesser.target_per_atom - fairchem.core.datasets.target_metadata_guesser.target_extensive - fairchem.core.datasets.target_metadata_guesser.guess_target_metadata - fairchem.core.datasets.target_metadata_guesser.guess_property_metadata - - - -.. py:function:: uniform_atoms_lengths(atoms_lens) -> bool - - -.. 
py:function:: target_constant_shape(atoms_lens, target_samples) -> bool - - -.. py:function:: target_per_atom(atoms_lens, target_samples) -> bool - - -.. py:function:: target_extensive(atoms_lens, target_samples, threshold: float = 0.2) - - -.. py:function:: guess_target_metadata(atoms_len, target_samples) - - -.. py:function:: guess_property_metadata(atoms_list) - - diff --git a/_sources/autoapi/fairchem/core/index.rst b/_sources/autoapi/fairchem/core/index.rst deleted file mode 100644 index 9a537a155..000000000 --- a/_sources/autoapi/fairchem/core/index.rst +++ /dev/null @@ -1,47 +0,0 @@ -:py:mod:`fairchem.core` -======================= - -.. py:module:: fairchem.core - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - common/index.rst - datasets/index.rst - models/index.rst - modules/index.rst - preprocessing/index.rst - scripts/index.rst - tasks/index.rst - tests/index.rst - trainers/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - _cli/index.rst - - -Package Contents ----------------- - -.. py:data:: __version__ - - - diff --git a/_sources/autoapi/fairchem/core/models/base/index.rst b/_sources/autoapi/fairchem/core/models/base/index.rst deleted file mode 100644 index d76f60ba5..000000000 --- a/_sources/autoapi/fairchem/core/models/base/index.rst +++ /dev/null @@ -1,80 +0,0 @@ -:py:mod:`fairchem.core.models.base` -=================================== - -.. py:module:: fairchem.core.models.base - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.base.BaseModel - - - - -.. py:class:: BaseModel(num_atoms=None, bond_feat_dim=None, num_targets=None) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:property:: num_params - :type: int - - - .. py:method:: forward(data) - :abstractmethod: - - - .. py:method:: generate_graph(data, cutoff=None, max_neighbors=None, use_pbc=None, otf_graph=None, enforce_max_neighbors_strictly=None) - - - .. py:method:: no_weight_decay() -> list - - Returns a list of parameters with no weight decay. 
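   A minimal sketch of subclassing ``BaseModel``, based only on the signatures
   documented above; the attribute names read from ``data`` (``atomic_numbers``,
   ``batch``) and the pooling logic are illustrative assumptions, not part of
   ``fairchem.core``::

       import torch
       from fairchem.core.models.base import BaseModel

       class ToyEnergyModel(BaseModel):
           # Hypothetical example model; the name and internals are illustrative only.
           def __init__(self, num_atoms=None, bond_feat_dim=None, num_targets=1, hidden=64):
               super().__init__(num_atoms, bond_feat_dim, num_targets)
               self.embedding = torch.nn.Embedding(100, hidden)
               self.out = torch.nn.Linear(hidden, num_targets)

           def forward(self, data):
               # Assumes `data` exposes `atomic_numbers` and `batch`, as produced
               # by the datasets in ``fairchem.core.datasets``.
               h = self.embedding(data.atomic_numbers.long())
               pooled = torch.zeros(
                   int(data.batch.max()) + 1, h.shape[-1], device=h.device
               ).index_add_(0, data.batch, h)       # sum-pool node features per structure
               return self.out(pooled).squeeze(-1)  # one energy per structure

       model = ToyEnergyModel()
       print(model.num_params)  # ``num_params`` property documented above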
- - - diff --git a/_sources/autoapi/fairchem/core/models/dimenet_plus_plus/index.rst b/_sources/autoapi/fairchem/core/models/dimenet_plus_plus/index.rst deleted file mode 100644 index 056837b85..000000000 --- a/_sources/autoapi/fairchem/core/models/dimenet_plus_plus/index.rst +++ /dev/null @@ -1,256 +0,0 @@ -:py:mod:`fairchem.core.models.dimenet_plus_plus` -================================================ - -.. py:module:: fairchem.core.models.dimenet_plus_plus - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - --- - - This code borrows heavily from the DimeNet implementation as part of - pytorch-geometric: https://github.com/rusty1s/pytorch_geometric. License: - - --- - - Copyright (c) 2020 Matthias Fey - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.dimenet_plus_plus.InteractionPPBlock - fairchem.core.models.dimenet_plus_plus.OutputPPBlock - fairchem.core.models.dimenet_plus_plus.DimeNetPlusPlus - fairchem.core.models.dimenet_plus_plus.DimeNetPlusPlusWrap - - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.dimenet_plus_plus.sym - - -.. py:data:: sym - - - -.. py:class:: InteractionPPBlock(hidden_channels: int, int_emb_size: int, basis_emb_size: int, num_spherical: int, num_radial: int, num_before_skip: int, num_after_skip: int, act='silu') - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. 
py:method:: reset_parameters() -> None - - - .. py:method:: forward(x, rbf, sbf, idx_kj, idx_ji) - - - -.. py:class:: OutputPPBlock(num_radial: int, hidden_channels: int, out_emb_channels: int, out_channels: int, num_layers: int, act: str = 'silu') - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: reset_parameters() -> None - - - .. py:method:: forward(x, rbf, i, num_nodes: int | None = None) - - - -.. py:class:: DimeNetPlusPlus(hidden_channels: int, out_channels: int, num_blocks: int, int_emb_size: int, basis_emb_size: int, out_emb_channels: int, num_spherical: int, num_radial: int, cutoff: float = 5.0, envelope_exponent: int = 5, num_before_skip: int = 1, num_after_skip: int = 2, num_output_layers: int = 3, act: str = 'silu') - - - Bases: :py:obj:`torch.nn.Module` - - DimeNet++ implementation based on https://github.com/klicperajo/dimenet. - - :param hidden_channels: Hidden embedding size. - :type hidden_channels: int - :param out_channels: Size of each output sample. - :type out_channels: int - :param num_blocks: Number of building blocks. - :type num_blocks: int - :param int_emb_size: Embedding size used for interaction triplets - :type int_emb_size: int - :param basis_emb_size: Embedding size used in the basis transformation - :type basis_emb_size: int - :param out_emb_channels: Embedding size used for atoms in the output block - :type out_emb_channels: int - :param num_spherical: Number of spherical harmonics. - :type num_spherical: int - :param num_radial: Number of radial basis functions. - :type num_radial: int - :param cutoff: (float, optional): Cutoff distance for interatomic - interactions. (default: :obj:`5.0`) - :param envelope_exponent: Shape of the smooth cutoff. - (default: :obj:`5`) - :type envelope_exponent: int, optional - :param num_before_skip: (int, optional): Number of residual layers in the - interaction blocks before the skip connection. (default: :obj:`1`) - :param num_after_skip: (int, optional): Number of residual layers in the - interaction blocks after the skip connection. (default: :obj:`2`) - :param num_output_layers: (int, optional): Number of linear layers for the - output blocks. (default: :obj:`3`) - :param act: (function, optional): The activation funtion. - (default: :obj:`silu`) - - .. py:attribute:: url - :value: 'https://github.com/klicperajo/dimenet/raw/master/pretrained' - - - - .. py:method:: reset_parameters() -> None - - - .. py:method:: triplets(edge_index, cell_offsets, num_nodes: int) - - - .. py:method:: forward(z, pos, batch=None) - :abstractmethod: - - - -.. 
py:class:: DimeNetPlusPlusWrap(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, hidden_channels: int = 128, num_blocks: int = 4, int_emb_size: int = 64, basis_emb_size: int = 8, out_emb_channels: int = 256, num_spherical: int = 7, num_radial: int = 6, otf_graph: bool = False, cutoff: float = 10.0, envelope_exponent: int = 5, num_before_skip: int = 1, num_after_skip: int = 2, num_output_layers: int = 3) - - - Bases: :py:obj:`DimeNetPlusPlus`, :py:obj:`fairchem.core.models.base.BaseModel` - - DimeNet++ implementation based on https://github.com/klicperajo/dimenet. - - :param hidden_channels: Hidden embedding size. - :type hidden_channels: int - :param out_channels: Size of each output sample. - :type out_channels: int - :param num_blocks: Number of building blocks. - :type num_blocks: int - :param int_emb_size: Embedding size used for interaction triplets - :type int_emb_size: int - :param basis_emb_size: Embedding size used in the basis transformation - :type basis_emb_size: int - :param out_emb_channels: Embedding size used for atoms in the output block - :type out_emb_channels: int - :param num_spherical: Number of spherical harmonics. - :type num_spherical: int - :param num_radial: Number of radial basis functions. - :type num_radial: int - :param cutoff: (float, optional): Cutoff distance for interatomic - interactions. (default: :obj:`5.0`) - :param envelope_exponent: Shape of the smooth cutoff. - (default: :obj:`5`) - :type envelope_exponent: int, optional - :param num_before_skip: (int, optional): Number of residual layers in the - interaction blocks before the skip connection. (default: :obj:`1`) - :param num_after_skip: (int, optional): Number of residual layers in the - interaction blocks after the skip connection. (default: :obj:`2`) - :param num_output_layers: (int, optional): Number of linear layers for the - output blocks. (default: :obj:`3`) - :param act: (function, optional): The activation funtion. - (default: :obj:`silu`) - - .. py:property:: num_params - :type: int - - - .. py:method:: _forward(data) - - - .. py:method:: forward(data) - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/activation/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/activation/index.rst deleted file mode 100644 index 851f3eb4a..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/activation/index.rst +++ /dev/null @@ -1,388 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.activation` -======================================================= - -.. py:module:: fairchem.core.models.equiformer_v2.activation - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.activation.ScaledSiLU - fairchem.core.models.equiformer_v2.activation.ScaledSwiGLU - fairchem.core.models.equiformer_v2.activation.SwiGLU - fairchem.core.models.equiformer_v2.activation.SmoothLeakyReLU - fairchem.core.models.equiformer_v2.activation.ScaledSmoothLeakyReLU - fairchem.core.models.equiformer_v2.activation.ScaledSigmoid - fairchem.core.models.equiformer_v2.activation.GateActivation - fairchem.core.models.equiformer_v2.activation.S2Activation - fairchem.core.models.equiformer_v2.activation.SeparableS2Activation - - - - -.. py:class:: ScaledSiLU(inplace: bool = False) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. 
- - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(inputs) - - - .. py:method:: extra_repr() - - Set the extra representation of the module. - - To print customized extra information, you should re-implement - this method in your own modules. Both single-line and multi-line - strings are acceptable. - - - -.. py:class:: ScaledSwiGLU(in_channels: int, out_channels: int, bias: bool = True) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(inputs) - - - -.. py:class:: SwiGLU(in_channels: int, out_channels: int, bias: bool = True) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(inputs) - - - -.. py:class:: SmoothLeakyReLU(negative_slope: float = 0.2) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. 
You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x) - - - .. py:method:: extra_repr() - - Set the extra representation of the module. - - To print customized extra information, you should re-implement - this method in your own modules. Both single-line and multi-line - strings are acceptable. - - - -.. py:class:: ScaledSmoothLeakyReLU - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x) - - - .. py:method:: extra_repr() - - Set the extra representation of the module. - - To print customized extra information, you should re-implement - this method in your own modules. Both single-line and multi-line - strings are acceptable. - - - -.. py:class:: ScaledSigmoid - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x: torch.Tensor) -> torch.Tensor - - - -.. py:class:: GateActivation(lmax: int, mmax: int, num_channels: int) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. 
- - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(gating_scalars, input_tensors) - - `gating_scalars`: shape [N, lmax * num_channels] - `input_tensors`: shape [N, (lmax + 1) ** 2, num_channels] - - - -.. py:class:: S2Activation(lmax: int, mmax: int) - - - Bases: :py:obj:`torch.nn.Module` - - Assume we only have one resolution - - .. py:method:: forward(inputs, SO3_grid) - - - -.. py:class:: SeparableS2Activation(lmax: int, mmax: int) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(input_scalars, input_tensors, SO3_grid) - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/drop/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/drop/index.rst deleted file mode 100644 index 557e07fb0..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/drop/index.rst +++ /dev/null @@ -1,225 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.drop` -================================================= - -.. py:module:: fairchem.core.models.equiformer_v2.drop - -.. autoapi-nested-parse:: - - Add `extra_repr` into DropPath implemented by timm - for displaying more info. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.drop.DropPath - fairchem.core.models.equiformer_v2.drop.GraphDropPath - fairchem.core.models.equiformer_v2.drop.EquivariantDropout - fairchem.core.models.equiformer_v2.drop.EquivariantScalarsDropout - fairchem.core.models.equiformer_v2.drop.EquivariantDropoutArraySphericalHarmonics - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.drop.drop_path - - - -.. py:function:: drop_path(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor - - Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
- This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, - the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... - See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for - changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use - 'survival rate' as the argument. - - -.. py:class:: DropPath(drop_prob: float) - - - Bases: :py:obj:`torch.nn.Module` - - Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - - .. py:method:: forward(x: torch.Tensor) -> torch.Tensor - - - .. py:method:: extra_repr() -> str - - Set the extra representation of the module. - - To print customized extra information, you should re-implement - this method in your own modules. Both single-line and multi-line - strings are acceptable. - - - -.. py:class:: GraphDropPath(drop_prob: float) - - - Bases: :py:obj:`torch.nn.Module` - - Consider batch for graph data when dropping paths. - - .. py:method:: forward(x: torch.Tensor, batch) -> torch.Tensor - - - .. py:method:: extra_repr() -> str - - Set the extra representation of the module. - - To print customized extra information, you should re-implement - this method in your own modules. Both single-line and multi-line - strings are acceptable. - - - -.. py:class:: EquivariantDropout(irreps, drop_prob: float) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x: torch.Tensor) -> torch.Tensor - - - -.. py:class:: EquivariantScalarsDropout(irreps, drop_prob: float) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. 
py:method:: forward(x: torch.Tensor) -> torch.Tensor - - - .. py:method:: extra_repr() -> str - - Set the extra representation of the module. - - To print customized extra information, you should re-implement - this method in your own modules. Both single-line and multi-line - strings are acceptable. - - - -.. py:class:: EquivariantDropoutArraySphericalHarmonics(drop_prob: float, drop_graph: bool = False) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x: torch.Tensor, batch=None) -> torch.Tensor - - - .. py:method:: extra_repr() -> str - - Set the extra representation of the module. - - To print customized extra information, you should re-implement - this method in your own modules. Both single-line and multi-line - strings are acceptable. - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/edge_rot_mat/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/edge_rot_mat/index.rst deleted file mode 100644 index 25ed17ead..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/edge_rot_mat/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.edge_rot_mat` -========================================================= - -.. py:module:: fairchem.core.models.equiformer_v2.edge_rot_mat - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.edge_rot_mat.init_edge_rot_mat - - - -.. py:function:: init_edge_rot_mat(edge_distance_vec) - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/equiformer_v2_oc20/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/equiformer_v2_oc20/index.rst deleted file mode 100644 index ec56c766c..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/equiformer_v2_oc20/index.rst +++ /dev/null @@ -1,152 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.equiformer_v2_oc20` -=============================================================== - -.. py:module:: fairchem.core.models.equiformer_v2.equiformer_v2_oc20 - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.equiformer_v2_oc20.EquiformerV2_OC20 - - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.equiformer_v2_oc20._AVG_NUM_NODES - fairchem.core.models.equiformer_v2.equiformer_v2_oc20._AVG_DEGREE - - -.. py:data:: _AVG_NUM_NODES - :value: 77.81317 - - - -.. py:data:: _AVG_DEGREE - :value: 23.395238876342773 - - - -.. 
py:class:: EquiformerV2_OC20(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = True, max_neighbors: int = 500, max_radius: float = 5.0, max_num_elements: int = 90, num_layers: int = 12, sphere_channels: int = 128, attn_hidden_channels: int = 128, num_heads: int = 8, attn_alpha_channels: int = 32, attn_value_channels: int = 16, ffn_hidden_channels: int = 512, norm_type: str = 'rms_norm_sh', lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, grid_resolution: int | None = None, num_sphere_samples: int = 128, edge_channels: int = 128, use_atom_edge_embedding: bool = True, share_atom_edge_embedding: bool = False, use_m_share_rad: bool = False, distance_function: str = 'gaussian', num_distance_basis: int = 512, attn_activation: str = 'scaled_silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, ffn_activation: str = 'scaled_silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True, alpha_drop: float = 0.1, drop_path_rate: float = 0.05, proj_drop: float = 0.0, weight_init: str = 'normal', enforce_max_neighbors_strictly: bool = True, avg_num_nodes: float | None = None, avg_degree: float | None = None, use_energy_lin_ref: bool | None = False, load_energy_lin_ref: bool | None = False) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation - - :param use_pbc: Use periodic boundary conditions - :type use_pbc: bool - :param regress_forces: Compute forces - :type regress_forces: bool - :param otf_graph: Compute graph On The Fly (OTF) - :type otf_graph: bool - :param max_neighbors: Maximum number of neighbors per atom - :type max_neighbors: int - :param max_radius: Maximum distance between nieghboring atoms in Angstroms - :type max_radius: float - :param max_num_elements: Maximum atomic number - :type max_num_elements: int - :param num_layers: Number of layers in the GNN - :type num_layers: int - :param sphere_channels: Number of spherical channels (one set per resolution) - :type sphere_channels: int - :param attn_hidden_channels: Number of hidden channels used during SO(2) graph attention - :type attn_hidden_channels: int - :param num_heads: Number of attention heads - :type num_heads: int - :param attn_alpha_head: Number of channels for alpha vector in each attention head - :type attn_alpha_head: int - :param attn_value_head: Number of channels for value vector in each attention head - :type attn_value_head: int - :param ffn_hidden_channels: Number of hidden channels used during feedforward network - :type ffn_hidden_channels: int - :param norm_type: Type of normalization layer (['layer_norm', 'layer_norm_sh', 'rms_norm_sh']) - :type norm_type: str - :param lmax_list: List of maximum degree of the spherical harmonics (1 to 10) - :type lmax_list: int - :param mmax_list: List of maximum order of the spherical harmonics (0 to lmax) - :type mmax_list: int - :param grid_resolution: Resolution of SO3_Grid - :type grid_resolution: int - :param num_sphere_samples: Number of samples used to approximate the integration of the sphere in the output blocks - :type num_sphere_samples: int - :param edge_channels: Number of channels for the edge invariant features - :type edge_channels: int - :param use_atom_edge_embedding: Whether to use atomic embedding along with relative distance for edge scalar features - :type use_atom_edge_embedding: bool - :param 
share_atom_edge_embedding: Whether to share `atom_edge_embedding` across all blocks - :type share_atom_edge_embedding: bool - :param use_m_share_rad: Whether all m components within a type-L vector of one channel share radial function weights - :type use_m_share_rad: bool - :param distance_function: Basis function used for distances - :type distance_function: "gaussian", "sigmoid", "linearsigmoid", "silu" - :param attn_activation: Type of activation function for SO(2) graph attention - :type attn_activation: str - :param use_s2_act_attn: Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer - :type use_s2_act_attn: bool - :param use_attn_renorm: Whether to re-normalize attention weights - :type use_attn_renorm: bool - :param ffn_activation: Type of activation function for feedforward network - :type ffn_activation: str - :param use_gate_act: If `True`, use gate activation. Otherwise, use S2 activation - :type use_gate_act: bool - :param use_grid_mlp: If `True`, use projecting to grids and performing MLPs for FFNs. - :type use_grid_mlp: bool - :param use_sep_s2_act: If `True`, use separable S2 activation when `use_gate_act` is False. - :type use_sep_s2_act: bool - :param alpha_drop: Dropout rate for attention weights - :type alpha_drop: float - :param drop_path_rate: Drop path rate - :type drop_path_rate: float - :param proj_drop: Dropout rate for outputs of attention and FFN in Transformer blocks - :type proj_drop: float - :param weight_init: ['normal', 'uniform'] initialization of weights of linear layers except those in radial functions - :type weight_init: str - :param enforce_max_neighbors_strictly: When edges are subselected based on the `max_neighbors` arg, arbitrarily select amongst equidistant / degenerate edges to have exactly the correct number. - :type enforce_max_neighbors_strictly: bool - :param avg_num_nodes: Average number of nodes per graph - :type avg_num_nodes: float - :param avg_degree: Average degree of nodes in the graph - :type avg_degree: float - :param use_energy_lin_ref: Whether to add the per-atom energy references during prediction. - During training and validation, this should be kept `False` since we use the `lin_ref` parameter in the OC22 dataloader to subtract the per-atom linear references from the energy targets. - During prediction (where we don't have energy targets), this can be set to `True` to add the per-atom linear references to the predicted energies. - :type use_energy_lin_ref: bool - :param load_energy_lin_ref: Whether to add nn.Parameters for the per-element energy references. - This additional flag is there to ensure compatibility when strict-loading checkpoints, since the `use_energy_lin_ref` flag can be either True or False even if the model is trained with linear references. - You can't have use_energy_lin_ref = True and load_energy_lin_ref = False, since the model will not have the parameters for the linear references. All other combinations are fine. - :type load_energy_lin_ref: bool - - .. py:property:: num_params - - - .. py:method:: forward(data) - - - .. py:method:: _init_edge_rot_mat(data, edge_index, edge_distance_vec) - - - .. py:method:: _init_weights(m) - - - .. py:method:: _uniform_init_rad_func_linear_weights(m) - - - .. py:method:: _uniform_init_linear_weights(m) - - - .. py:method:: no_weight_decay() -> set - - Returns a list of parameters with no weight decay. 
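   A short sketch of the two documented configurations of ``use_energy_lin_ref``
   and ``load_energy_lin_ref``; the positional ``None, None, 1`` placeholders for
   ``num_atoms``, ``bond_feat_dim`` and ``num_targets``, and the reduced
   ``num_layers``/``lmax_list``/``mmax_list`` values, are illustrative assumptions::

       from fairchem.core.models.equiformer_v2.equiformer_v2_oc20 import EquiformerV2_OC20

       # Training / validation: linear references are subtracted by the OC22
       # dataloader, so both flags stay at their documented default of False.
       train_model = EquiformerV2_OC20(None, None, 1, num_layers=2,
                                       lmax_list=[4], mmax_list=[2])

       # Prediction from a checkpoint trained with linear references: load the
       # per-element parameters and add them to the predicted energies.
       # (use_energy_lin_ref=True without load_energy_lin_ref=True is invalid,
       # as noted above.)
       predict_model = EquiformerV2_OC20(None, None, 1, num_layers=2,
                                         lmax_list=[4], mmax_list=[2],
                                         load_energy_lin_ref=True,
                                         use_energy_lin_ref=True)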
- - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/gaussian_rbf/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/gaussian_rbf/index.rst deleted file mode 100644 index 86a4235e5..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/gaussian_rbf/index.rst +++ /dev/null @@ -1,79 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.gaussian_rbf` -========================================================= - -.. py:module:: fairchem.core.models.equiformer_v2.gaussian_rbf - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.gaussian_rbf.GaussianRadialBasisLayer - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.gaussian_rbf.gaussian - - - -.. py:function:: gaussian(x: torch.Tensor, mean, std) -> torch.Tensor - - -.. py:class:: GaussianRadialBasisLayer(num_basis: int, cutoff: float) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(dist: torch.Tensor, node_atom=None, edge_src=None, edge_dst=None) - - - .. py:method:: extra_repr() - - Set the extra representation of the module. - - To print customized extra information, you should re-implement - this method in your own modules. Both single-line and multi-line - strings are acceptable. - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/index.rst deleted file mode 100644 index ffa8e9fa6..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/index.rst +++ /dev/null @@ -1,163 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2` -============================================ - -.. py:module:: fairchem.core.models.equiformer_v2 - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - trainers/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - activation/index.rst - drop/index.rst - edge_rot_mat/index.rst - equiformer_v2_oc20/index.rst - gaussian_rbf/index.rst - input_block/index.rst - layer_norm/index.rst - module_list/index.rst - radial_function/index.rst - so2_ops/index.rst - so3/index.rst - transformer_block/index.rst - wigner/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.EquiformerV2 - - - - -.. 
py:class:: EquiformerV2(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = True, max_neighbors: int = 500, max_radius: float = 5.0, max_num_elements: int = 90, num_layers: int = 12, sphere_channels: int = 128, attn_hidden_channels: int = 128, num_heads: int = 8, attn_alpha_channels: int = 32, attn_value_channels: int = 16, ffn_hidden_channels: int = 512, norm_type: str = 'rms_norm_sh', lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, grid_resolution: int | None = None, num_sphere_samples: int = 128, edge_channels: int = 128, use_atom_edge_embedding: bool = True, share_atom_edge_embedding: bool = False, use_m_share_rad: bool = False, distance_function: str = 'gaussian', num_distance_basis: int = 512, attn_activation: str = 'scaled_silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, ffn_activation: str = 'scaled_silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True, alpha_drop: float = 0.1, drop_path_rate: float = 0.05, proj_drop: float = 0.0, weight_init: str = 'normal', enforce_max_neighbors_strictly: bool = True, avg_num_nodes: float | None = None, avg_degree: float | None = None, use_energy_lin_ref: bool | None = False, load_energy_lin_ref: bool | None = False) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation - - :param use_pbc: Use periodic boundary conditions - :type use_pbc: bool - :param regress_forces: Compute forces - :type regress_forces: bool - :param otf_graph: Compute graph On The Fly (OTF) - :type otf_graph: bool - :param max_neighbors: Maximum number of neighbors per atom - :type max_neighbors: int - :param max_radius: Maximum distance between nieghboring atoms in Angstroms - :type max_radius: float - :param max_num_elements: Maximum atomic number - :type max_num_elements: int - :param num_layers: Number of layers in the GNN - :type num_layers: int - :param sphere_channels: Number of spherical channels (one set per resolution) - :type sphere_channels: int - :param attn_hidden_channels: Number of hidden channels used during SO(2) graph attention - :type attn_hidden_channels: int - :param num_heads: Number of attention heads - :type num_heads: int - :param attn_alpha_head: Number of channels for alpha vector in each attention head - :type attn_alpha_head: int - :param attn_value_head: Number of channels for value vector in each attention head - :type attn_value_head: int - :param ffn_hidden_channels: Number of hidden channels used during feedforward network - :type ffn_hidden_channels: int - :param norm_type: Type of normalization layer (['layer_norm', 'layer_norm_sh', 'rms_norm_sh']) - :type norm_type: str - :param lmax_list: List of maximum degree of the spherical harmonics (1 to 10) - :type lmax_list: int - :param mmax_list: List of maximum order of the spherical harmonics (0 to lmax) - :type mmax_list: int - :param grid_resolution: Resolution of SO3_Grid - :type grid_resolution: int - :param num_sphere_samples: Number of samples used to approximate the integration of the sphere in the output blocks - :type num_sphere_samples: int - :param edge_channels: Number of channels for the edge invariant features - :type edge_channels: int - :param use_atom_edge_embedding: Whether to use atomic embedding along with relative distance for edge scalar features - :type use_atom_edge_embedding: bool - :param 
share_atom_edge_embedding: Whether to share `atom_edge_embedding` across all blocks - :type share_atom_edge_embedding: bool - :param use_m_share_rad: Whether all m components within a type-L vector of one channel share radial function weights - :type use_m_share_rad: bool - :param distance_function: Basis function used for distances - :type distance_function: "gaussian", "sigmoid", "linearsigmoid", "silu" - :param attn_activation: Type of activation function for SO(2) graph attention - :type attn_activation: str - :param use_s2_act_attn: Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer - :type use_s2_act_attn: bool - :param use_attn_renorm: Whether to re-normalize attention weights - :type use_attn_renorm: bool - :param ffn_activation: Type of activation function for feedforward network - :type ffn_activation: str - :param use_gate_act: If `True`, use gate activation. Otherwise, use S2 activation - :type use_gate_act: bool - :param use_grid_mlp: If `True`, use projecting to grids and performing MLPs for FFNs. - :type use_grid_mlp: bool - :param use_sep_s2_act: If `True`, use separable S2 activation when `use_gate_act` is False. - :type use_sep_s2_act: bool - :param alpha_drop: Dropout rate for attention weights - :type alpha_drop: float - :param drop_path_rate: Drop path rate - :type drop_path_rate: float - :param proj_drop: Dropout rate for outputs of attention and FFN in Transformer blocks - :type proj_drop: float - :param weight_init: ['normal', 'uniform'] initialization of weights of linear layers except those in radial functions - :type weight_init: str - :param enforce_max_neighbors_strictly: When edges are subselected based on the `max_neighbors` arg, arbitrarily select amongst equidistant / degenerate edges to have exactly the correct number. - :type enforce_max_neighbors_strictly: bool - :param avg_num_nodes: Average number of nodes per graph - :type avg_num_nodes: float - :param avg_degree: Average degree of nodes in the graph - :type avg_degree: float - :param use_energy_lin_ref: Whether to add the per-atom energy references during prediction. - During training and validation, this should be kept `False` since we use the `lin_ref` parameter in the OC22 dataloader to subtract the per-atom linear references from the energy targets. - During prediction (where we don't have energy targets), this can be set to `True` to add the per-atom linear references to the predicted energies. - :type use_energy_lin_ref: bool - :param load_energy_lin_ref: Whether to add nn.Parameters for the per-element energy references. - This additional flag is there to ensure compatibility when strict-loading checkpoints, since the `use_energy_lin_ref` flag can be either True or False even if the model is trained with linear references. - You can't have use_energy_lin_ref = True and load_energy_lin_ref = False, since the model will not have the parameters for the linear references. All other combinations are fine. - :type load_energy_lin_ref: bool - - .. py:property:: num_params - - - .. py:method:: forward(data) - - - .. py:method:: _init_edge_rot_mat(data, edge_index, edge_distance_vec) - - - .. py:method:: _init_weights(m) - - - .. py:method:: _uniform_init_rad_func_linear_weights(m) - - - .. py:method:: _uniform_init_linear_weights(m) - - - .. py:method:: no_weight_decay() -> set - - Returns a list of parameters with no weight decay. 
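   The ``no_weight_decay()`` method above returns the names of parameters that
   should be excluded from weight decay; a minimal sketch of feeding that into
   two ``AdamW`` parameter groups (whether the returned names match
   ``named_parameters()`` keys exactly is assumed here, and the constructor
   arguments are illustrative only)::

       import torch
       from fairchem.core.models.equiformer_v2 import EquiformerV2

       model = EquiformerV2(None, None, 1, num_layers=2,
                            lmax_list=[2], mmax_list=[2])

       skip = model.no_weight_decay()  # documented to return a set of names
       decay, no_decay = [], []
       for name, param in model.named_parameters():
           (no_decay if name in skip else decay).append(param)

       optimizer = torch.optim.AdamW(
           [{"params": decay, "weight_decay": 1e-3},
            {"params": no_decay, "weight_decay": 0.0}],
           lr=5e-4,
       )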
- - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/input_block/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/input_block/index.rst deleted file mode 100644 index 9a3e5301d..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/input_block/index.rst +++ /dev/null @@ -1,44 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.input_block` -======================================================== - -.. py:module:: fairchem.core.models.equiformer_v2.input_block - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.input_block.EdgeDegreeEmbedding - - - - -.. py:class:: EdgeDegreeEmbedding(sphere_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_rotation, mappingReduced, max_num_elements: int, edge_channels_list, use_atom_edge_embedding: bool, rescale_factor) - - - Bases: :py:obj:`torch.nn.Module` - - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param lmax_list (list: int): List of degrees (l) for each resolution - :param mmax_list (list: int): List of orders (m) for each resolution - :param SO3_rotation (list: SO3_Rotation): Class to calculate Wigner-D matrices and rotate embeddings - :param mappingReduced: Class to convert l and m indices once node embedding is rotated - :type mappingReduced: CoefficientMappingModule - :param max_num_elements: Maximum number of atomic numbers - :type max_num_elements: int - :param edge_channels_list (list: int): List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. - The last one will be used as hidden size when `use_atom_edge_embedding` is `True`. - :param use_atom_edge_embedding: Whether to use atomic embedding along with relative distance for edge scalar features - :type use_atom_edge_embedding: bool - :param rescale_factor: Rescale the sum aggregation - :type rescale_factor: float - - .. py:method:: forward(atomic_numbers, edge_distance, edge_index) - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/layer_norm/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/layer_norm/index.rst deleted file mode 100644 index e518a6e7e..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/layer_norm/index.rst +++ /dev/null @@ -1,168 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.layer_norm` -======================================================= - -.. py:module:: fairchem.core.models.equiformer_v2.layer_norm - -.. autoapi-nested-parse:: - - 1. Normalize features of shape (N, sphere_basis, C), - with sphere_basis = (lmax + 1) ** 2. - - 2. The difference from `layer_norm.py` is that all type-L vectors have - the same number of channels and input features are of shape (N, sphere_basis, C). - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.layer_norm.EquivariantLayerNormArray - fairchem.core.models.equiformer_v2.layer_norm.EquivariantLayerNormArraySphericalHarmonics - fairchem.core.models.equiformer_v2.layer_norm.EquivariantRMSNormArraySphericalHarmonics - fairchem.core.models.equiformer_v2.layer_norm.EquivariantRMSNormArraySphericalHarmonicsV2 - fairchem.core.models.equiformer_v2.layer_norm.EquivariantDegreeLayerScale - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.layer_norm.get_normalization_layer - fairchem.core.models.equiformer_v2.layer_norm.get_l_to_all_m_expand_index - - - -.. 
py:function:: get_normalization_layer(norm_type: str, lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component') - - -.. py:function:: get_l_to_all_m_expand_index(lmax: int) - - -.. py:class:: EquivariantLayerNormArray(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component') - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: __repr__() -> str - - Return repr(self). - - - .. py:method:: forward(node_input) - - Assume input is of shape [N, sphere_basis, C] - - - -.. py:class:: EquivariantLayerNormArraySphericalHarmonics(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component', std_balance_degrees: bool = True) - - - Bases: :py:obj:`torch.nn.Module` - - 1. Normalize over L = 0. - 2. Normalize across all m components from degrees L > 0. - 3. Do not normalize separately for different L (L > 0). - - .. py:method:: __repr__() -> str - - Return repr(self). - - - .. py:method:: forward(node_input) - - Assume input is of shape [N, sphere_basis, C] - - - -.. py:class:: EquivariantRMSNormArraySphericalHarmonics(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component') - - - Bases: :py:obj:`torch.nn.Module` - - 1. Normalize across all m components from degrees L >= 0. - - .. py:method:: __repr__() -> str - - Return repr(self). - - - .. py:method:: forward(node_input) - - Assume input is of shape [N, sphere_basis, C] - - - -.. py:class:: EquivariantRMSNormArraySphericalHarmonicsV2(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component', centering: bool = True, std_balance_degrees: bool = True) - - - Bases: :py:obj:`torch.nn.Module` - - 1. Normalize across all m components from degrees L >= 0. - 2. Expand weights and multiply with normalized feature to prevent slicing and concatenation. - - .. py:method:: __repr__() -> str - - Return repr(self). - - - .. py:method:: forward(node_input) - - Assume input is of shape [N, sphere_basis, C] - - - -.. py:class:: EquivariantDegreeLayerScale(lmax: int, num_channels: int, scale_factor: float = 2.0) - - - Bases: :py:obj:`torch.nn.Module` - - 1. Similar to Layer Scale used in CaiT (Going Deeper With Image Transformers (ICCV'21)), we scale the output of both attention and FFN. - 2. For degree L > 0, we scale down the square root of 2 * L, which is to emulate halving the number of channels when using higher L. - - .. py:method:: __repr__() -> str - - Return repr(self). - - - .. 
py:method:: forward(node_input) - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/module_list/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/module_list/index.rst deleted file mode 100644 index 8e549b9fa..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/module_list/index.rst +++ /dev/null @@ -1,52 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.module_list` -======================================================== - -.. py:module:: fairchem.core.models.equiformer_v2.module_list - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.module_list.ModuleListInfo - - - - -.. py:class:: ModuleListInfo(info_str, modules=None) - - - Bases: :py:obj:`torch.nn.ModuleList` - - Holds submodules in a list. - - :class:`~torch.nn.ModuleList` can be indexed like a regular Python list, but - modules it contains are properly registered, and will be visible by all - :class:`~torch.nn.Module` methods. - - :param modules: an iterable of modules to add - :type modules: iterable, optional - - Example:: - - class MyModule(nn.Module): - def __init__(self): - super().__init__() - self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)]) - - def forward(self, x): - # ModuleList can act as an iterable, or be indexed using ints - for i, l in enumerate(self.linears): - x = self.linears[i // 2](x) + l(x) - return x - - .. py:method:: __repr__() -> str - - Return a custom repr for ModuleList that compresses repeated module representations. - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/radial_function/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/radial_function/index.rst deleted file mode 100644 index 3140c691f..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/radial_function/index.rst +++ /dev/null @@ -1,30 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.radial_function` -============================================================ - -.. py:module:: fairchem.core.models.equiformer_v2.radial_function - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.radial_function.RadialFunction - - - - -.. py:class:: RadialFunction(channels_list) - - - Bases: :py:obj:`torch.nn.Module` - - Contruct a radial function (linear layers + layer normalization + SiLU) given a list of channels - - .. py:method:: forward(inputs) - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/so2_ops/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/so2_ops/index.rst deleted file mode 100644 index e2cdcc1cc..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/so2_ops/index.rst +++ /dev/null @@ -1,89 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.so2_ops` -==================================================== - -.. py:module:: fairchem.core.models.equiformer_v2.so2_ops - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.so2_ops.SO2_m_Convolution - fairchem.core.models.equiformer_v2.so2_ops.SO2_Convolution - fairchem.core.models.equiformer_v2.so2_ops.SO2_Linear - - - - -.. 
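For the normalization helpers documented above, a minimal usage sketch; `layer_norm_sh` is assumed to be an accepted `norm_type` string (it is listed in the `norm_type` description of `TransBlockV2` further below), and the output shape is assumed to match the input:

.. code-block:: python

   import torch

   # Import path taken from the layer_norm autoapi entry above.
   from fairchem.core.models.equiformer_v2.layer_norm import get_normalization_layer

   lmax, num_channels, num_nodes = 4, 128, 10
   norm = get_normalization_layer("layer_norm_sh", lmax=lmax, num_channels=num_channels)

   # Features of shape (N, sphere_basis, C) with sphere_basis = (lmax + 1) ** 2,
   # following the shape convention stated in the forward() docstrings above.
   x = torch.randn(num_nodes, (lmax + 1) ** 2, num_channels)
   out = norm(x)  # normalized features, same shape as the input (assumed)

..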
py:class:: SO2_m_Convolution(m: int, sphere_channels: int, m_output_channels: int, lmax_list: list[int], mmax_list: list[int]) - - - Bases: :py:obj:`torch.nn.Module` - - SO(2) Conv: Perform an SO(2) convolution on features corresponding to +- m - - :param m: Order of the spherical harmonic coefficients - :type m: int - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param m_output_channels: Number of output channels used during the SO(2) conv - :type m_output_channels: int - :param lmax_list (list: int): List of degrees (l) for each resolution - :param mmax_list (list: int): List of orders (m) for each resolution - - .. py:method:: forward(x_m) - - - -.. py:class:: SO2_Convolution(sphere_channels: int, m_output_channels: int, lmax_list: list[int], mmax_list: list[int], mappingReduced, internal_weights: bool = True, edge_channels_list: list[int] | None = None, extra_m0_output_channels: int | None = None) - - - Bases: :py:obj:`torch.nn.Module` - - SO(2) Block: Perform SO(2) convolutions for all m (orders) - - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param m_output_channels: Number of output channels used during the SO(2) conv - :type m_output_channels: int - :param lmax_list (list: int): List of degrees (l) for each resolution - :param mmax_list (list: int): List of orders (m) for each resolution - :param mappingReduced: Used to extract a subset of m components - :type mappingReduced: CoefficientMappingModule - :param internal_weights: If True, not using radial function to multiply inputs features - :type internal_weights: bool - :param edge_channels_list (list: int): List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. - :param extra_m0_output_channels: If not None, return `out_embedding` (SO3_Embedding) and `extra_m0_features` (Tensor). - :type extra_m0_output_channels: int - - .. py:method:: forward(x, x_edge) - - - -.. py:class:: SO2_Linear(sphere_channels: int, m_output_channels: int, lmax_list: list[int], mmax_list: list[int], mappingReduced, internal_weights: bool = False, edge_channels_list: list[int] | None = None) - - - Bases: :py:obj:`torch.nn.Module` - - SO(2) Linear: Perform SO(2) linear for all m (orders). - - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param m_output_channels: Number of output channels used during the SO(2) conv - :type m_output_channels: int - :param lmax_list (list: int): List of degrees (l) for each resolution - :param mmax_list (list: int): List of orders (m) for each resolution - :param mappingReduced: Used to extract a subset of m components - :type mappingReduced: CoefficientMappingModule - :param internal_weights: If True, not using radial function to multiply inputs features - :type internal_weights: bool - :param edge_channels_list (list: int): List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. - - .. py:method:: forward(x, x_edge) - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/so3/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/so3/index.rst deleted file mode 100644 index 411a919ec..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/so3/index.rst +++ /dev/null @@ -1,257 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.so3` -================================================ - -.. py:module:: fairchem.core.models.equiformer_v2.so3 - -.. 
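The `RadialFunction` entry above (linear layers + layer normalization + SiLU over a list of channels) and the `edge_channels_list` pattern used by these SO(2) blocks can be sketched as follows; the 32-edge batch and the expectation that the output width equals the last list entry are assumptions, not documented behaviour:

.. code-block:: python

   import torch

   from fairchem.core.models.equiformer_v2.radial_function import RadialFunction

   # channels_list follows the [input_channels, hidden_channels, hidden_channels]
   # pattern that the edge_channels_list docstrings above use as an example.
   radial = RadialFunction([64, 128, 128])

   edge_scalars = torch.randn(32, 64)  # 32 edges, 64 invariant input channels
   out = radial(edge_scalars)          # expected width: 128 (last list entry), assumed

..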
autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - .. todo:: - - 1. Simplify the case when `num_resolutions` == 1. - 2. Remove indexing when the shape is the same. - 3. Move some functions outside classes and to separate files. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.so3.CoefficientMappingModule - fairchem.core.models.equiformer_v2.so3.SO3_Embedding - fairchem.core.models.equiformer_v2.so3.SO3_Rotation - fairchem.core.models.equiformer_v2.so3.SO3_Grid - fairchem.core.models.equiformer_v2.so3.SO3_Linear - fairchem.core.models.equiformer_v2.so3.SO3_LinearV2 - - - - -.. py:class:: CoefficientMappingModule(lmax_list: list[int], mmax_list: list[int]) - - - Bases: :py:obj:`torch.nn.Module` - - Helper module for coefficients used to reshape lval <--> m and to get coefficients of specific degree or order - - :param lmax_list (list: int): List of maximum degree of the spherical harmonics - :param mmax_list (list: int): List of maximum order of the spherical harmonics - - .. py:method:: complex_idx(m: int, lmax: int, m_complex, l_harmonic) - - Add `m_complex` and `l_harmonic` to the input arguments - since we cannot use `self.m_complex`. - - - .. py:method:: coefficient_idx(lmax: int, mmax: int) - - - .. py:method:: get_rotate_inv_rescale(lmax: int, mmax: int) - - - .. py:method:: __repr__() -> str - - Return repr(self). - - - -.. py:class:: SO3_Embedding(length: int, lmax_list: list[int], num_channels: int, device: torch.device, dtype: torch.dtype) - - - Helper functions for performing operations on irreps embedding - - :param length: Batch size - :type length: int - :param lmax_list (list: int): List of maximum degree of the spherical harmonics - :param num_channels: Number of channels - :type num_channels: int - :param device: Device of the output - :param dtype: type of the output tensors - - .. py:method:: clone() -> SO3_Embedding - - - .. py:method:: set_embedding(embedding) -> None - - - .. py:method:: set_lmax_mmax(lmax_list: list[int], mmax_list: list[int]) -> None - - - .. py:method:: _expand_edge(edge_index: torch.Tensor) -> None - - - .. py:method:: expand_edge(edge_index: torch.Tensor) - - - .. py:method:: _reduce_edge(edge_index: torch.Tensor, num_nodes: int) - - - .. py:method:: _m_primary(mapping) - - - .. py:method:: _l_primary(mapping) - - - .. py:method:: _rotate(SO3_rotation, lmax_list: list[int], mmax_list: list[int]) - - - .. py:method:: _rotate_inv(SO3_rotation, mappingReduced) - - - .. py:method:: _grid_act(SO3_grid, act, mappingReduced) - - - .. py:method:: to_grid(SO3_grid, lmax=-1) - - - .. py:method:: _from_grid(x_grid, SO3_grid, lmax: int = -1) - - - -.. py:class:: SO3_Rotation(lmax: int) - - - Bases: :py:obj:`torch.nn.Module` - - Helper functions for Wigner-D rotations - - :param lmax_list (list: int): List of maximum degree of the spherical harmonics - - .. py:method:: set_wigner(rot_mat3x3) - - - .. py:method:: rotate(embedding, out_lmax: int, out_mmax: int) - - - .. py:method:: rotate_inv(embedding, in_lmax: int, in_mmax: int) - - - .. py:method:: RotationToWignerDMatrix(edge_rot_mat, start_lmax: int, end_lmax: int) -> torch.Tensor - - - -.. 
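As a construction sketch for `SO3_Embedding`, with the constructor arguments taken from the signature above; a single resolution with `lmax_list=[6]` is an illustrative choice rather than a recommended setting:

.. code-block:: python

   import torch

   from fairchem.core.models.equiformer_v2.so3 import SO3_Embedding

   emb = SO3_Embedding(
       length=4,                  # batch size
       lmax_list=[6],             # one resolution
       num_channels=128,
       device=torch.device("cpu"),
       dtype=torch.float32,
   )
   emb_copy = emb.clone()         # documented helper returning another SO3_Embedding

..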
py:class:: SO3_Grid(lmax: int, mmax: int, normalization: str = 'integral', resolution: int | None = None) - - - Bases: :py:obj:`torch.nn.Module` - - Helper functions for grid representation of the irreps - - :param lmax: Maximum degree of the spherical harmonics - :type lmax: int - :param mmax: Maximum order of the spherical harmonics - :type mmax: int - - .. py:method:: get_to_grid_mat(device) - - - .. py:method:: get_from_grid_mat(device) - - - .. py:method:: to_grid(embedding, lmax: int, mmax: int) - - - .. py:method:: from_grid(grid, lmax: int, mmax: int) - - - -.. py:class:: SO3_Linear(in_features: int, out_features: int, lmax: int, bias: bool = True) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(input_embedding, output_scale=None) - - - .. py:method:: __repr__() -> str - - Return repr(self). - - - -.. py:class:: SO3_LinearV2(in_features: int, out_features: int, lmax: int, bias: bool = True) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(input_embedding) - - - .. py:method:: __repr__() -> str - - Return repr(self). - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/trainers/energy_trainer/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/trainers/energy_trainer/index.rst deleted file mode 100644 index b0f128942..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/trainers/energy_trainer/index.rst +++ /dev/null @@ -1,88 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.trainers.energy_trainer` -==================================================================== - -.. py:module:: fairchem.core.models.equiformer_v2.trainers.energy_trainer - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. 
and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.trainers.energy_trainer.EquiformerV2EnergyTrainer - - - - -.. py:class:: EquiformerV2EnergyTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp') - - - Bases: :py:obj:`fairchem.core.trainers.OCPTrainer` - - Trainer class for the Structure to Energy & Force (S2EF) and Initial State to - Relaxed State (IS2RS) tasks. - - .. note:: - - Examples of configurations for task, model, dataset and optimizer - can be found in `configs/ocp_s2ef `_ - and `configs/ocp_is2rs `_. - - :param task: Task configuration. - :type task: dict - :param model: Model configuration. - :type model: dict - :param outputs: Output property configuration. - :type outputs: dict - :param dataset: Dataset configuration. The dataset needs to be a SinglePointLMDB dataset. - :type dataset: dict - :param optimizer: Optimizer configuration. - :type optimizer: dict - :param loss_fns: Loss function configuration. - :type loss_fns: dict - :param eval_metrics: Evaluation metrics configuration. - :type eval_metrics: dict - :param identifier: Experiment identifier that is appended to log directory. - :type identifier: str - :param run_dir: Path to the run directory where logs are to be saved. - (default: :obj:`None`) - :type run_dir: str, optional - :param is_debug: Run in debug mode. - (default: :obj:`False`) - :type is_debug: bool, optional - :param print_every: Frequency of printing logs. - (default: :obj:`100`) - :type print_every: int, optional - :param seed: Random number seed. - (default: :obj:`None`) - :type seed: int, optional - :param logger: Type of logger to be used. - (default: :obj:`wandb`) - :type logger: str, optional - :param local_rank: Local rank of the process, only applicable for distributed training. - (default: :obj:`0`) - :type local_rank: int, optional - :param amp: Run using automatic mixed precision. - (default: :obj:`False`) - :type amp: bool, optional - :param slurm: Slurm configuration. Currently just for keeping track. - (default: :obj:`{}`) - :type slurm: dict - :param noddp: Run model without DDP. - :type noddp: bool, optional - - .. py:method:: load_extras() - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/trainers/forces_trainer/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/trainers/forces_trainer/index.rst deleted file mode 100644 index 147f4d422..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/trainers/forces_trainer/index.rst +++ /dev/null @@ -1,88 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.trainers.forces_trainer` -==================================================================== - -.. py:module:: fairchem.core.models.equiformer_v2.trainers.forces_trainer - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.trainers.forces_trainer.EquiformerV2ForcesTrainer - - - - -.. 
py:class:: EquiformerV2ForcesTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp') - - - Bases: :py:obj:`fairchem.core.trainers.OCPTrainer` - - Trainer class for the Structure to Energy & Force (S2EF) and Initial State to - Relaxed State (IS2RS) tasks. - - .. note:: - - Examples of configurations for task, model, dataset and optimizer - can be found in `configs/ocp_s2ef `_ - and `configs/ocp_is2rs `_. - - :param task: Task configuration. - :type task: dict - :param model: Model configuration. - :type model: dict - :param outputs: Output property configuration. - :type outputs: dict - :param dataset: Dataset configuration. The dataset needs to be a SinglePointLMDB dataset. - :type dataset: dict - :param optimizer: Optimizer configuration. - :type optimizer: dict - :param loss_fns: Loss function configuration. - :type loss_fns: dict - :param eval_metrics: Evaluation metrics configuration. - :type eval_metrics: dict - :param identifier: Experiment identifier that is appended to log directory. - :type identifier: str - :param run_dir: Path to the run directory where logs are to be saved. - (default: :obj:`None`) - :type run_dir: str, optional - :param is_debug: Run in debug mode. - (default: :obj:`False`) - :type is_debug: bool, optional - :param print_every: Frequency of printing logs. - (default: :obj:`100`) - :type print_every: int, optional - :param seed: Random number seed. - (default: :obj:`None`) - :type seed: int, optional - :param logger: Type of logger to be used. - (default: :obj:`wandb`) - :type logger: str, optional - :param local_rank: Local rank of the process, only applicable for distributed training. - (default: :obj:`0`) - :type local_rank: int, optional - :param amp: Run using automatic mixed precision. - (default: :obj:`False`) - :type amp: bool, optional - :param slurm: Slurm configuration. Currently just for keeping track. - (default: :obj:`{}`) - :type slurm: dict - :param noddp: Run model without DDP. - :type noddp: bool, optional - - .. py:method:: load_extras() -> None - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/trainers/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/trainers/index.rst deleted file mode 100644 index 89acf0167..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/trainers/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.trainers` -===================================================== - -.. py:module:: fairchem.core.models.equiformer_v2.trainers - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - energy_trainer/index.rst - forces_trainer/index.rst - lr_scheduler/index.rst - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/trainers/lr_scheduler/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/trainers/lr_scheduler/index.rst deleted file mode 100644 index 273f598bd..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/trainers/lr_scheduler/index.rst +++ /dev/null @@ -1,97 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.trainers.lr_scheduler` -================================================================== - -.. py:module:: fairchem.core.models.equiformer_v2.trainers.lr_scheduler - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. 
autoapisummary:: - - fairchem.core.models.equiformer_v2.trainers.lr_scheduler.CosineLRLambda - fairchem.core.models.equiformer_v2.trainers.lr_scheduler.MultistepLRLambda - fairchem.core.models.equiformer_v2.trainers.lr_scheduler.LRScheduler - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.trainers.lr_scheduler.multiply - fairchem.core.models.equiformer_v2.trainers.lr_scheduler.cosine_lr_lambda - fairchem.core.models.equiformer_v2.trainers.lr_scheduler.multistep_lr_lambda - - - -.. py:function:: multiply(obj, num) - - -.. py:function:: cosine_lr_lambda(current_step: int, scheduler_params) - - -.. py:class:: CosineLRLambda(scheduler_params) - - - .. py:method:: __call__(current_step: int) - - - -.. py:function:: multistep_lr_lambda(current_step: int, scheduler_params) -> float - - -.. py:class:: MultistepLRLambda(scheduler_params) - - - .. py:method:: __call__(current_step: int) -> float - - - -.. py:class:: LRScheduler(optimizer, config) - - - .. rubric:: Notes - - 1. scheduler.step() is called for every step for OC20 training. - 2. We use "scheduler_params" in .yml to specify scheduler parameters. - 3. For cosine learning rate, we use LambdaLR with lambda function being cosine: - scheduler: LambdaLR - scheduler_params: - lambda_type: cosine - ... - 4. Following 3., if `cosine` is used, `scheduler_params` in .yml looks like: - scheduler: LambdaLR - scheduler_params: - lambda_type: cosine - warmup_epochs: ... - warmup_factor: ... - lr_min_factor: ... - 5. Following 3., if `multistep` is used, `scheduler_params` in .yml looks like: - scheduler: LambdaLR - scheduler_params: - lambda_type: multistep - warmup_epochs: ... - warmup_factor: ... - decay_epochs: ... (list) - decay_rate: ... - - :param optimizer: torch optim object - :type optimizer: obj - :param config: Optim dict from the input config - :type config: dict - - .. py:method:: step(metrics=None, epoch=None) - - - .. py:method:: filter_kwargs(config) - - - .. py:method:: get_lr() -> float | None - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/transformer_block/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/transformer_block/index.rst deleted file mode 100644 index eab927024..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/transformer_block/index.rst +++ /dev/null @@ -1,165 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.transformer_block` -============================================================== - -.. py:module:: fairchem.core.models.equiformer_v2.transformer_block - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.transformer_block.SO2EquivariantGraphAttention - fairchem.core.models.equiformer_v2.transformer_block.FeedForwardNetwork - fairchem.core.models.equiformer_v2.transformer_block.TransBlockV2 - - - - -.. 
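The `LRScheduler` notes above describe the `scheduler: LambdaLR` / `scheduler_params` layout used in the optim section of the YAML config. Expressed as the Python dict passed in as `config`, a cosine setup might look like the sketch below; the full set of optim keys the class expects (it filters them via the documented `filter_kwargs` method) is not enumerated above, so treat this as schematic:

.. code-block:: python

   import torch

   from fairchem.core.models.equiformer_v2.trainers.lr_scheduler import LRScheduler

   model = torch.nn.Linear(8, 1)
   optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)

   optim_config = {
       "scheduler": "LambdaLR",
       "scheduler_params": {
           "lambda_type": "cosine",
           "warmup_epochs": 0.1,
           "warmup_factor": 0.2,
           "lr_min_factor": 0.01,
       },
   }
   scheduler = LRScheduler(optimizer, optim_config)
   # Per the notes above, scheduler.step() is then called once per training step.

..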
py:class:: SO2EquivariantGraphAttention(sphere_channels: int, hidden_channels: int, num_heads: int, attn_alpha_channels: int, attn_value_channels: int, output_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_rotation, mappingReduced, SO3_grid, max_num_elements: int, edge_channels_list, use_atom_edge_embedding: bool = True, use_m_share_rad: bool = False, activation='scaled_silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, use_gate_act: bool = False, use_sep_s2_act: bool = True, alpha_drop: float = 0.0) - - - Bases: :py:obj:`torch.nn.Module` - - SO2EquivariantGraphAttention: Perform MLP attention + non-linear message passing - SO(2) Convolution with radial function -> S2 Activation -> SO(2) Convolution -> attention weights and non-linear messages - attention weights * non-linear messages -> Linear - - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param hidden_channels: Number of hidden channels used during the SO(2) conv - :type hidden_channels: int - :param num_heads: Number of attention heads - :type num_heads: int - :param attn_alpha_head: Number of channels for alpha vector in each attention head - :type attn_alpha_head: int - :param attn_value_head: Number of channels for value vector in each attention head - :type attn_value_head: int - :param output_channels: Number of output channels - :type output_channels: int - :param lmax_list (list: int): List of degrees (l) for each resolution - :param mmax_list (list: int): List of orders (m) for each resolution - :param SO3_rotation (list: SO3_Rotation): Class to calculate Wigner-D matrices and rotate embeddings - :param mappingReduced: Class to convert l and m indices once node embedding is rotated - :type mappingReduced: CoefficientMappingModule - :param SO3_grid: Class used to convert from grid the spherical harmonic representations - :type SO3_grid: SO3_grid - :param max_num_elements: Maximum number of atomic numbers - :type max_num_elements: int - :param edge_channels_list (list: int): List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. - The last one will be used as hidden size when `use_atom_edge_embedding` is `True`. - :param use_atom_edge_embedding: Whether to use atomic embedding along with relative distance for edge scalar features - :type use_atom_edge_embedding: bool - :param use_m_share_rad: Whether all m components within a type-L vector of one channel share radial function weights - :type use_m_share_rad: bool - :param activation: Type of activation function - :type activation: str - :param use_s2_act_attn: Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer - :type use_s2_act_attn: bool - :param use_attn_renorm: Whether to re-normalize attention weights - :type use_attn_renorm: bool - :param use_gate_act: If `True`, use gate activation. Otherwise, use S2 activation. - :type use_gate_act: bool - :param use_sep_s2_act: If `True`, use separable S2 activation when `use_gate_act` is False. - :type use_sep_s2_act: bool - :param alpha_drop: Dropout rate for attention weights - :type alpha_drop: float - - .. py:method:: forward(x: torch.Tensor, atomic_numbers, edge_distance: torch.Tensor, edge_index) - - - -.. 
py:class:: FeedForwardNetwork(sphere_channels: int, hidden_channels: int, output_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_grid, activation: str = 'scaled_silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True) - - - Bases: :py:obj:`torch.nn.Module` - - FeedForwardNetwork: Perform feedforward network with S2 activation or gate activation - - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param hidden_channels: Number of hidden channels used during feedforward network - :type hidden_channels: int - :param output_channels: Number of output channels - :type output_channels: int - :param lmax_list (list: int): List of degrees (l) for each resolution - :param mmax_list (list: int): List of orders (m) for each resolution - :param SO3_grid: Class used to convert from grid the spherical harmonic representations - :type SO3_grid: SO3_grid - :param activation: Type of activation function - :type activation: str - :param use_gate_act: If `True`, use gate activation. Otherwise, use S2 activation - :type use_gate_act: bool - :param use_grid_mlp: If `True`, use projecting to grids and performing MLPs. - :type use_grid_mlp: bool - :param use_sep_s2_act: If `True`, use separable grid MLP when `use_grid_mlp` is True. - :type use_sep_s2_act: bool - - .. py:method:: forward(input_embedding) - - - -.. py:class:: TransBlockV2(sphere_channels: int, attn_hidden_channels: int, num_heads: int, attn_alpha_channels: int, attn_value_channels: int, ffn_hidden_channels: int, output_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_rotation, mappingReduced, SO3_grid, max_num_elements: int, edge_channels_list: list[int], use_atom_edge_embedding: bool = True, use_m_share_rad: bool = False, attn_activation: str = 'silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, ffn_activation: str = 'silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True, norm_type: str = 'rms_norm_sh', alpha_drop: float = 0.0, drop_path_rate: float = 0.0, proj_drop: float = 0.0) - - - Bases: :py:obj:`torch.nn.Module` - - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param attn_hidden_channels: Number of hidden channels used during SO(2) graph attention - :type attn_hidden_channels: int - :param num_heads: Number of attention heads - :type num_heads: int - :param attn_alpha_head: Number of channels for alpha vector in each attention head - :type attn_alpha_head: int - :param attn_value_head: Number of channels for value vector in each attention head - :type attn_value_head: int - :param ffn_hidden_channels: Number of hidden channels used during feedforward network - :type ffn_hidden_channels: int - :param output_channels: Number of output channels - :type output_channels: int - :param lmax_list (list: int): List of degrees (l) for each resolution - :param mmax_list (list: int): List of orders (m) for each resolution - :param SO3_rotation (list: SO3_Rotation): Class to calculate Wigner-D matrices and rotate embeddings - :param mappingReduced: Class to convert l and m indices once node embedding is rotated - :type mappingReduced: CoefficientMappingModule - :param SO3_grid: Class used to convert from grid the spherical harmonic representations - :type SO3_grid: SO3_grid - :param max_num_elements: Maximum number of atomic numbers - :type max_num_elements: int - :param edge_channels_list (list: int): List of sizes of invariant edge embedding. 
For example, [input_channels, hidden_channels, hidden_channels]. - The last one will be used as hidden size when `use_atom_edge_embedding` is `True`. - :param use_atom_edge_embedding: Whether to use atomic embedding along with relative distance for edge scalar features - :type use_atom_edge_embedding: bool - :param use_m_share_rad: Whether all m components within a type-L vector of one channel share radial function weights - :type use_m_share_rad: bool - :param attn_activation: Type of activation function for SO(2) graph attention - :type attn_activation: str - :param use_s2_act_attn: Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer - :type use_s2_act_attn: bool - :param use_attn_renorm: Whether to re-normalize attention weights - :type use_attn_renorm: bool - :param ffn_activation: Type of activation function for feedforward network - :type ffn_activation: str - :param use_gate_act: If `True`, use gate activation. Otherwise, use S2 activation - :type use_gate_act: bool - :param use_grid_mlp: If `True`, use projecting to grids and performing MLPs for FFN. - :type use_grid_mlp: bool - :param use_sep_s2_act: If `True`, use separable S2 activation when `use_gate_act` is False. - :type use_sep_s2_act: bool - :param norm_type: Type of normalization layer (['layer_norm', 'layer_norm_sh']) - :type norm_type: str - :param alpha_drop: Dropout rate for attention weights - :type alpha_drop: float - :param drop_path_rate: Drop path rate - :type drop_path_rate: float - :param proj_drop: Dropout rate for outputs of attention and FFN - :type proj_drop: float - - .. py:method:: forward(x, atomic_numbers, edge_distance, edge_index, batch) - - - diff --git a/_sources/autoapi/fairchem/core/models/equiformer_v2/wigner/index.rst b/_sources/autoapi/fairchem/core/models/equiformer_v2/wigner/index.rst deleted file mode 100644 index 46097ca51..000000000 --- a/_sources/autoapi/fairchem/core/models/equiformer_v2/wigner/index.rst +++ /dev/null @@ -1,38 +0,0 @@ -:py:mod:`fairchem.core.models.equiformer_v2.wigner` -=================================================== - -.. py:module:: fairchem.core.models.equiformer_v2.wigner - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.wigner.wigner_D - fairchem.core.models.equiformer_v2.wigner._z_rot_mat - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.equiformer_v2.wigner._Jd - - -.. py:data:: _Jd - - - -.. py:function:: wigner_D(lv: int, alpha: torch.Tensor, beta: torch.Tensor, gamma: torch.Tensor) -> torch.Tensor - - -.. py:function:: _z_rot_mat(angle: torch.Tensor, lv: int) -> torch.Tensor - - diff --git a/_sources/autoapi/fairchem/core/models/escn/escn/index.rst b/_sources/autoapi/fairchem/core/models/escn/escn/index.rst deleted file mode 100644 index d6b6dbd0a..000000000 --- a/_sources/autoapi/fairchem/core/models/escn/escn/index.rst +++ /dev/null @@ -1,251 +0,0 @@ -:py:mod:`fairchem.core.models.escn.escn` -======================================== - -.. py:module:: fairchem.core.models.escn.escn - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. 
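For the `wigner_D` helper documented above, a small sketch; scalar Euler angles are used here (whether batched angle tensors are supported is not stated), and the square (2*lv + 1) shape is the standard Wigner-D dimension rather than something the entry above guarantees:

.. code-block:: python

   import torch

   from fairchem.core.models.equiformer_v2.wigner import wigner_D

   alpha = torch.tensor(0.1)
   beta = torch.tensor(0.2)
   gamma = torch.tensor(0.3)

   D = wigner_D(2, alpha, beta, gamma)  # expected to be 5 x 5 for lv = 2

..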
autoapisummary:: - - fairchem.core.models.escn.escn.eSCN - fairchem.core.models.escn.escn.LayerBlock - fairchem.core.models.escn.escn.MessageBlock - fairchem.core.models.escn.escn.SO2Block - fairchem.core.models.escn.escn.SO2Conv - fairchem.core.models.escn.escn.EdgeBlock - fairchem.core.models.escn.escn.EnergyBlock - fairchem.core.models.escn.escn.ForceBlock - - - - -.. py:class:: eSCN(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_neighbors: int = 40, cutoff: float = 8.0, max_num_elements: int = 90, num_layers: int = 8, lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, sphere_channels: int = 128, hidden_channels: int = 256, edge_channels: int = 128, use_grid: bool = True, num_sphere_samples: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - Equivariant Spherical Channel Network - Paper: Reducing SO(3) Convolutions to SO(2) for Efficient Equivariant GNNs - - - :param use_pbc: Use periodic boundary conditions - :type use_pbc: bool - :param regress_forces: Compute forces - :type regress_forces: bool - :param otf_graph: Compute graph On The Fly (OTF) - :type otf_graph: bool - :param max_neighbors: Maximum number of neighbors per atom - :type max_neighbors: int - :param cutoff: Maximum distance between nieghboring atoms in Angstroms - :type cutoff: float - :param max_num_elements: Maximum atomic number - :type max_num_elements: int - :param num_layers: Number of layers in the GNN - :type num_layers: int - :param lmax_list: List of maximum degree of the spherical harmonics (1 to 10) - :type lmax_list: int - :param mmax_list: List of maximum order of the spherical harmonics (0 to lmax) - :type mmax_list: int - :param sphere_channels: Number of spherical channels (one set per resolution) - :type sphere_channels: int - :param hidden_channels: Number of hidden units in message passing - :type hidden_channels: int - :param num_sphere_samples: Number of samples used to approximate the integration of the sphere in the output blocks - :type num_sphere_samples: int - :param edge_channels: Number of channels for the edge invariant features - :type edge_channels: int - :param distance_function: Basis function used for distances - :type distance_function: "gaussian", "sigmoid", "linearsigmoid", "silu" - :param basis_width_scalar: Width of distance basis function - :type basis_width_scalar: float - :param distance_resolution: Distance between distance basis functions in Angstroms - :type distance_resolution: float - :param show_timing_info: Show timing and memory info - :type show_timing_info: bool - - .. py:property:: num_params - :type: int - - - .. py:method:: forward(data) - - - .. py:method:: _init_edge_rot_mat(data, edge_index, edge_distance_vec) - - - -.. 
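A hypothetical construction sketch for `eSCN`, based only on the signature above; the first three positional arguments are not described in the docstring, and the `lmax_list`/`mmax_list` values are illustrative placeholders, not recommended settings:

.. code-block:: python

   from fairchem.core.models.escn.escn import eSCN

   model = eSCN(
       num_atoms=0,             # not described above; placeholder
       bond_feat_dim=0,         # not described above; placeholder
       num_targets=1,
       cutoff=8.0,
       max_neighbors=40,
       lmax_list=[6],
       mmax_list=[2],
       sphere_channels=128,
       hidden_channels=256,
       otf_graph=True,
   )
   print(model.num_params)      # documented property

..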
py:class:: LayerBlock(layer_idx: int, sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], distance_expansion, max_num_elements: int, SO3_grid: fairchem.core.models.escn.so3.SO3_Grid, act) - - - Bases: :py:obj:`torch.nn.Module` - - Layer block: Perform one layer (message passing and aggregation) of the GNN - - :param layer_idx: Layer number - :type layer_idx: int - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param hidden_channels: Number of hidden channels used during the SO(2) conv - :type hidden_channels: int - :param edge_channels: Size of invariant edge embedding - :type edge_channels: int - :param lmax_list (list: int): List of degrees (l) for each resolution - :param mmax_list (list: int): List of orders (m) for each resolution - :param distance_expansion: Function used to compute distance embedding - :type distance_expansion: func - :param max_num_elements: Maximum number of atomic numbers - :type max_num_elements: int - :param SO3_grid: Class used to convert from grid the spherical harmonic representations - :type SO3_grid: SO3_grid - :param act: Non-linear activation function - :type act: function - - .. py:method:: forward(x, atomic_numbers, edge_distance, edge_index, SO3_edge_rot, mappingReduced) - - - -.. py:class:: MessageBlock(layer_idx: int, sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], distance_expansion, max_num_elements: int, SO3_grid: fairchem.core.models.escn.so3.SO3_Grid, act) - - - Bases: :py:obj:`torch.nn.Module` - - Message block: Perform message passing - - :param layer_idx: Layer number - :type layer_idx: int - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param hidden_channels: Number of hidden channels used during the SO(2) conv - :type hidden_channels: int - :param edge_channels: Size of invariant edge embedding - :type edge_channels: int - :param lmax_list (list: int): List of degrees (l) for each resolution - :param mmax_list (list: int): List of orders (m) for each resolution - :param distance_expansion: Function used to compute distance embedding - :type distance_expansion: func - :param max_num_elements: Maximum number of atomic numbers - :type max_num_elements: int - :param SO3_grid: Class used to convert from grid the spherical harmonic representations - :type SO3_grid: SO3_grid - :param act: Non-linear activation function - :type act: function - - .. py:method:: forward(x, atomic_numbers, edge_distance, edge_index, SO3_edge_rot, mappingReduced) - - - -.. py:class:: SO2Block(sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], act) - - - Bases: :py:obj:`torch.nn.Module` - - SO(2) Block: Perform SO(2) convolutions for all m (orders) - - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param hidden_channels: Number of hidden channels used during the SO(2) conv - :type hidden_channels: int - :param edge_channels: Size of invariant edge embedding - :type edge_channels: int - :param lmax_list (list: int): List of degrees (l) for each resolution - :param mmax_list (list: int): List of orders (m) for each resolution - :param act: Non-linear activation function - :type act: function - - .. py:method:: forward(x, x_edge, mappingReduced) - - - -.. 
py:class:: SO2Conv(m: int, sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], act) - - - Bases: :py:obj:`torch.nn.Module` - - SO(2) Conv: Perform an SO(2) convolution - - :param m: Order of the spherical harmonic coefficients - :type m: int - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param hidden_channels: Number of hidden channels used during the SO(2) conv - :type hidden_channels: int - :param edge_channels: Size of invariant edge embedding - :type edge_channels: int - :param lmax_list (list: int): List of degrees (l) for each resolution - :param mmax_list (list: int): List of orders (m) for each resolution - :param act: Non-linear activation function - :type act: function - - .. py:method:: forward(x_m, x_edge) -> torch.Tensor - - - -.. py:class:: EdgeBlock(edge_channels, distance_expansion, max_num_elements, act) - - - Bases: :py:obj:`torch.nn.Module` - - Edge Block: Compute invariant edge representation from edge diatances and atomic numbers - - :param edge_channels: Size of invariant edge embedding - :type edge_channels: int - :param distance_expansion: Function used to compute distance embedding - :type distance_expansion: func - :param max_num_elements: Maximum number of atomic numbers - :type max_num_elements: int - :param act: Non-linear activation function - :type act: function - - .. py:method:: forward(edge_distance, source_element, target_element) - - - -.. py:class:: EnergyBlock(num_channels: int, num_sphere_samples: int, act) - - - Bases: :py:obj:`torch.nn.Module` - - Energy Block: Output block computing the energy - - :param num_channels: Number of channels - :type num_channels: int - :param num_sphere_samples: Number of samples used to approximate the integral on the sphere - :type num_sphere_samples: int - :param act: Non-linear activation function - :type act: function - - .. py:method:: forward(x_pt) -> torch.Tensor - - - -.. py:class:: ForceBlock(num_channels: int, num_sphere_samples: int, act) - - - Bases: :py:obj:`torch.nn.Module` - - Force Block: Output block computing the per atom forces - - :param num_channels: Number of channels - :type num_channels: int - :param num_sphere_samples: Number of samples used to approximate the integral on the sphere - :type num_sphere_samples: int - :param act: Non-linear activation function - :type act: function - - .. py:method:: forward(x_pt, sphere_points) -> torch.Tensor - - - diff --git a/_sources/autoapi/fairchem/core/models/escn/index.rst b/_sources/autoapi/fairchem/core/models/escn/index.rst deleted file mode 100644 index 5773a81ad..000000000 --- a/_sources/autoapi/fairchem/core/models/escn/index.rst +++ /dev/null @@ -1,84 +0,0 @@ -:py:mod:`fairchem.core.models.escn` -=================================== - -.. py:module:: fairchem.core.models.escn - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - escn/index.rst - so3/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.escn.eSCN - - - - -.. 
py:class:: eSCN(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_neighbors: int = 40, cutoff: float = 8.0, max_num_elements: int = 90, num_layers: int = 8, lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, sphere_channels: int = 128, hidden_channels: int = 256, edge_channels: int = 128, use_grid: bool = True, num_sphere_samples: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - Equivariant Spherical Channel Network - Paper: Reducing SO(3) Convolutions to SO(2) for Efficient Equivariant GNNs - - - :param use_pbc: Use periodic boundary conditions - :type use_pbc: bool - :param regress_forces: Compute forces - :type regress_forces: bool - :param otf_graph: Compute graph On The Fly (OTF) - :type otf_graph: bool - :param max_neighbors: Maximum number of neighbors per atom - :type max_neighbors: int - :param cutoff: Maximum distance between nieghboring atoms in Angstroms - :type cutoff: float - :param max_num_elements: Maximum atomic number - :type max_num_elements: int - :param num_layers: Number of layers in the GNN - :type num_layers: int - :param lmax_list: List of maximum degree of the spherical harmonics (1 to 10) - :type lmax_list: int - :param mmax_list: List of maximum order of the spherical harmonics (0 to lmax) - :type mmax_list: int - :param sphere_channels: Number of spherical channels (one set per resolution) - :type sphere_channels: int - :param hidden_channels: Number of hidden units in message passing - :type hidden_channels: int - :param num_sphere_samples: Number of samples used to approximate the integration of the sphere in the output blocks - :type num_sphere_samples: int - :param edge_channels: Number of channels for the edge invariant features - :type edge_channels: int - :param distance_function: Basis function used for distances - :type distance_function: "gaussian", "sigmoid", "linearsigmoid", "silu" - :param basis_width_scalar: Width of distance basis function - :type basis_width_scalar: float - :param distance_resolution: Distance between distance basis functions in Angstroms - :type distance_resolution: float - :param show_timing_info: Show timing and memory info - :type show_timing_info: bool - - .. py:property:: num_params - :type: int - - - .. py:method:: forward(data) - - - .. py:method:: _init_edge_rot_mat(data, edge_index, edge_distance_vec) - - - diff --git a/_sources/autoapi/fairchem/core/models/escn/so3/index.rst b/_sources/autoapi/fairchem/core/models/escn/so3/index.rst deleted file mode 100644 index 782f6c445..000000000 --- a/_sources/autoapi/fairchem/core/models/escn/so3/index.rst +++ /dev/null @@ -1,171 +0,0 @@ -:py:mod:`fairchem.core.models.escn.so3` -======================================= - -.. py:module:: fairchem.core.models.escn.so3 - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.escn.so3.CoefficientMapping - fairchem.core.models.escn.so3.SO3_Embedding - fairchem.core.models.escn.so3.SO3_Rotation - fairchem.core.models.escn.so3.SO3_Grid - - - - -Attributes -~~~~~~~~~~ - -.. 
autoapisummary:: - - fairchem.core.models.escn.so3._Jd - - -.. py:data:: _Jd - - - -.. py:class:: CoefficientMapping(lmax_list: list[int], mmax_list: list[int], device) - - - Helper functions for coefficients used to reshape l<-->m and to get coefficients of specific degree or order - - :param lmax_list (list: int): List of maximum degree of the spherical harmonics - :param mmax_list (list: int): List of maximum order of the spherical harmonics - :param device: Device of the output - - .. py:method:: complex_idx(m, lmax: int = -1) - - - .. py:method:: coefficient_idx(lmax: int, mmax: int) -> torch.Tensor - - - -.. py:class:: SO3_Embedding(length: int, lmax_list: list[int], num_channels: int, device: torch.device, dtype: torch.dtype) - - - Bases: :py:obj:`torch.nn.Module` - - Helper functions for irreps embedding - - :param length: Batch size - :type length: int - :param lmax_list (list: int): List of maximum degree of the spherical harmonics - :param num_channels: Number of channels - :type num_channels: int - :param device: Device of the output - :param dtype: type of the output tensors - - .. py:method:: clone() -> SO3_Embedding - - - .. py:method:: set_embedding(embedding) -> None - - - .. py:method:: set_lmax_mmax(lmax_list, mmax_list) -> None - - - .. py:method:: _expand_edge(edge_index) -> None - - - .. py:method:: expand_edge(edge_index) -> SO3_Embedding - - - .. py:method:: _reduce_edge(edge_index, num_nodes: int) -> None - - - .. py:method:: _m_primary(mapping) -> None - - - .. py:method:: _l_primary(mapping) -> None - - - .. py:method:: _rotate(SO3_rotation, lmax_list, mmax_list) -> None - - - .. py:method:: _rotate_inv(SO3_rotation, mappingReduced) -> None - - - .. py:method:: _grid_act(SO3_grid, act, mappingReduced) -> None - - - .. py:method:: to_grid(SO3_grid, lmax: int = -1) -> torch.Tensor - - - .. py:method:: _from_grid(x_grid, SO3_grid, lmax: int = -1) -> None - - - -.. py:class:: SO3_Rotation(rot_mat3x3: torch.Tensor, lmax: list[int]) - - - Bases: :py:obj:`torch.nn.Module` - - Helper functions for Wigner-D rotations - - :param rot_mat3x3: Rotation matrix - :type rot_mat3x3: tensor - :param lmax_list (list: int): List of maximum degree of the spherical harmonics - - .. py:method:: set_lmax(lmax) -> None - - - .. py:method:: rotate(embedding, out_lmax, out_mmax) -> torch.Tensor - - - .. py:method:: rotate_inv(embedding, in_lmax, in_mmax) -> torch.Tensor - - - .. py:method:: RotationToWignerDMatrix(edge_rot_mat: torch.Tensor, start_lmax: int, end_lmax: int) -> torch.Tensor - - - .. py:method:: wigner_D(lval, alpha, beta, gamma) - - - .. py:method:: _z_rot_mat(angle: torch.Tensor, lv: int) -> torch.Tensor - - - -.. py:class:: SO3_Grid(lmax: int, mmax: int) - - - Bases: :py:obj:`torch.nn.Module` - - Helper functions for grid representation of the irreps - - :param lmax: Maximum degree of the spherical harmonics - :type lmax: int - :param mmax: Maximum order of the spherical harmonics - :type mmax: int - - .. py:method:: _initialize(device: torch.device) -> None - - - .. py:method:: get_to_grid_mat(device: torch.device) - - - .. py:method:: get_from_grid_mat(device: torch.device) - - - .. py:method:: to_grid(embedding: torch.Tensor, lmax: int, mmax: int) -> torch.Tensor - - - .. 
py:method:: from_grid(grid: torch.Tensor, lmax: int, mmax: int) -> torch.Tensor - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/gemnet/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/gemnet/index.rst deleted file mode 100644 index 2c52d2d7d..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/gemnet/index.rst +++ /dev/null @@ -1,127 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet.gemnet` -============================================ - -.. py:module:: fairchem.core.models.gemnet.gemnet - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet.gemnet.GemNetT - - - - -.. py:class:: GemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', num_elements: int = 83, scale_file: str | None = None) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - GemNet-T, triplets-only variant of GemNet - - :param num_atoms (int): - :type num_atoms (int): Unused argument - :param bond_feat_dim (int): - :type bond_feat_dim (int): Unused argument - :param num_targets: Number of prediction targets. - :type num_targets: int - :param num_spherical: Controls maximum frequency. - :type num_spherical: int - :param num_radial: Controls maximum frequency. - :type num_radial: int - :param num_blocks: Number of building blocks to be stacked. - :type num_blocks: int - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_trip: (Down-projected) Embedding size in the triplet message passing block. - :type emb_size_trip: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param emb_size_bil_trip: Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer. - :type emb_size_bil_trip: int - :param num_before_skip: Number of residual blocks before the first skip connection. - :type num_before_skip: int - :param num_after_skip: Number of residual blocks after the first skip connection. - :type num_after_skip: int - :param num_concat: Number of residual blocks after the concatenation. - :type num_concat: int - :param num_atom: Number of residual blocks in the atom embedding blocks. - :type num_atom: int - :param regress_forces: Whether to predict forces. Default: True - :type regress_forces: bool - :param direct_forces: If True predict forces based on aggregation of interatomic directions. - If False predict forces based on negative gradient of energy potential. 
- :type direct_forces: bool - :param cutoff: Embedding cutoff for interactomic directions in Angstrom. - :type cutoff: float - :param rbf: Name and hyperparameters of the radial basis function. - :type rbf: dict - :param envelope: Name and hyperparameters of the envelope function. - :type envelope: dict - :param cbf: Name and hyperparameters of the cosine basis function. - :type cbf: dict - :param extensive: Whether the output should be extensive (proportional to the number of atoms) - :type extensive: bool - :param output_init: Initialization method for the final dense layer. - :type output_init: str - :param activation: Name of the activation function. - :type activation: str - :param scale_file: Path to the json file containing the scaling factors. - :type scale_file: str - - .. py:property:: num_params - - - .. py:method:: get_triplets(edge_index, num_atoms) - - Get all b->a for each edge c->a. - It is possible that b=c, as long as the edges are distinct. - - :returns: * **id3_ba** (*torch.Tensor, shape (num_triplets,)*) -- Indices of input edge b->a of each triplet b->a<-c - * **id3_ca** (*torch.Tensor, shape (num_triplets,)*) -- Indices of output edge c->a of each triplet b->a<-c - * **id3_ragged_idx** (*torch.Tensor, shape (num_triplets,)*) -- Indices enumerating the copies of id3_ca for creating a padded matrix - - - .. py:method:: select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, inverse_neg) -> torch.Tensor - - - .. py:method:: reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector) - - Reorder edges to make finding counter-directional edges easier. - - Some edges are only present in one direction in the data, - since every atom has a maximum number of neighbors. Since we only use i->j - edges here, we lose some j->i edges and add others by - making it symmetric. - We could fix this by merging edge_index with its counter-edges, - including the cell_offsets, and then running torch.unique. - But this does not seem worth it. - - - .. py:method:: select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None) - - - .. py:method:: generate_interaction_graph(data) - - - .. py:method:: forward(data) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/index.rst deleted file mode 100644 index 36c179893..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/index.rst +++ /dev/null @@ -1,139 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet` -===================================== - -.. py:module:: fairchem.core.models.gemnet - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - layers/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - gemnet/index.rst - initializers/index.rst - utils/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet.GemNetT - - - - -.. 
py:class:: GemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', num_elements: int = 83, scale_file: str | None = None) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - GemNet-T, triplets-only variant of GemNet - - :param num_atoms (int): - :type num_atoms (int): Unused argument - :param bond_feat_dim (int): - :type bond_feat_dim (int): Unused argument - :param num_targets: Number of prediction targets. - :type num_targets: int - :param num_spherical: Controls maximum frequency. - :type num_spherical: int - :param num_radial: Controls maximum frequency. - :type num_radial: int - :param num_blocks: Number of building blocks to be stacked. - :type num_blocks: int - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_trip: (Down-projected) Embedding size in the triplet message passing block. - :type emb_size_trip: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param emb_size_bil_trip: Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer. - :type emb_size_bil_trip: int - :param num_before_skip: Number of residual blocks before the first skip connection. - :type num_before_skip: int - :param num_after_skip: Number of residual blocks after the first skip connection. - :type num_after_skip: int - :param num_concat: Number of residual blocks after the concatenation. - :type num_concat: int - :param num_atom: Number of residual blocks in the atom embedding blocks. - :type num_atom: int - :param regress_forces: Whether to predict forces. Default: True - :type regress_forces: bool - :param direct_forces: If True predict forces based on aggregation of interatomic directions. - If False predict forces based on negative gradient of energy potential. - :type direct_forces: bool - :param cutoff: Embedding cutoff for interactomic directions in Angstrom. - :type cutoff: float - :param rbf: Name and hyperparameters of the radial basis function. - :type rbf: dict - :param envelope: Name and hyperparameters of the envelope function. - :type envelope: dict - :param cbf: Name and hyperparameters of the cosine basis function. - :type cbf: dict - :param extensive: Whether the output should be extensive (proportional to the number of atoms) - :type extensive: bool - :param output_init: Initialization method for the final dense layer. - :type output_init: str - :param activation: Name of the activation function. - :type activation: str - :param scale_file: Path to the json file containing the scaling factors. - :type scale_file: str - - .. py:property:: num_params - - - .. py:method:: get_triplets(edge_index, num_atoms) - - Get all b->a for each edge c->a. 
- It is possible that b=c, as long as the edges are distinct. - - :returns: * **id3_ba** (*torch.Tensor, shape (num_triplets,)*) -- Indices of input edge b->a of each triplet b->a<-c - * **id3_ca** (*torch.Tensor, shape (num_triplets,)*) -- Indices of output edge c->a of each triplet b->a<-c - * **id3_ragged_idx** (*torch.Tensor, shape (num_triplets,)*) -- Indices enumerating the copies of id3_ca for creating a padded matrix - - - .. py:method:: select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, inverse_neg) -> torch.Tensor - - - .. py:method:: reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector) - - Reorder edges to make finding counter-directional edges easier. - - Some edges are only present in one direction in the data, - since every atom has a maximum number of neighbors. Since we only use i->j - edges here, we lose some j->i edges and add others by - making it symmetric. - We could fix this by merging edge_index with its counter-edges, - including the cell_offsets, and then running torch.unique. - But this does not seem worth it. - - - .. py:method:: select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None) - - - .. py:method:: generate_interaction_graph(data) - - - .. py:method:: forward(data) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/initializers/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/initializers/index.rst deleted file mode 100644 index 5a7c40cc3..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/initializers/index.rst +++ /dev/null @@ -1,43 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet.initializers` -================================================== - -.. py:module:: fairchem.core.models.gemnet.initializers - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet.initializers._standardize - fairchem.core.models.gemnet.initializers.he_orthogonal_init - - - -.. py:function:: _standardize(kernel) - - Makes sure that N*Var(W) = 1 and E[W] = 0 - - -.. py:function:: he_orthogonal_init(tensor: torch.Tensor) -> torch.Tensor - - Generate a weight matrix with variance according to He (Kaiming) initialization. - Based on a random (semi-)orthogonal matrix neural networks - are expected to learn better when features are decorrelated - (stated by eg. "Reducing overfitting in deep networks by decorrelating representations", - "Dropout: a simple way to prevent neural networks from overfitting", - "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks") - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/layers/atom_update_block/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/layers/atom_update_block/index.rst deleted file mode 100644 index dd0dfb82a..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/layers/atom_update_block/index.rst +++ /dev/null @@ -1,88 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet.layers.atom_update_block` -============================================================== - -.. py:module:: fairchem.core.models.gemnet.layers.atom_update_block - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. 
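A minimal usage sketch for the initializer documented above (assumes ``torch`` and ``fairchem`` are installed; copying into an existing layer is just one way to apply it)::

    import torch
    from fairchem.core.models.gemnet.initializers import he_orthogonal_init

    layer = torch.nn.Linear(64, 128, bias=False)
    # Fill a weight-shaped tensor with He-scaled (semi-)orthogonal values, then copy it in.
    init_weight = he_orthogonal_init(torch.empty_like(layer.weight))
    with torch.no_grad():
        layer.weight.copy_(init_weight)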
- - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet.layers.atom_update_block.AtomUpdateBlock - fairchem.core.models.gemnet.layers.atom_update_block.OutputBlock - - - - -.. py:class:: AtomUpdateBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, activation=None, name: str = 'atom_update') - - - Bases: :py:obj:`torch.nn.Module` - - Aggregate the message embeddings of the atoms. - - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param nHidden: Number of residual blocks. - :type nHidden: int - :param activation: Name of the activation function to use in the dense layers. - :type activation: callable/str - - .. py:method:: get_mlp(units_in, units, nHidden, activation) - - - .. py:method:: forward(h, m, rbf, id_j) - - :returns: **h** -- Atom embedding. - :rtype: torch.Tensor, shape=(nAtoms, emb_size_atom) - - - -.. py:class:: OutputBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, num_targets: int, activation=None, direct_forces: bool = True, output_init: str = 'HeOrthogonal', name: str = 'output', **kwargs) - - - Bases: :py:obj:`AtomUpdateBlock` - - Combines the atom update block and subsequent final dense layer. - - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param nHidden: Number of residual blocks. - :type nHidden: int - :param num_targets: Number of targets. - :type num_targets: int - :param activation: Name of the activation function to use in the dense layers except for the final dense layer. - :type activation: str - :param direct_forces: If True, directly predict forces without taking the gradient of the energy potential. - :type direct_forces: bool - :param output_init: Kernel initializer of the final dense layer. - :type output_init: str - - .. py:method:: reset_parameters() -> None - - - .. py:method:: forward(h, m, rbf, id_j) - - :returns: * **(E, F)** (*tuple*) - * **- E** (*torch.Tensor, shape=(nAtoms, num_targets)*) - * **- F** (*torch.Tensor, shape=(nEdges, num_targets)*) - * *Energy and force prediction* - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/layers/base_layers/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/layers/base_layers/index.rst deleted file mode 100644 index 21412e038..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/layers/base_layers/index.rst +++ /dev/null @@ -1,149 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet.layers.base_layers` -======================================================== - -.. py:module:: fairchem.core.models.gemnet.layers.base_layers - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet.layers.base_layers.Dense - fairchem.core.models.gemnet.layers.base_layers.ScaledSiLU - fairchem.core.models.gemnet.layers.base_layers.SiQU - fairchem.core.models.gemnet.layers.base_layers.ResidualLayer - - - - -..
py:class:: Dense(in_features, out_features, bias: bool = False, activation=None) - - - Bases: :py:obj:`torch.nn.Module` - - Combines dense layer with scaling for swish activation. - - :param units: Output embedding size. - :type units: int - :param activation: Name of the activation function to use. - :type activation: str - :param bias: True if use bias. - :type bias: bool - - .. py:method:: reset_parameters(initializer=he_orthogonal_init) -> None - - - .. py:method:: forward(x) - - - -.. py:class:: ScaledSiLU - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x) - - - -.. py:class:: SiQU - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x) - - - -.. py:class:: ResidualLayer(units: int, nLayers: int = 2, layer=Dense, **layer_kwargs) - - - Bases: :py:obj:`torch.nn.Module` - - Residual block with output scaled by 1/sqrt(2). - - :param units: Output embedding size. - :type units: int - :param nLayers: Number of dense layers. - :type nLayers: int - :param layer_kwargs: Keyword arguments for initializing the layers. - :type layer_kwargs: str - - .. py:method:: forward(input) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/layers/basis_utils/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/layers/basis_utils/index.rst deleted file mode 100644 index dfe595f2c..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/layers/basis_utils/index.rst +++ /dev/null @@ -1,115 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet.layers.basis_utils` -======================================================== - -.. py:module:: fairchem.core.models.gemnet.layers.basis_utils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. 
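A small shape-level sketch of the ``Dense`` and ``ResidualLayer`` building blocks documented above (hypothetical sizes; ``'swish'`` follows the default activation name used by the GemNet-T model)::

    import torch
    from fairchem.core.models.gemnet.layers.base_layers import Dense, ResidualLayer

    x = torch.randn(10, 64)                     # e.g. 10 edge embeddings of size 64
    dense = Dense(64, 128, activation="swish")  # linear layer with scaled-SiLU, no bias by default
    res = ResidualLayer(units=64, nLayers=2)    # two Dense layers plus skip, scaled by 1/sqrt(2)
    print(dense(x).shape, res(x).shape)         # torch.Size([10, 128]) torch.Size([10, 64])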
- - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet.layers.basis_utils.Jn - fairchem.core.models.gemnet.layers.basis_utils.Jn_zeros - fairchem.core.models.gemnet.layers.basis_utils.spherical_bessel_formulas - fairchem.core.models.gemnet.layers.basis_utils.bessel_basis - fairchem.core.models.gemnet.layers.basis_utils.sph_harm_prefactor - fairchem.core.models.gemnet.layers.basis_utils.associated_legendre_polynomials - fairchem.core.models.gemnet.layers.basis_utils.real_sph_harm - - - -.. py:function:: Jn(r: float, n: int) - - numerical spherical bessel functions of order n - - -.. py:function:: Jn_zeros(n: int, k: int) - - Compute the first k zeros of the spherical bessel functions up to order n (excluded) - - -.. py:function:: spherical_bessel_formulas(n: int) - - Computes the sympy formulas for the spherical bessel functions up to order n (excluded) - - -.. py:function:: bessel_basis(n: int, k: int) - - Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to - order n (excluded) and maximum frequency k (excluded). - - :returns: - - list - Bessel basis formulas taking in a single argument x. - Has length n where each element has length k. -> In total n*k many. - :rtype: bess_basis - - -.. py:function:: sph_harm_prefactor(l_degree: int, m_order: int) - - Computes the constant pre-factor for the spherical harmonic of degree l and order m. - - :param l_degree: Degree of the spherical harmonic. l >= 0 - :type l_degree: int - :param m_order: Order of the spherical harmonic. -l <= m <= l - :type m_order: int - - :returns: **factor** - :rtype: float - - -.. py:function:: associated_legendre_polynomials(L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True) - - Computes string formulas of the associated legendre polynomials up to degree L (excluded). - - :param L_maxdegree: Degree up to which to calculate the associated legendre polynomials (degree L is excluded). - :type L_maxdegree: int - :param zero_m_only: If True only calculate the polynomials for the polynomials where m=0. - :type zero_m_only: bool - :param pos_m_only: If True only calculate the polynomials for the polynomials where m>=0. Overwritten by zero_m_only. - :type pos_m_only: bool - - :returns: **polynomials** -- Contains the sympy functions of the polynomials (in total L many if zero_m_only is True else L^2 many). - :rtype: list - - -.. py:function:: real_sph_harm(L_maxdegree: int, use_theta: bool, use_phi: bool = True, zero_m_only: bool = True) - - Computes formula strings of the the real part of the spherical harmonics up to degree L (excluded). - Variables are either spherical coordinates phi and theta (or cartesian coordinates x,y,z) on the UNIT SPHERE. - - :param L_maxdegree: Degree up to which to calculate the spherical harmonics (degree L is excluded). - :type L_maxdegree: int - :param use_theta: - - True: Expects the input of the formula strings to contain theta. - - False: Expects the input of the formula strings to contain z. - :type use_theta: bool - :param use_phi: - - True: Expects the input of the formula strings to contain phi. - - False: Expects the input of the formula strings to contain x and y. - Does nothing if zero_m_only is True - :type use_phi: bool - :param zero_m_only: If True only calculate the harmonics where m=0. 
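To make the symbolic nature of these helpers concrete, a short usage sketch (assumes ``fairchem`` and its ``sympy`` dependency are installed; only the printed formulas are of interest here, the container layouts are as documented above)::

    from fairchem.core.models.gemnet.layers.basis_utils import bessel_basis, real_sph_harm

    # n = 3 orders and k = 2 frequencies -> 3 * 2 sympy expressions in a single variable x.
    print(bessel_basis(3, 2))
    # m = 0 real spherical harmonics up to degree 3 (excluded), written in terms of z = cos(theta).
    print(real_sph_harm(3, use_theta=False, zero_m_only=True))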
- :type zero_m_only: bool - - :returns: **Y_lm_real** -- Computes formula strings of the the real part of the spherical harmonics up - to degree L (where degree L is not excluded). - In total L^2 many sph harm exist up to degree L (excluded). However, if zero_m_only only is True then - the total count is reduced to be only L many. - :rtype: list - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/layers/efficient/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/layers/efficient/index.rst deleted file mode 100644 index 45cad6ebe..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/layers/efficient/index.rst +++ /dev/null @@ -1,85 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet.layers.efficient` -====================================================== - -.. py:module:: fairchem.core.models.gemnet.layers.efficient - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet.layers.efficient.EfficientInteractionDownProjection - fairchem.core.models.gemnet.layers.efficient.EfficientInteractionBilinear - - - - -.. py:class:: EfficientInteractionDownProjection(num_spherical: int, num_radial: int, emb_size_interm: int) - - - Bases: :py:obj:`torch.nn.Module` - - Down projection in the efficient reformulation. - - :param emb_size_interm: Intermediate embedding size (down-projection size). - :type emb_size_interm: int - :param kernel_initializer: Initializer of the weight matrix. - :type kernel_initializer: callable - - .. py:method:: reset_parameters() -> None - - - .. py:method:: forward(rbf, sph, id_ca, id_ragged_idx) - - :param rbf: - :type rbf: torch.Tensor, shape=(1, nEdges, num_radial) - :param sph: - :type sph: torch.Tensor, shape=(nEdges, Kmax, num_spherical) - :param id_ca: - :param id_ragged_idx: - - :returns: * **rbf_W1** (*torch.Tensor, shape=(nEdges, emb_size_interm, num_spherical)*) - * **sph** (*torch.Tensor, shape=(nEdges, Kmax, num_spherical)*) -- Kmax = maximum number of neighbors of the edges - - - -.. py:class:: EfficientInteractionBilinear(emb_size: int, emb_size_interm: int, units_out: int) - - - Bases: :py:obj:`torch.nn.Module` - - Efficient reformulation of the bilinear layer and subsequent summation. - - :param units_out: Embedding output size of the bilinear layer. - :type units_out: int - :param kernel_initializer: Initializer of the weight matrix. - :type kernel_initializer: callable - - .. py:method:: reset_parameters() -> None - - - .. py:method:: forward(basis, m, id_reduce, id_ragged_idx) -> torch.Tensor - - :param basis: - :param m: - :type m: quadruplets: m = m_db , triplets: m = m_ba - :param id_reduce: - :param id_ragged_idx: - - :returns: **m_ca** -- Edge embeddings. - :rtype: torch.Tensor, shape=(nEdges, units_out) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/layers/embedding_block/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/layers/embedding_block/index.rst deleted file mode 100644 index 80f062bee..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/layers/embedding_block/index.rst +++ /dev/null @@ -1,70 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet.layers.embedding_block` -============================================================ - -.. py:module:: fairchem.core.models.gemnet.layers.embedding_block - -.. 
autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet.layers.embedding_block.AtomEmbedding - fairchem.core.models.gemnet.layers.embedding_block.EdgeEmbedding - - - - -.. py:class:: AtomEmbedding(emb_size, num_elements: int) - - - Bases: :py:obj:`torch.nn.Module` - - Initial atom embeddings based on the atom type - - :param emb_size: Atom embeddings size - :type emb_size: int - - .. py:method:: forward(Z) - - :returns: **h** -- Atom embeddings. - :rtype: torch.Tensor, shape=(nAtoms, emb_size) - - - -.. py:class:: EdgeEmbedding(atom_features, edge_features, out_features, activation=None) - - - Bases: :py:obj:`torch.nn.Module` - - Edge embedding based on the concatenation of atom embeddings and subsequent dense layer. - - :param emb_size: Embedding size after the dense layer. - :type emb_size: int - :param activation: Activation function used in the dense layer. - :type activation: str - - .. py:method:: forward(h, m_rbf, idx_s, idx_t) - - :param h: - :param m_rbf: in embedding block: m_rbf = rbf ; In interaction block: m_rbf = m_st - :type m_rbf: shape (nEdges, nFeatures) - :param idx_s: - :param idx_t: - - :returns: **m_st** -- Edge embeddings. - :rtype: torch.Tensor, shape=(nEdges, emb_size) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/layers/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/layers/index.rst deleted file mode 100644 index df70b29c0..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/layers/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet.layers` -============================================ - -.. py:module:: fairchem.core.models.gemnet.layers - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - atom_update_block/index.rst - base_layers/index.rst - basis_utils/index.rst - efficient/index.rst - embedding_block/index.rst - interaction_block/index.rst - radial_basis/index.rst - spherical_basis/index.rst - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/layers/interaction_block/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/layers/interaction_block/index.rst deleted file mode 100644 index 557d73eea..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/layers/interaction_block/index.rst +++ /dev/null @@ -1,92 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet.layers.interaction_block` -============================================================== - -.. py:module:: fairchem.core.models.gemnet.layers.interaction_block - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet.layers.interaction_block.InteractionBlockTripletsOnly - fairchem.core.models.gemnet.layers.interaction_block.TripletInteraction - - - - -.. 
py:class:: InteractionBlockTripletsOnly(emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, activation: str | None = None, name: str = 'Interaction') - - - Bases: :py:obj:`torch.nn.Module` - - Interaction block for GemNet-T/dT. - - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_trip: (Down-projected) Embedding size in the triplet message passing block. - :type emb_size_trip: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param emb_size_bil_trip: Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer. - :type emb_size_bil_trip: int - :param num_before_skip: Number of residual blocks before the first skip connection. - :type num_before_skip: int - :param num_after_skip: Number of residual blocks after the first skip connection. - :type num_after_skip: int - :param num_concat: Number of residual blocks after the concatenation. - :type num_concat: int - :param num_atom: Number of residual blocks in the atom embedding blocks. - :type num_atom: int - :param activation: Name of the activation function to use in the dense layers except for the final dense layer. - :type activation: str - - .. py:method:: forward(h: torch.Tensor, m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca, rbf_h, idx_s, idx_t) - - :returns: * **h** (*torch.Tensor, shape=(nEdges, emb_size_atom)*) -- Atom embeddings. - * **m** (*torch.Tensor, shape=(nEdges, emb_size_edge)*) -- Edge embeddings (c->a). - - - -.. py:class:: TripletInteraction(emb_size_edge: int, emb_size_trip: int, emb_size_bilinear: int, emb_size_rbf: int, emb_size_cbf: int, activation: str | None = None, name: str = 'TripletInteraction', **kwargs) - - - Bases: :py:obj:`torch.nn.Module` - - Triplet-based message passing block. - - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_trip: (Down-projected) Embedding size of the edge embeddings after the hadamard product with rbf. - :type emb_size_trip: int - :param emb_size_bilinear: Embedding size of the edge embeddings after the bilinear layer. - :type emb_size_bilinear: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param activation: Name of the activation function to use in the dense layers except for the final dense layer. - :type activation: str - - .. py:method:: forward(m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca) - - :returns: **m** -- Edge embeddings (c->a). 
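For orientation, the generic gather/scatter pattern behind this triplet message passing, written in plain torch (illustrative only; the real block additionally applies dense layers, Hadamard products with the radial and circular bases, and learned scaling factors)::

    import torch

    nEdges, emb_size, nTriplets = 5, 8, 12
    m = torch.randn(nEdges, emb_size)                 # edge embeddings m_ba
    w = torch.rand(nTriplets, 1)                      # per-triplet weight, e.g. from the circular basis
    id3_ba = torch.randint(0, nEdges, (nTriplets,))   # input edge b->a of each triplet
    id3_ca = torch.randint(0, nEdges, (nTriplets,))   # output edge c->a of each triplet

    # Gather messages of the incoming edges, weight them per triplet,
    # and scatter-add the result onto the outgoing edges.
    m_ca = torch.zeros(nEdges, emb_size).index_add_(0, id3_ca, w * m[id3_ba])
    print(m_ca.shape)  # torch.Size([5, 8])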
- :rtype: torch.Tensor, shape=(nEdges, emb_size_edge) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/layers/radial_basis/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/layers/radial_basis/index.rst deleted file mode 100644 index a77ae3108..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/layers/radial_basis/index.rst +++ /dev/null @@ -1,114 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet.layers.radial_basis` -========================================================= - -.. py:module:: fairchem.core.models.gemnet.layers.radial_basis - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet.layers.radial_basis.PolynomialEnvelope - fairchem.core.models.gemnet.layers.radial_basis.ExponentialEnvelope - fairchem.core.models.gemnet.layers.radial_basis.SphericalBesselBasis - fairchem.core.models.gemnet.layers.radial_basis.BernsteinBasis - fairchem.core.models.gemnet.layers.radial_basis.RadialBasis - - - - -.. py:class:: PolynomialEnvelope(exponent: int) - - - Bases: :py:obj:`torch.nn.Module` - - Polynomial envelope function that ensures a smooth cutoff. - - :param exponent: Exponent of the envelope function. - :type exponent: int - - .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor - - - -.. py:class:: ExponentialEnvelope - - - Bases: :py:obj:`torch.nn.Module` - - Exponential envelope function that ensures a smooth cutoff, - as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. - SpookyNet: Learning Force Fields with Electronic Degrees of Freedom - and Nonlocal Effects - - .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor - - - -.. py:class:: SphericalBesselBasis(num_radial: int, cutoff: float) - - - Bases: :py:obj:`torch.nn.Module` - - 1D spherical Bessel basis - - :param num_radial: Controls maximum frequency. - :type num_radial: int - :param cutoff: Cutoff distance in Angstrom. - :type cutoff: float - - .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor - - - -.. py:class:: BernsteinBasis(num_radial: int, pregamma_initial: float = 0.45264) - - - Bases: :py:obj:`torch.nn.Module` - - Bernstein polynomial basis, - as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. - SpookyNet: Learning Force Fields with Electronic Degrees of Freedom - and Nonlocal Effects - - :param num_radial: Controls maximum frequency. - :type num_radial: int - :param pregamma_initial: Initial value of exponential coefficient gamma. - Default: gamma = 0.5 * a_0**-1 = 0.94486, - inverse softplus -> pregamma = log e**gamma - 1 = 0.45264 - :type pregamma_initial: float - - .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor - - - -.. py:class:: RadialBasis(num_radial: int, cutoff: float, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None) - - - Bases: :py:obj:`torch.nn.Module` - - :param num_radial: Controls maximum frequency. - :type num_radial: int - :param cutoff: Cutoff distance in Angstrom. - :type cutoff: float - :param rbf: Basis function and its hyperparameters. - :type rbf: dict = {"name": "gaussian"} - :param envelope: Envelope function and its hyperparameters. - :type envelope: dict = {"name": "polynomial", "exponent": 5} - - .. 
py:method:: forward(d) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/layers/spherical_basis/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/layers/spherical_basis/index.rst deleted file mode 100644 index ce902370e..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/layers/spherical_basis/index.rst +++ /dev/null @@ -1,47 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet.layers.spherical_basis` -============================================================ - -.. py:module:: fairchem.core.models.gemnet.layers.spherical_basis - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet.layers.spherical_basis.CircularBasisLayer - - - - -.. py:class:: CircularBasisLayer(num_spherical: int, radial_basis: fairchem.core.models.gemnet.layers.radial_basis.RadialBasis, cbf, efficient: bool = False) - - - Bases: :py:obj:`torch.nn.Module` - - 2D Fourier Bessel Basis - - :param num_spherical: Controls maximum frequency. - :type num_spherical: int - :param radial_basis: Radial basis functions - :type radial_basis: RadialBasis - :param cbf: Name and hyperparameters of the cosine basis function - :type cbf: dict - :param efficient: Whether to use the "efficient" summation order - :type efficient: bool - - .. py:method:: forward(D_ca, cosφ_cab, id3_ca) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet/utils/index.rst b/_sources/autoapi/fairchem/core/models/gemnet/utils/index.rst deleted file mode 100644 index ee921c574..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet/utils/index.rst +++ /dev/null @@ -1,123 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet.utils` -=========================================== - -.. py:module:: fairchem.core.models.gemnet.utils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet.utils.read_json - fairchem.core.models.gemnet.utils.update_json - fairchem.core.models.gemnet.utils.write_json - fairchem.core.models.gemnet.utils.read_value_json - fairchem.core.models.gemnet.utils.ragged_range - fairchem.core.models.gemnet.utils.repeat_blocks - fairchem.core.models.gemnet.utils.calculate_interatomic_vectors - fairchem.core.models.gemnet.utils.inner_product_normalized - fairchem.core.models.gemnet.utils.mask_neighbors - - - -.. py:function:: read_json(path: str) - - -.. py:function:: update_json(path: str, data) -> None - - -.. py:function:: write_json(path: str, data) -> None - - -.. py:function:: read_value_json(path: str, key: str) - - -.. py:function:: ragged_range(sizes: torch.Tensor) -> torch.Tensor - - Multiple concatenated ranges. - - .. rubric:: Examples - - sizes = [1 4 2 3] - Return: [0 0 1 2 3 0 1 0 1 2] - - -.. py:function:: repeat_blocks(sizes: torch.Tensor, repeats: int | torch.Tensor, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) -> torch.Tensor - - Repeat blocks of indices. 
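The ``ragged_range`` helper above admits a one-line reference implementation, shown here only to reproduce the documented example (the library version is vectorized)::

    import torch

    def ragged_range_reference(sizes: torch.Tensor) -> torch.Tensor:
        # Concatenate the ranges [0, s) for every entry s of `sizes`.
        return torch.cat([torch.arange(int(s)) for s in sizes])

    print(ragged_range_reference(torch.tensor([1, 4, 2, 3])))
    # tensor([0, 0, 1, 2, 3, 0, 1, 0, 1, 2])

The ``repeat_blocks`` helper below generalizes this pattern to repeated and shifted blocks of indices.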
- Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements - - continuous_indexing: Whether to keep increasing the index after each block - start_idx: Starting index - block_inc: Number to increment by after each block, - either global or per block. Shape: len(sizes) - 1 - repeat_inc: Number to increment by after each repetition, - either global or per block - - .. rubric:: Examples - - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False - Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True - Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; - repeat_inc = 4 - Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; - start_idx = 5 - Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; - block_inc = 1 - Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7] - sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True - Return: [0 1 2 0 1 2 3 4 3 4 3 4] - sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True - Return: [0 1 0 1 5 6 5 6] - - -.. py:function:: calculate_interatomic_vectors(R: torch.Tensor, id_s: torch.Tensor, id_t: torch.Tensor, offsets_st: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor] - - Calculate the vectors connecting the given atom pairs, - considering offsets from periodic boundary conditions (PBC). - - :param R: Atom positions. - :type R: Tensor, shape = (nAtoms, 3) - :param id_s: Indices of the source atom of the edges. - :type id_s: Tensor, shape = (nEdges,) - :param id_t: Indices of the target atom of the edges. - :type id_t: Tensor, shape = (nEdges,) - :param offsets_st: PBC offsets of the edges. - Subtract this from the correct direction. - :type offsets_st: Tensor, shape = (nEdges,) - - :returns: **(D_st, V_st)** -- - - D_st: Tensor, shape = (nEdges,) - Distance from atom t to s. - V_st: Tensor, shape = (nEdges,) - Unit direction from atom t to s. - :rtype: tuple - - -.. py:function:: inner_product_normalized(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor - - Calculate the inner product between the given normalized vectors, - giving a result between -1 and 1. - - -.. py:function:: mask_neighbors(neighbors: torch.Tensor, edge_mask: torch.Tensor) -> torch.Tensor - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/gemnet/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/gemnet/index.rst deleted file mode 100644 index 99fbd9c3a..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/gemnet/index.rst +++ /dev/null @@ -1,127 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp.gemnet` -=============================================== - -.. py:module:: fairchem.core.models.gemnet_gp.gemnet - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_gp.gemnet.GraphParallelGemNetT - - - - -.. 
py:class:: GraphParallelGemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', scale_num_blocks: bool = False, scatter_atoms: bool = True, scale_file: str | None = None) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - GemNet-T, triplets-only variant of GemNet - - :param num_atoms (int): - :type num_atoms (int): Unused argument - :param bond_feat_dim (int): - :type bond_feat_dim (int): Unused argument - :param num_targets: Number of prediction targets. - :type num_targets: int - :param num_spherical: Controls maximum frequency. - :type num_spherical: int - :param num_radial: Controls maximum frequency. - :type num_radial: int - :param num_blocks: Number of building blocks to be stacked. - :type num_blocks: int - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_trip: (Down-projected) Embedding size in the triplet message passing block. - :type emb_size_trip: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param emb_size_bil_trip: Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer. - :type emb_size_bil_trip: int - :param num_before_skip: Number of residual blocks before the first skip connection. - :type num_before_skip: int - :param num_after_skip: Number of residual blocks after the first skip connection. - :type num_after_skip: int - :param num_concat: Number of residual blocks after the concatenation. - :type num_concat: int - :param num_atom: Number of residual blocks in the atom embedding blocks. - :type num_atom: int - :param regress_forces: Whether to predict forces. Default: True - :type regress_forces: bool - :param direct_forces: If True predict forces based on aggregation of interatomic directions. - If False predict forces based on negative gradient of energy potential. - :type direct_forces: bool - :param cutoff: Embedding cutoff for interactomic directions in Angstrom. - :type cutoff: float - :param rbf: Name and hyperparameters of the radial basis function. - :type rbf: dict - :param envelope: Name and hyperparameters of the envelope function. - :type envelope: dict - :param cbf: Name and hyperparameters of the cosine basis function. - :type cbf: dict - :param extensive: Whether the output should be extensive (proportional to the number of atoms) - :type extensive: bool - :param output_init: Initialization method for the final dense layer. - :type output_init: str - :param activation: Name of the activation function. - :type activation: str - :param scale_file: Path to the json file containing the scaling factors. - :type scale_file: str - - .. py:property:: num_params - - - .. 
py:method:: get_triplets(edge_index, num_atoms) - - Get all b->a for each edge c->a. - It is possible that b=c, as long as the edges are distinct. - - :returns: * **id3_ba** (*torch.Tensor, shape (num_triplets,)*) -- Indices of input edge b->a of each triplet b->a<-c - * **id3_ca** (*torch.Tensor, shape (num_triplets,)*) -- Indices of output edge c->a of each triplet b->a<-c - * **id3_ragged_idx** (*torch.Tensor, shape (num_triplets,)*) -- Indices enumerating the copies of id3_ca for creating a padded matrix - - - .. py:method:: select_symmetric_edges(tensor: torch.Tensor, mask, reorder_idx, inverse_neg) -> torch.Tensor - - - .. py:method:: reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector) - - Reorder edges to make finding counter-directional edges easier. - - Some edges are only present in one direction in the data, - since every atom has a maximum number of neighbors. Since we only use i->j - edges here, we lose some j->i edges and add others by - making it symmetric. - We could fix this by merging edge_index with its counter-edges, - including the cell_offsets, and then running torch.unique. - But this does not seem worth it. - - - .. py:method:: select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None) - - - .. py:method:: generate_interaction_graph(data) - - - .. py:method:: forward(data) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/index.rst deleted file mode 100644 index 092aee307..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/index.rst +++ /dev/null @@ -1,139 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp` -======================================== - -.. py:module:: fairchem.core.models.gemnet_gp - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - layers/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - gemnet/index.rst - initializers/index.rst - utils/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_gp.GraphParallelGemNetT - - - - -.. py:class:: GraphParallelGemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', scale_num_blocks: bool = False, scatter_atoms: bool = True, scale_file: str | None = None) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - GemNet-T, triplets-only variant of GemNet - - :param num_atoms (int): - :type num_atoms (int): Unused argument - :param bond_feat_dim (int): - :type bond_feat_dim (int): Unused argument - :param num_targets: Number of prediction targets. - :type num_targets: int - :param num_spherical: Controls maximum frequency. - :type num_spherical: int - :param num_radial: Controls maximum frequency. - :type num_radial: int - :param num_blocks: Number of building blocks to be stacked. - :type num_blocks: int - :param emb_size_atom: Embedding size of the atoms. 
- :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_trip: (Down-projected) Embedding size in the triplet message passing block. - :type emb_size_trip: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param emb_size_bil_trip: Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer. - :type emb_size_bil_trip: int - :param num_before_skip: Number of residual blocks before the first skip connection. - :type num_before_skip: int - :param num_after_skip: Number of residual blocks after the first skip connection. - :type num_after_skip: int - :param num_concat: Number of residual blocks after the concatenation. - :type num_concat: int - :param num_atom: Number of residual blocks in the atom embedding blocks. - :type num_atom: int - :param regress_forces: Whether to predict forces. Default: True - :type regress_forces: bool - :param direct_forces: If True predict forces based on aggregation of interatomic directions. - If False predict forces based on negative gradient of energy potential. - :type direct_forces: bool - :param cutoff: Embedding cutoff for interactomic directions in Angstrom. - :type cutoff: float - :param rbf: Name and hyperparameters of the radial basis function. - :type rbf: dict - :param envelope: Name and hyperparameters of the envelope function. - :type envelope: dict - :param cbf: Name and hyperparameters of the cosine basis function. - :type cbf: dict - :param extensive: Whether the output should be extensive (proportional to the number of atoms) - :type extensive: bool - :param output_init: Initialization method for the final dense layer. - :type output_init: str - :param activation: Name of the activation function. - :type activation: str - :param scale_file: Path to the json file containing the scaling factors. - :type scale_file: str - - .. py:property:: num_params - - - .. py:method:: get_triplets(edge_index, num_atoms) - - Get all b->a for each edge c->a. - It is possible that b=c, as long as the edges are distinct. - - :returns: * **id3_ba** (*torch.Tensor, shape (num_triplets,)*) -- Indices of input edge b->a of each triplet b->a<-c - * **id3_ca** (*torch.Tensor, shape (num_triplets,)*) -- Indices of output edge c->a of each triplet b->a<-c - * **id3_ragged_idx** (*torch.Tensor, shape (num_triplets,)*) -- Indices enumerating the copies of id3_ca for creating a padded matrix - - - .. py:method:: select_symmetric_edges(tensor: torch.Tensor, mask, reorder_idx, inverse_neg) -> torch.Tensor - - - .. py:method:: reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector) - - Reorder edges to make finding counter-directional edges easier. - - Some edges are only present in one direction in the data, - since every atom has a maximum number of neighbors. Since we only use i->j - edges here, we lose some j->i edges and add others by - making it symmetric. - We could fix this by merging edge_index with its counter-edges, - including the cell_offsets, and then running torch.unique. - But this does not seem worth it. - - - .. py:method:: select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None) - - - .. py:method:: generate_interaction_graph(data) - - - .. 
py:method:: forward(data) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/initializers/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/initializers/index.rst deleted file mode 100644 index 869d770df..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/initializers/index.rst +++ /dev/null @@ -1,43 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp.initializers` -===================================================== - -.. py:module:: fairchem.core.models.gemnet_gp.initializers - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_gp.initializers._standardize - fairchem.core.models.gemnet_gp.initializers.he_orthogonal_init - - - -.. py:function:: _standardize(kernel) - - Makes sure that N*Var(W) = 1 and E[W] = 0 - - -.. py:function:: he_orthogonal_init(tensor: torch.Tensor) -> torch.Tensor - - Generate a weight matrix with variance according to He (Kaiming) initialization. - Based on a random (semi-)orthogonal matrix neural networks - are expected to learn better when features are decorrelated - (stated by eg. "Reducing overfitting in deep networks by decorrelating representations", - "Dropout: a simple way to prevent neural networks from overfitting", - "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks") - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/atom_update_block/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/atom_update_block/index.rst deleted file mode 100644 index 58591fa71..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/atom_update_block/index.rst +++ /dev/null @@ -1,116 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp.layers.atom_update_block` -================================================================= - -.. py:module:: fairchem.core.models.gemnet_gp.layers.atom_update_block - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_gp.layers.atom_update_block.AtomUpdateBlock - fairchem.core.models.gemnet_gp.layers.atom_update_block.OutputBlock - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_gp.layers.atom_update_block.scatter_sum - - - -.. py:function:: scatter_sum(src: torch.Tensor, index: torch.Tensor, dim: int = -1, out: torch.Tensor | None = None, dim_size: int | None = None) -> torch.Tensor - - Clone of torch_scatter.scatter_sum but without in-place operations - - -.. py:class:: AtomUpdateBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, activation: str | None = None, name: str = 'atom_update') - - - Bases: :py:obj:`torch.nn.Module` - - Aggregate the message embeddings of the atoms - - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_atom: Embedding size of the edges. - :type emb_size_atom: int - :param nHidden: Number of residual blocks. - :type nHidden: int - :param activation: Name of the activation function to use in the dense layers. - :type activation: callable/str - - .. 
py:method:: get_mlp(units_in: int, units: int, nHidden: int, activation: str | None) - - - .. py:method:: forward(nAtoms: int, m: int, rbf, id_j) - - :returns: **h** -- Atom embedding. - :rtype: torch.Tensor, shape=(nAtoms, emb_size_atom) - - - -.. py:class:: OutputBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, num_targets: int, activation: str | None = None, direct_forces: bool = True, output_init: str = 'HeOrthogonal', name: str = 'output', **kwargs) - - - Bases: :py:obj:`AtomUpdateBlock` - - Combines the atom update block and subsequent final dense layer. - - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_atom: Embedding size of the edges. - :type emb_size_atom: int - :param nHidden: Number of residual blocks. - :type nHidden: int - :param num_targets: Number of targets. - :type num_targets: int - :param activation: Name of the activation function to use in the dense layers except for the final dense layer. - :type activation: str - :param direct_forces: If true directly predict forces without taking the gradient of the energy potential. - :type direct_forces: bool - :param output_init: Kernel initializer of the final dense layer. - :type output_init: int - - .. py:attribute:: dense_rbf_F - :type: fairchem.core.models.gemnet_gp.layers.base_layers.Dense - - - - .. py:attribute:: out_forces - :type: fairchem.core.models.gemnet_gp.layers.base_layers.Dense - - - - .. py:attribute:: out_energy - :type: fairchem.core.models.gemnet_gp.layers.base_layers.Dense - - - - .. py:method:: reset_parameters() -> None - - - .. py:method:: forward(nAtoms: int, m, rbf, id_j: torch.Tensor) - - :returns: * **(E, F)** (*tuple*) - * **- E** (*torch.Tensor, shape=(nAtoms, num_targets)*) - * **- F** (*torch.Tensor, shape=(nEdges, num_targets)*) - * *Energy and force prediction* - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/base_layers/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/base_layers/index.rst deleted file mode 100644 index 745291508..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/base_layers/index.rst +++ /dev/null @@ -1,149 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp.layers.base_layers` -=========================================================== - -.. py:module:: fairchem.core.models.gemnet_gp.layers.base_layers - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_gp.layers.base_layers.Dense - fairchem.core.models.gemnet_gp.layers.base_layers.ScaledSiLU - fairchem.core.models.gemnet_gp.layers.base_layers.SiQU - fairchem.core.models.gemnet_gp.layers.base_layers.ResidualLayer - - - - -.. py:class:: Dense(num_in_features: int, num_out_features: int, bias: bool = False, activation: str | None = None) - - - Bases: :py:obj:`torch.nn.Module` - - Combines dense layer with scaling for swish activation. - - :param units: Output embedding size. - :type units: int - :param activation: Name of the activation function to use. - :type activation: str - :param bias: True if use bias. - :type bias: bool - - .. py:method:: reset_parameters(initializer=he_orthogonal_init) -> None - - - .. py:method:: forward(x: torch.Tensor) -> torch.Tensor - - - -.. 
py:class:: ScaledSiLU - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x: torch.Tensor) -> torch.Tensor - - - -.. py:class:: SiQU - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x: torch.Tensor) -> torch.Tensor - - - -.. py:class:: ResidualLayer(units: int, nLayers: int = 2, layer=Dense, **layer_kwargs) - - - Bases: :py:obj:`torch.nn.Module` - - Residual block with output scaled by 1/sqrt(2). - - :param units: Output embedding size. - :type units: int - :param nLayers: Number of dense layers. - :type nLayers: int - :param layer_kwargs: Keyword arguments for initializing the layers. - :type layer_kwargs: str - - .. py:method:: forward(input: torch.Tensor) -> torch.Tensor - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/basis_utils/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/basis_utils/index.rst deleted file mode 100644 index f742b5c52..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/basis_utils/index.rst +++ /dev/null @@ -1,115 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp.layers.basis_utils` -=========================================================== - -.. py:module:: fairchem.core.models.gemnet_gp.layers.basis_utils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. 
autoapisummary:: - - fairchem.core.models.gemnet_gp.layers.basis_utils.Jn - fairchem.core.models.gemnet_gp.layers.basis_utils.Jn_zeros - fairchem.core.models.gemnet_gp.layers.basis_utils.spherical_bessel_formulas - fairchem.core.models.gemnet_gp.layers.basis_utils.bessel_basis - fairchem.core.models.gemnet_gp.layers.basis_utils.sph_harm_prefactor - fairchem.core.models.gemnet_gp.layers.basis_utils.associated_legendre_polynomials - fairchem.core.models.gemnet_gp.layers.basis_utils.real_sph_harm - - - -.. py:function:: Jn(r: float, n: int) - - numerical spherical bessel functions of order n - - -.. py:function:: Jn_zeros(n: int, k: int) - - Compute the first k zeros of the spherical bessel functions up to order n (excluded) - - -.. py:function:: spherical_bessel_formulas(n) - - Computes the sympy formulas for the spherical bessel functions up to order n (excluded) - - -.. py:function:: bessel_basis(n: int, k: int) - - Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to - order n (excluded) and maximum frequency k (excluded). - - :returns: - - list - Bessel basis formulas taking in a single argument x. - Has length n where each element has length k. -> In total n*k many. - :rtype: bess_basis - - -.. py:function:: sph_harm_prefactor(l_degree: int, m_order: int) -> float - - Computes the constant pre-factor for the spherical harmonic of degree l and order m. - - :param l_degree: Degree of the spherical harmonic. l >= 0 - :type l_degree: int - :param m_order: Order of the spherical harmonic. -l <= m <= l - :type m_order: int - - :returns: **factor** - :rtype: float - - -.. py:function:: associated_legendre_polynomials(L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True) - - Computes string formulas of the associated legendre polynomials up to degree L (excluded). - - :param L_maxdegree: Degree up to which to calculate the associated legendre polynomials (degree L is excluded). - :type L_maxdegree: int - :param zero_m_only: If True only calculate the polynomials for the polynomials where m=0. - :type zero_m_only: bool - :param pos_m_only: If True only calculate the polynomials for the polynomials where m>=0. Overwritten by zero_m_only. - :type pos_m_only: bool - - :returns: **polynomials** -- Contains the sympy functions of the polynomials (in total L many if zero_m_only is True else L^2 many). - :rtype: list - - -.. py:function:: real_sph_harm(L_maxdegree: int, use_theta: bool, use_phi: bool = True, zero_m_only: bool = True) - - Computes formula strings of the the real part of the spherical harmonics up to degree L (excluded). - Variables are either spherical coordinates phi and theta (or cartesian coordinates x,y,z) on the UNIT SPHERE. - - :param L_maxdegree: Degree up to which to calculate the spherical harmonics (degree L is excluded). - :type L_maxdegree: int - :param use_theta: - - True: Expects the input of the formula strings to contain theta. - - False: Expects the input of the formula strings to contain z. - :type use_theta: bool - :param use_phi: - - True: Expects the input of the formula strings to contain phi. - - False: Expects the input of the formula strings to contain x and y. - Does nothing if zero_m_only is True - :type use_phi: bool - :param zero_m_only: If True only calculate the harmonics where m=0. - :type zero_m_only: bool - - :returns: **Y_lm_real** -- Computes formula strings of the the real part of the spherical harmonics up - to degree L (where degree L is not excluded). 
- In total L^2 many sph harm exist up to degree L (excluded). However, if zero_m_only only is True then - the total count is reduced to be only L many. - :rtype: list - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/efficient/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/efficient/index.rst deleted file mode 100644 index f015b1a0b..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/efficient/index.rst +++ /dev/null @@ -1,85 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp.layers.efficient` -========================================================= - -.. py:module:: fairchem.core.models.gemnet_gp.layers.efficient - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_gp.layers.efficient.EfficientInteractionDownProjection - fairchem.core.models.gemnet_gp.layers.efficient.EfficientInteractionBilinear - - - - -.. py:class:: EfficientInteractionDownProjection(num_spherical: int, num_radial: int, emb_size_interm: int) - - - Bases: :py:obj:`torch.nn.Module` - - Down projection in the efficient reformulation. - - :param emb_size_interm: Intermediate embedding size (down-projection size). - :type emb_size_interm: int - :param kernel_initializer: Initializer of the weight matrix. - :type kernel_initializer: callable - - .. py:method:: reset_parameters() -> None - - - .. py:method:: forward(rbf: torch.Tensor, sph: torch.Tensor, id_ca, id_ragged_idx, Kmax: int) -> tuple[torch.Tensor, torch.Tensor] - - :param rbf: - :type rbf: torch.Tensor, shape=(1, nEdges, num_radial) - :param sph: - :type sph: torch.Tensor, shape=(nEdges, Kmax, num_spherical) - :param id_ca: - :param id_ragged_idx: - - :returns: * **rbf_W1** (*torch.Tensor, shape=(nEdges, emb_size_interm, num_spherical)*) - * **sph** (*torch.Tensor, shape=(nEdges, Kmax, num_spherical)*) -- Kmax = maximum number of neighbors of the edges - - - -.. py:class:: EfficientInteractionBilinear(emb_size: int, emb_size_interm: int, units_out: int) - - - Bases: :py:obj:`torch.nn.Module` - - Efficient reformulation of the bilinear layer and subsequent summation. - - :param units_out: Embedding output size of the bilinear layer. - :type units_out: int - :param kernel_initializer: Initializer of the weight matrix. - :type kernel_initializer: callable - - .. py:method:: reset_parameters() -> None - - - .. py:method:: forward(basis: tuple[torch.Tensor, torch.Tensor], m, id_reduce, id_ragged_idx, edge_offset, Kmax: int) -> torch.Tensor - - :param basis: - :param m: - :type m: quadruplets: m = m_db , triplets: m = m_ba - :param id_reduce: - :param id_ragged_idx: - - :returns: **m_ca** -- Edge embeddings. - :rtype: torch.Tensor, shape=(nEdges, units_out) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/embedding_block/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/embedding_block/index.rst deleted file mode 100644 index ec3b497a0..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/embedding_block/index.rst +++ /dev/null @@ -1,70 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp.layers.embedding_block` -=============================================================== - -.. py:module:: fairchem.core.models.gemnet_gp.layers.embedding_block - -.. 
autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_gp.layers.embedding_block.AtomEmbedding - fairchem.core.models.gemnet_gp.layers.embedding_block.EdgeEmbedding - - - - -.. py:class:: AtomEmbedding(emb_size: int) - - - Bases: :py:obj:`torch.nn.Module` - - Initial atom embeddings based on the atom type - - :param emb_size: Atom embeddings size - :type emb_size: int - - .. py:method:: forward(Z) -> torch.Tensor - - :returns: **h** -- Atom embeddings. - :rtype: torch.Tensor, shape=(nAtoms, emb_size) - - - -.. py:class:: EdgeEmbedding(atom_features: int, edge_features: int, num_out_features: int, activation: str | None = None) - - - Bases: :py:obj:`torch.nn.Module` - - Edge embedding based on the concatenation of atom embeddings and subsequent dense layer. - - :param emb_size: Embedding size after the dense layer. - :type emb_size: int - :param activation: Activation function used in the dense layer. - :type activation: str - - .. py:method:: forward(h, m_rbf, idx_s, idx_t) -> torch.Tensor - - :param h: - :param m_rbf: in embedding block: m_rbf = rbf ; In interaction block: m_rbf = m_st - :type m_rbf: shape (nEdges, nFeatures) - :param idx_s: - :param idx_t: - - :returns: **m_st** -- Edge embeddings. - :rtype: torch.Tensor, shape=(nEdges, emb_size) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/index.rst deleted file mode 100644 index 20dce5bec..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp.layers` -=============================================== - -.. py:module:: fairchem.core.models.gemnet_gp.layers - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - atom_update_block/index.rst - base_layers/index.rst - basis_utils/index.rst - efficient/index.rst - embedding_block/index.rst - interaction_block/index.rst - radial_basis/index.rst - spherical_basis/index.rst - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/interaction_block/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/interaction_block/index.rst deleted file mode 100644 index 7de69b3c9..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/interaction_block/index.rst +++ /dev/null @@ -1,95 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp.layers.interaction_block` -================================================================= - -.. py:module:: fairchem.core.models.gemnet_gp.layers.interaction_block - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_gp.layers.interaction_block.InteractionBlockTripletsOnly - fairchem.core.models.gemnet_gp.layers.interaction_block.TripletInteraction - - - - -.. 
py:class:: InteractionBlockTripletsOnly(emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, activation: str | None = None, name: str = 'Interaction') - - - Bases: :py:obj:`torch.nn.Module` - - Interaction block for GemNet-T/dT. - - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_trip: (Down-projected) Embedding size in the triplet message passing block. - :type emb_size_trip: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param emb_size_bil_trip: Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer. - :type emb_size_bil_trip: int - :param num_before_skip: Number of residual blocks before the first skip connection. - :type num_before_skip: int - :param num_after_skip: Number of residual blocks after the first skip connection. - :type num_after_skip: int - :param num_concat: Number of residual blocks after the concatenation. - :type num_concat: int - :param num_atom: Number of residual blocks in the atom embedding blocks. - :type num_atom: int - :param activation: Name of the activation function to use in the dense layers except for the final dense layer. - :type activation: str - - .. py:method:: forward(h: torch.Tensor, m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca, rbf_h, idx_s, idx_t, edge_offset, Kmax, nAtoms) - - :returns: * **h** (*torch.Tensor, shape=(nEdges, emb_size_atom)*) -- Atom embeddings. - * **m** (*torch.Tensor, shape=(nEdges, emb_size_edge)*) -- Edge embeddings (c->a). - * **Node** (*h*) - * **Edge** (*m, rbf3, id_swap, rbf_h, idx_s, idx_t, cbf3[0], cbf3[1] (dense)*) - * **Triplet** (*id3_ragged_idx, id3_ba, id3_ca*) - - - -.. py:class:: TripletInteraction(emb_size_edge: int, emb_size_trip: int, emb_size_bilinear: int, emb_size_rbf: int, emb_size_cbf: int, activation: str | None = None, name: str = 'TripletInteraction', **kwargs) - - - Bases: :py:obj:`torch.nn.Module` - - Triplet-based message passing block. - - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_trip: (Down-projected) Embedding size of the edge embeddings after the hadamard product with rbf. - :type emb_size_trip: int - :param emb_size_bilinear: Embedding size of the edge embeddings after the bilinear layer. - :type emb_size_bilinear: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param activation: Name of the activation function to use in the dense layers except for the final dense layer. - :type activation: str - - .. py:method:: forward(m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca, edge_offset, Kmax) - - :returns: **m** -- Edge embeddings (c->a). 
- :rtype: torch.Tensor, shape=(nEdges, emb_size_edge) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/radial_basis/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/radial_basis/index.rst deleted file mode 100644 index 1506306c0..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/radial_basis/index.rst +++ /dev/null @@ -1,114 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp.layers.radial_basis` -============================================================ - -.. py:module:: fairchem.core.models.gemnet_gp.layers.radial_basis - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_gp.layers.radial_basis.PolynomialEnvelope - fairchem.core.models.gemnet_gp.layers.radial_basis.ExponentialEnvelope - fairchem.core.models.gemnet_gp.layers.radial_basis.SphericalBesselBasis - fairchem.core.models.gemnet_gp.layers.radial_basis.BernsteinBasis - fairchem.core.models.gemnet_gp.layers.radial_basis.RadialBasis - - - - -.. py:class:: PolynomialEnvelope(exponent: int) - - - Bases: :py:obj:`torch.nn.Module` - - Polynomial envelope function that ensures a smooth cutoff. - - :param exponent: Exponent of the envelope function. - :type exponent: int - - .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor - - - -.. py:class:: ExponentialEnvelope - - - Bases: :py:obj:`torch.nn.Module` - - Exponential envelope function that ensures a smooth cutoff, - as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. - SpookyNet: Learning Force Fields with Electronic Degrees of Freedom - and Nonlocal Effects - - .. py:method:: forward(d_scaled) -> torch.Tensor - - - -.. py:class:: SphericalBesselBasis(num_radial: int, cutoff: float) - - - Bases: :py:obj:`torch.nn.Module` - - 1D spherical Bessel basis - - :param num_radial: Controls maximum frequency. - :type num_radial: int - :param cutoff: Cutoff distance in Angstrom. - :type cutoff: float - - .. py:method:: forward(d_scaled) - - - -.. py:class:: BernsteinBasis(num_radial: int, pregamma_initial: float = 0.45264) - - - Bases: :py:obj:`torch.nn.Module` - - Bernstein polynomial basis, - as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. - SpookyNet: Learning Force Fields with Electronic Degrees of Freedom - and Nonlocal Effects - - :param num_radial: Controls maximum frequency. - :type num_radial: int - :param pregamma_initial: Initial value of exponential coefficient gamma. - Default: gamma = 0.5 * a_0**-1 = 0.94486, - inverse softplus -> pregamma = log e**gamma - 1 = 0.45264 - :type pregamma_initial: float - - .. py:method:: forward(d_scaled) -> torch.Tensor - - - -.. py:class:: RadialBasis(num_radial: int, cutoff: float, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None) - - - Bases: :py:obj:`torch.nn.Module` - - :param num_radial: Controls maximum frequency. - :type num_radial: int - :param cutoff: Cutoff distance in Angstrom. - :type cutoff: float - :param rbf: Basis function and its hyperparameters. - :type rbf: dict = {"name": "gaussian"} - :param envelope: Envelope function and its hyperparameters. - :type envelope: dict = {"name": "polynomial", "exponent": 5} - - .. 
py:method:: forward(d) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/spherical_basis/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/spherical_basis/index.rst deleted file mode 100644 index 29b78eaf3..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/layers/spherical_basis/index.rst +++ /dev/null @@ -1,47 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp.layers.spherical_basis` -=============================================================== - -.. py:module:: fairchem.core.models.gemnet_gp.layers.spherical_basis - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_gp.layers.spherical_basis.CircularBasisLayer - - - - -.. py:class:: CircularBasisLayer(num_spherical: int, radial_basis: fairchem.core.models.gemnet_gp.layers.radial_basis.RadialBasis, cbf, efficient: bool = False) - - - Bases: :py:obj:`torch.nn.Module` - - 2D Fourier Bessel Basis - - :param num_spherical: Controls maximum frequency. - :type num_spherical: int - :param radial_basis: Radial basis functions - :type radial_basis: RadialBasis - :param cbf: Name and hyperparameters of the cosine basis function - :type cbf: dict - :param efficient: Whether to use the "efficient" summation order - :type efficient: bool - - .. py:method:: forward(D_ca, cosφ_cab, id3_ca) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_gp/utils/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_gp/utils/index.rst deleted file mode 100644 index 76d3f6310..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_gp/utils/index.rst +++ /dev/null @@ -1,123 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_gp.utils` -============================================== - -.. py:module:: fairchem.core.models.gemnet_gp.utils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_gp.utils.read_json - fairchem.core.models.gemnet_gp.utils.update_json - fairchem.core.models.gemnet_gp.utils.write_json - fairchem.core.models.gemnet_gp.utils.read_value_json - fairchem.core.models.gemnet_gp.utils.ragged_range - fairchem.core.models.gemnet_gp.utils.repeat_blocks - fairchem.core.models.gemnet_gp.utils.calculate_interatomic_vectors - fairchem.core.models.gemnet_gp.utils.inner_product_normalized - fairchem.core.models.gemnet_gp.utils.mask_neighbors - - - -.. py:function:: read_json(path: str) - - -.. py:function:: update_json(path: str, data) -> None - - -.. py:function:: write_json(path: str, data) -> None - - -.. py:function:: read_value_json(path: str, key) - - -.. py:function:: ragged_range(sizes) - - Multiple concatenated ranges. - - .. rubric:: Examples - - sizes = [1 4 2 3] - Return: [0 0 1 2 3 0 1 0 1 2] - - -.. py:function:: repeat_blocks(sizes: torch.Tensor, repeats, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) -> torch.Tensor - - Repeat blocks of indices. 
- Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements - - continuous_indexing: Whether to keep increasing the index after each block - start_idx: Starting index - block_inc: Number to increment by after each block, - either global or per block. Shape: len(sizes) - 1 - repeat_inc: Number to increment by after each repetition, - either global or per block - - .. rubric:: Examples - - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False - Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True - Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; - repeat_inc = 4 - Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; - start_idx = 5 - Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; - block_inc = 1 - Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7] - sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True - Return: [0 1 2 0 1 2 3 4 3 4 3 4] - sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True - Return: [0 1 0 1 5 6 5 6] - - -.. py:function:: calculate_interatomic_vectors(R: torch.Tensor, id_s: torch.Tensor, id_t: torch.Tensor, offsets_st: torch.Tensor | None) -> tuple[torch.Tensor, torch.Tensor] - - Calculate the vectors connecting the given atom pairs, - considering offsets from periodic boundary conditions (PBC). - - :param R: Atom positions. - :type R: Tensor, shape = (nAtoms, 3) - :param id_s: Indices of the source atom of the edges. - :type id_s: Tensor, shape = (nEdges,) - :param id_t: Indices of the target atom of the edges. - :type id_t: Tensor, shape = (nEdges,) - :param offsets_st: PBC offsets of the edges. - Subtract this from the correct direction. - :type offsets_st: Tensor, shape = (nEdges,) - - :returns: **(D_st, V_st)** -- - - D_st: Tensor, shape = (nEdges,) - Distance from atom t to s. - V_st: Tensor, shape = (nEdges,) - Unit direction from atom t to s. - :rtype: tuple - - -.. py:function:: inner_product_normalized(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor - - Calculate the inner product between the given normalized vectors, - giving a result between -1 and 1. - - -.. py:function:: mask_neighbors(neighbors: torch.Tensor, edge_mask: torch.Tensor) - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/gemnet_oc/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/gemnet_oc/index.rst deleted file mode 100644 index f7d424db7..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/gemnet_oc/index.rst +++ /dev/null @@ -1,249 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.gemnet_oc` -================================================== - -.. py:module:: fairchem.core.models.gemnet_oc.gemnet_oc - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.gemnet_oc.GemNetOC - - - - -.. 
py:class:: GemNetOC(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_quad_in: int, emb_size_quad_out: int, emb_size_aint_in: int, emb_size_aint_out: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_sbf: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, num_output_afteratom: int, num_atom_emb_layers: int = 0, num_global_out_layers: int = 2, regress_forces: bool = True, direct_forces: bool = False, use_pbc: bool = True, scale_backprop_forces: bool = False, cutoff: float = 6.0, cutoff_qint: float | None = None, cutoff_aeaint: float | None = None, cutoff_aint: float | None = None, max_neighbors: int = 50, max_neighbors_qint: int | None = None, max_neighbors_aeaint: int | None = None, max_neighbors_aint: int | None = None, enforce_max_neighbors_strictly: bool = True, rbf: dict[str, str] | None = None, rbf_spherical: dict | None = None, envelope: dict[str, str | int] | None = None, cbf: dict[str, str] | None = None, sbf: dict[str, str] | None = None, extensive: bool = True, forces_coupled: bool = False, output_init: str = 'HeOrthogonal', activation: str = 'silu', quad_interaction: bool = False, atom_edge_interaction: bool = False, edge_atom_interaction: bool = False, atom_interaction: bool = False, scale_basis: bool = False, qint_tags: list | None = None, num_elements: int = 83, otf_graph: bool = False, scale_file: str | None = None, **kwargs) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - :param num_atoms (int): - :type num_atoms (int): Unused argument - :param bond_feat_dim (int): - :type bond_feat_dim (int): Unused argument - :param num_targets: Number of prediction targets. - :type num_targets: int - :param num_spherical: Controls maximum frequency. - :type num_spherical: int - :param num_radial: Controls maximum frequency. - :type num_radial: int - :param num_blocks: Number of building blocks to be stacked. - :type num_blocks: int - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_trip_in: (Down-projected) embedding size of the quadruplet edge embeddings - before the bilinear layer. - :type emb_size_trip_in: int - :param emb_size_trip_out: (Down-projected) embedding size of the quadruplet edge embeddings - after the bilinear layer. - :type emb_size_trip_out: int - :param emb_size_quad_in: (Down-projected) embedding size of the quadruplet edge embeddings - before the bilinear layer. - :type emb_size_quad_in: int - :param emb_size_quad_out: (Down-projected) embedding size of the quadruplet edge embeddings - after the bilinear layer. - :type emb_size_quad_out: int - :param emb_size_aint_in: Embedding size in the atom interaction before the bilinear layer. - :type emb_size_aint_in: int - :param emb_size_aint_out: Embedding size in the atom interaction after the bilinear layer. - :type emb_size_aint_out: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param emb_size_sbf: Embedding size of the spherical basis transformation (two angles). - :type emb_size_sbf: int - :param num_before_skip: Number of residual blocks before the first skip connection. 
- :type num_before_skip: int - :param num_after_skip: Number of residual blocks after the first skip connection. - :type num_after_skip: int - :param num_concat: Number of residual blocks after the concatenation. - :type num_concat: int - :param num_atom: Number of residual blocks in the atom embedding blocks. - :type num_atom: int - :param num_output_afteratom: Number of residual blocks in the output blocks - after adding the atom embedding. - :type num_output_afteratom: int - :param num_atom_emb_layers: Number of residual blocks for transforming atom embeddings. - :type num_atom_emb_layers: int - :param num_global_out_layers: Number of final residual blocks before the output. - :type num_global_out_layers: int - :param regress_forces: Whether to predict forces. Default: True - :type regress_forces: bool - :param direct_forces: If True predict forces based on aggregation of interatomic directions. - If False predict forces based on negative gradient of energy potential. - :type direct_forces: bool - :param use_pbc: Whether to use periodic boundary conditions. - :type use_pbc: bool - :param scale_backprop_forces: Whether to scale up the energy and then scales down the forces - to prevent NaNs and infs in backpropagated forces. - :type scale_backprop_forces: bool - :param cutoff: Embedding cutoff for interatomic connections and embeddings in Angstrom. - :type cutoff: float - :param cutoff_qint: Quadruplet interaction cutoff in Angstrom. - Optional. Uses cutoff per default. - :type cutoff_qint: float - :param cutoff_aeaint: Edge-to-atom and atom-to-edge interaction cutoff in Angstrom. - Optional. Uses cutoff per default. - :type cutoff_aeaint: float - :param cutoff_aint: Atom-to-atom interaction cutoff in Angstrom. - Optional. Uses maximum of all other cutoffs per default. - :type cutoff_aint: float - :param max_neighbors: Maximum number of neighbors for interatomic connections and embeddings. - :type max_neighbors: int - :param max_neighbors_qint: Maximum number of quadruplet interactions per embedding. - Optional. Uses max_neighbors per default. - :type max_neighbors_qint: int - :param max_neighbors_aeaint: Maximum number of edge-to-atom and atom-to-edge interactions per embedding. - Optional. Uses max_neighbors per default. - :type max_neighbors_aeaint: int - :param max_neighbors_aint: Maximum number of atom-to-atom interactions per atom. - Optional. Uses maximum of all other neighbors per default. - :type max_neighbors_aint: int - :param enforce_max_neighbors_strictly: When subselected edges based on max_neighbors args, arbitrarily - select amongst degenerate edges to have exactly the correct number. - :type enforce_max_neighbors_strictly: bool - :param rbf: Name and hyperparameters of the radial basis function. - :type rbf: dict - :param rbf_spherical: Name and hyperparameters of the radial basis function used as part of the - circular and spherical bases. - Optional. Uses rbf per default. - :type rbf_spherical: dict - :param envelope: Name and hyperparameters of the envelope function. - :type envelope: dict - :param cbf: Name and hyperparameters of the circular basis function. - :type cbf: dict - :param sbf: Name and hyperparameters of the spherical basis function. - :type sbf: dict - :param extensive: Whether the output should be extensive (proportional to the number of atoms) - :type extensive: bool - :param forces_coupled: If True, enforce that |F_st| = |F_ts|. No effect if direct_forces is False. 
- :type forces_coupled: bool - :param output_init: Initialization method for the final dense layer. - :type output_init: str - :param activation: Name of the activation function. - :type activation: str - :param scale_file: Path to the pytorch file containing the scaling factors. - :type scale_file: str - :param quad_interaction: Whether to use quadruplet interactions (with dihedral angles) - :type quad_interaction: bool - :param atom_edge_interaction: Whether to use atom-to-edge interactions - :type atom_edge_interaction: bool - :param edge_atom_interaction: Whether to use edge-to-atom interactions - :type edge_atom_interaction: bool - :param atom_interaction: Whether to use atom-to-atom interactions - :type atom_interaction: bool - :param scale_basis: Whether to use a scaling layer in the raw basis function for better - numerical stability. - :type scale_basis: bool - :param qint_tags: Which atom tags to use quadruplet interactions for. - 0=sub-surface bulk, 1=surface, 2=adsorbate atoms. - :type qint_tags: list - - .. py:property:: num_params - :type: int - - - .. py:method:: set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint) - - - .. py:method:: set_max_neighbors(max_neighbors, max_neighbors_qint, max_neighbors_aeaint, max_neighbors_aint) - - - .. py:method:: init_basis_functions(num_radial, num_spherical, rbf, rbf_spherical, envelope, cbf, sbf, scale_basis) - - - .. py:method:: init_shared_basis_layers(num_radial, num_spherical, emb_size_rbf, emb_size_cbf, emb_size_sbf) - - - .. py:method:: calculate_quad_angles(V_st, V_qint_st, quad_idx) - - Calculate angles for quadruplet-based message passing. - - :param V_st: Normalized directions from s to t - :type V_st: Tensor, shape = (nAtoms, 3) - :param V_qint_st: Normalized directions from s to t for the quadruplet - interaction graph - :type V_qint_st: Tensor, shape = (nAtoms, 3) - :param quad_idx: Indices relevant for quadruplet interactions. - :type quad_idx: dict of torch.Tensor - - :returns: * **cosφ_cab** (*Tensor, shape = (num_triplets_inint,)*) -- Cosine of angle between atoms c -> a <- b. - * **cosφ_abd** (*Tensor, shape = (num_triplets_qint,)*) -- Cosine of angle between atoms a -> b -> d. - * **angle_cabd** (*Tensor, shape = (num_quadruplets,)*) -- Dihedral angle between atoms c <- a-b -> d. - - - .. py:method:: select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, opposite_neg) -> torch.Tensor - - Use a mask to remove values of removed edges and then - duplicate the values for the correct edge direction. - - :param tensor: Values to symmetrize for the new tensor. - :type tensor: torch.Tensor - :param mask: Mask defining which edges go in the correct direction. - :type mask: torch.Tensor - :param reorder_idx: Indices defining how to reorder the tensor values after - concatenating the edge values of both directions. - :type reorder_idx: torch.Tensor - :param opposite_neg: Whether the edge in the opposite direction should use the - negative tensor value. - :type opposite_neg: bool - - :returns: **tensor_ordered** -- A tensor with symmetrized values. - :rtype: torch.Tensor - - - .. py:method:: symmetrize_edges(graph, batch_idx) - - Symmetrize edges to ensure existence of counter-directional edges. - - Some edges are only present in one direction in the data, - since every atom has a maximum number of neighbors. - We only use i->j edges here. So we lose some j->i edges - and add others by making it symmetric. - - - .. 
py:method:: subselect_edges(data, graph, cutoff=None, max_neighbors=None) - - Subselect edges using a stricter cutoff and max_neighbors. - - - .. py:method:: generate_graph_dict(data, cutoff, max_neighbors) - - Generate a radius/nearest neighbor graph. - - - .. py:method:: subselect_graph(data, graph, cutoff, max_neighbors, cutoff_orig, max_neighbors_orig) - - If the new cutoff and max_neighbors is different from the original, - subselect the edges of a given graph. - - - .. py:method:: get_graphs_and_indices(data) - - "Generate embedding and interaction graphs and indices. - - - .. py:method:: get_bases(main_graph, a2a_graph, a2ee2a_graph, qint_graph, trip_idx_e2e, trip_idx_a2e, trip_idx_e2a, quad_idx, num_atoms) - - Calculate and transform basis functions. - - - .. py:method:: forward(data) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/index.rst deleted file mode 100644 index 97f6182f0..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/index.rst +++ /dev/null @@ -1,263 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc` -======================================== - -.. py:module:: fairchem.core.models.gemnet_oc - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - layers/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - gemnet_oc/index.rst - initializers/index.rst - interaction_indices/index.rst - utils/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.GemNetOC - - - - -.. py:class:: GemNetOC(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_quad_in: int, emb_size_quad_out: int, emb_size_aint_in: int, emb_size_aint_out: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_sbf: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, num_output_afteratom: int, num_atom_emb_layers: int = 0, num_global_out_layers: int = 2, regress_forces: bool = True, direct_forces: bool = False, use_pbc: bool = True, scale_backprop_forces: bool = False, cutoff: float = 6.0, cutoff_qint: float | None = None, cutoff_aeaint: float | None = None, cutoff_aint: float | None = None, max_neighbors: int = 50, max_neighbors_qint: int | None = None, max_neighbors_aeaint: int | None = None, max_neighbors_aint: int | None = None, enforce_max_neighbors_strictly: bool = True, rbf: dict[str, str] | None = None, rbf_spherical: dict | None = None, envelope: dict[str, str | int] | None = None, cbf: dict[str, str] | None = None, sbf: dict[str, str] | None = None, extensive: bool = True, forces_coupled: bool = False, output_init: str = 'HeOrthogonal', activation: str = 'silu', quad_interaction: bool = False, atom_edge_interaction: bool = False, edge_atom_interaction: bool = False, atom_interaction: bool = False, scale_basis: bool = False, qint_tags: list | None = None, num_elements: int = 83, otf_graph: bool = False, scale_file: str | None = None, **kwargs) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - :param num_atoms (int): - :type num_atoms (int): Unused argument - :param bond_feat_dim (int): - :type bond_feat_dim (int): Unused argument - :param num_targets: Number of prediction targets. - :type num_targets: int - :param num_spherical: Controls maximum frequency. 
- :type num_spherical: int - :param num_radial: Controls maximum frequency. - :type num_radial: int - :param num_blocks: Number of building blocks to be stacked. - :type num_blocks: int - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_trip_in: (Down-projected) embedding size of the quadruplet edge embeddings - before the bilinear layer. - :type emb_size_trip_in: int - :param emb_size_trip_out: (Down-projected) embedding size of the quadruplet edge embeddings - after the bilinear layer. - :type emb_size_trip_out: int - :param emb_size_quad_in: (Down-projected) embedding size of the quadruplet edge embeddings - before the bilinear layer. - :type emb_size_quad_in: int - :param emb_size_quad_out: (Down-projected) embedding size of the quadruplet edge embeddings - after the bilinear layer. - :type emb_size_quad_out: int - :param emb_size_aint_in: Embedding size in the atom interaction before the bilinear layer. - :type emb_size_aint_in: int - :param emb_size_aint_out: Embedding size in the atom interaction after the bilinear layer. - :type emb_size_aint_out: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param emb_size_sbf: Embedding size of the spherical basis transformation (two angles). - :type emb_size_sbf: int - :param num_before_skip: Number of residual blocks before the first skip connection. - :type num_before_skip: int - :param num_after_skip: Number of residual blocks after the first skip connection. - :type num_after_skip: int - :param num_concat: Number of residual blocks after the concatenation. - :type num_concat: int - :param num_atom: Number of residual blocks in the atom embedding blocks. - :type num_atom: int - :param num_output_afteratom: Number of residual blocks in the output blocks - after adding the atom embedding. - :type num_output_afteratom: int - :param num_atom_emb_layers: Number of residual blocks for transforming atom embeddings. - :type num_atom_emb_layers: int - :param num_global_out_layers: Number of final residual blocks before the output. - :type num_global_out_layers: int - :param regress_forces: Whether to predict forces. Default: True - :type regress_forces: bool - :param direct_forces: If True predict forces based on aggregation of interatomic directions. - If False predict forces based on negative gradient of energy potential. - :type direct_forces: bool - :param use_pbc: Whether to use periodic boundary conditions. - :type use_pbc: bool - :param scale_backprop_forces: Whether to scale up the energy and then scales down the forces - to prevent NaNs and infs in backpropagated forces. - :type scale_backprop_forces: bool - :param cutoff: Embedding cutoff for interatomic connections and embeddings in Angstrom. - :type cutoff: float - :param cutoff_qint: Quadruplet interaction cutoff in Angstrom. - Optional. Uses cutoff per default. - :type cutoff_qint: float - :param cutoff_aeaint: Edge-to-atom and atom-to-edge interaction cutoff in Angstrom. - Optional. Uses cutoff per default. - :type cutoff_aeaint: float - :param cutoff_aint: Atom-to-atom interaction cutoff in Angstrom. - Optional. Uses maximum of all other cutoffs per default. - :type cutoff_aint: float - :param max_neighbors: Maximum number of neighbors for interatomic connections and embeddings. 
- :type max_neighbors: int - :param max_neighbors_qint: Maximum number of quadruplet interactions per embedding. - Optional. Uses max_neighbors per default. - :type max_neighbors_qint: int - :param max_neighbors_aeaint: Maximum number of edge-to-atom and atom-to-edge interactions per embedding. - Optional. Uses max_neighbors per default. - :type max_neighbors_aeaint: int - :param max_neighbors_aint: Maximum number of atom-to-atom interactions per atom. - Optional. Uses maximum of all other neighbors per default. - :type max_neighbors_aint: int - :param enforce_max_neighbors_strictly: When subselected edges based on max_neighbors args, arbitrarily - select amongst degenerate edges to have exactly the correct number. - :type enforce_max_neighbors_strictly: bool - :param rbf: Name and hyperparameters of the radial basis function. - :type rbf: dict - :param rbf_spherical: Name and hyperparameters of the radial basis function used as part of the - circular and spherical bases. - Optional. Uses rbf per default. - :type rbf_spherical: dict - :param envelope: Name and hyperparameters of the envelope function. - :type envelope: dict - :param cbf: Name and hyperparameters of the circular basis function. - :type cbf: dict - :param sbf: Name and hyperparameters of the spherical basis function. - :type sbf: dict - :param extensive: Whether the output should be extensive (proportional to the number of atoms) - :type extensive: bool - :param forces_coupled: If True, enforce that |F_st| = |F_ts|. No effect if direct_forces is False. - :type forces_coupled: bool - :param output_init: Initialization method for the final dense layer. - :type output_init: str - :param activation: Name of the activation function. - :type activation: str - :param scale_file: Path to the pytorch file containing the scaling factors. - :type scale_file: str - :param quad_interaction: Whether to use quadruplet interactions (with dihedral angles) - :type quad_interaction: bool - :param atom_edge_interaction: Whether to use atom-to-edge interactions - :type atom_edge_interaction: bool - :param edge_atom_interaction: Whether to use edge-to-atom interactions - :type edge_atom_interaction: bool - :param atom_interaction: Whether to use atom-to-atom interactions - :type atom_interaction: bool - :param scale_basis: Whether to use a scaling layer in the raw basis function for better - numerical stability. - :type scale_basis: bool - :param qint_tags: Which atom tags to use quadruplet interactions for. - 0=sub-surface bulk, 1=surface, 2=adsorbate atoms. - :type qint_tags: list - - .. py:property:: num_params - :type: int - - - .. py:method:: set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint) - - - .. py:method:: set_max_neighbors(max_neighbors, max_neighbors_qint, max_neighbors_aeaint, max_neighbors_aint) - - - .. py:method:: init_basis_functions(num_radial, num_spherical, rbf, rbf_spherical, envelope, cbf, sbf, scale_basis) - - - .. py:method:: init_shared_basis_layers(num_radial, num_spherical, emb_size_rbf, emb_size_cbf, emb_size_sbf) - - - .. py:method:: calculate_quad_angles(V_st, V_qint_st, quad_idx) - - Calculate angles for quadruplet-based message passing. - - :param V_st: Normalized directions from s to t - :type V_st: Tensor, shape = (nAtoms, 3) - :param V_qint_st: Normalized directions from s to t for the quadruplet - interaction graph - :type V_qint_st: Tensor, shape = (nAtoms, 3) - :param quad_idx: Indices relevant for quadruplet interactions. 
- :type quad_idx: dict of torch.Tensor - - :returns: * **cosφ_cab** (*Tensor, shape = (num_triplets_inint,)*) -- Cosine of angle between atoms c -> a <- b. - * **cosφ_abd** (*Tensor, shape = (num_triplets_qint,)*) -- Cosine of angle between atoms a -> b -> d. - * **angle_cabd** (*Tensor, shape = (num_quadruplets,)*) -- Dihedral angle between atoms c <- a-b -> d. - - - .. py:method:: select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, opposite_neg) -> torch.Tensor - - Use a mask to remove values of removed edges and then - duplicate the values for the correct edge direction. - - :param tensor: Values to symmetrize for the new tensor. - :type tensor: torch.Tensor - :param mask: Mask defining which edges go in the correct direction. - :type mask: torch.Tensor - :param reorder_idx: Indices defining how to reorder the tensor values after - concatenating the edge values of both directions. - :type reorder_idx: torch.Tensor - :param opposite_neg: Whether the edge in the opposite direction should use the - negative tensor value. - :type opposite_neg: bool - - :returns: **tensor_ordered** -- A tensor with symmetrized values. - :rtype: torch.Tensor - - - .. py:method:: symmetrize_edges(graph, batch_idx) - - Symmetrize edges to ensure existence of counter-directional edges. - - Some edges are only present in one direction in the data, - since every atom has a maximum number of neighbors. - We only use i->j edges here. So we lose some j->i edges - and add others by making it symmetric. - - - .. py:method:: subselect_edges(data, graph, cutoff=None, max_neighbors=None) - - Subselect edges using a stricter cutoff and max_neighbors. - - - .. py:method:: generate_graph_dict(data, cutoff, max_neighbors) - - Generate a radius/nearest neighbor graph. - - - .. py:method:: subselect_graph(data, graph, cutoff, max_neighbors, cutoff_orig, max_neighbors_orig) - - If the new cutoff and max_neighbors is different from the original, - subselect the edges of a given graph. - - - .. py:method:: get_graphs_and_indices(data) - - "Generate embedding and interaction graphs and indices. - - - .. py:method:: get_bases(main_graph, a2a_graph, a2ee2a_graph, qint_graph, trip_idx_e2e, trip_idx_a2e, trip_idx_e2a, quad_idx, num_atoms) - - Calculate and transform basis functions. - - - .. py:method:: forward(data) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/initializers/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/initializers/index.rst deleted file mode 100644 index 083c0702c..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/initializers/index.rst +++ /dev/null @@ -1,58 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.initializers` -===================================================== - -.. py:module:: fairchem.core.models.gemnet_oc.initializers - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.initializers._standardize - fairchem.core.models.gemnet_oc.initializers.he_orthogonal_init - fairchem.core.models.gemnet_oc.initializers.grid_init - fairchem.core.models.gemnet_oc.initializers.log_grid_init - fairchem.core.models.gemnet_oc.initializers.get_initializer - - - -.. py:function:: _standardize(kernel) - - Makes sure that N*Var(W) = 1 and E[W] = 0 - - -.. 
py:function:: he_orthogonal_init(tensor: torch.Tensor) -> torch.Tensor - - Generate a weight matrix with variance according to He (Kaiming) initialization. - Based on a random (semi-)orthogonal matrix neural networks - are expected to learn better when features are decorrelated - (stated by eg. "Reducing overfitting in deep networks by decorrelating representations", - "Dropout: a simple way to prevent neural networks from overfitting", - "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks") - - -.. py:function:: grid_init(tensor: torch.Tensor, start: int = -1, end: int = 1) -> torch.Tensor - - Generate a weight matrix so that each input value corresponds to one value on a regular grid between start and end. - - -.. py:function:: log_grid_init(tensor: torch.Tensor, start: int = -4, end: int = 0) -> torch.Tensor - - Generate a weight matrix so that each input value corresponds to one value on a regular logarithmic grid between 10^start and 10^end. - - -.. py:function:: get_initializer(name, **init_kwargs) - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/interaction_indices/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/interaction_indices/index.rst deleted file mode 100644 index 6a59bc7fb..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/interaction_indices/index.rst +++ /dev/null @@ -1,132 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.interaction_indices` -============================================================ - -.. py:module:: fairchem.core.models.gemnet_oc.interaction_indices - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.interaction_indices.get_triplets - fairchem.core.models.gemnet_oc.interaction_indices.get_mixed_triplets - fairchem.core.models.gemnet_oc.interaction_indices.get_quadruplets - - - -.. py:function:: get_triplets(graph, num_atoms: int) - - Get all input edges b->a for each output edge c->a. - It is possible that b=c, as long as the edges are distinct - (i.e. atoms b and c stem from different unit cells). - - :param graph: Contains the graph's edge_index. - :type graph: dict of torch.Tensor - :param num_atoms: Total number of atoms. - :type num_atoms: int - - :returns: - - in: torch.Tensor, shape (num_triplets,) - Indices of input edge b->a of each triplet b->a<-c - out: torch.Tensor, shape (num_triplets,) - Indices of output edge c->a of each triplet b->a<-c - out_agg: torch.Tensor, shape (num_triplets,) - Indices enumerating the intermediate edges of each output edge. - Used for creating a padded matrix and aggregating via matmul. - :rtype: Dictionary containing the entries - - -.. py:function:: get_mixed_triplets(graph_in, graph_out, num_atoms, to_outedge=False, return_adj=False, return_agg_idx=False) - - Get all output edges (ingoing or outgoing) for each incoming edge. - It is possible that in atom=out atom, as long as the edges are distinct - (i.e. they stem from different unit cells). In edges and out edges stem - from separate graphs (hence "mixed") with shared atoms. - - :param graph_in: Contains the input graph's edge_index and cell_offset. - :type graph_in: dict of torch.Tensor - :param graph_out: Contains the output graph's edge_index and cell_offset. 
- Input and output graphs use the same atoms, but different edges. - :type graph_out: dict of torch.Tensor - :param num_atoms: Total number of atoms. - :type num_atoms: int - :param to_outedge: Whether to map the output to the atom's outgoing edges a->c - instead of the ingoing edges c->a. - :type to_outedge: bool - :param return_adj: Whether to output the adjacency (incidence) matrix between output - edges and atoms adj_edges. - :type return_adj: bool - :param return_agg_idx: Whether to output the indices enumerating the intermediate edges - of each output edge. - :type return_agg_idx: bool - - :returns: - - in: torch.Tensor, shape (num_triplets,) - Indices of input edges - out: torch.Tensor, shape (num_triplets,) - Indices of output edges - adj_edges: SparseTensor, shape (num_edges, num_atoms) - Adjacency (incidence) matrix between output edges and atoms, - with values specifying the input edges. - Only returned if return_adj is True. - out_agg: torch.Tensor, shape (num_triplets,) - Indices enumerating the intermediate edges of each output edge. - Used for creating a padded matrix and aggregating via matmul. - Only returned if return_agg_idx is True. - :rtype: Dictionary containing the entries - - -.. py:function:: get_quadruplets(main_graph, qint_graph, num_atoms) - - Get all d->b for each edge c->a and connection b->a - Careful about periodic images! - Separate interaction cutoff not supported. - - :param main_graph: Contains the main graph's edge_index and cell_offset. - The main graph defines which edges are embedded. - :type main_graph: dict of torch.Tensor - :param qint_graph: Contains the quadruplet interaction graph's edge_index and - cell_offset. main_graph and qint_graph use the same atoms, - but different edges. - :type qint_graph: dict of torch.Tensor - :param num_atoms: Total number of atoms. - :type num_atoms: int - - :returns: - - triplet_in['in']: torch.Tensor, shape (nTriplets,) - Indices of input edge d->b in triplet d->b->a. - triplet_in['out']: torch.Tensor, shape (nTriplets,) - Interaction indices of output edge b->a in triplet d->b->a. - triplet_out['in']: torch.Tensor, shape (nTriplets,) - Interaction indices of input edge b->a in triplet c->a<-b. - triplet_out['out']: torch.Tensor, shape (nTriplets,) - Indices of output edge c->a in triplet c->a<-b. - out: torch.Tensor, shape (nQuadruplets,) - Indices of output edge c->a in quadruplet - trip_in_to_quad: torch.Tensor, shape (nQuadruplets,) - Indices to map from input triplet d->b->a - to quadruplet d->b->a<-c. - trip_out_to_quad: torch.Tensor, shape (nQuadruplets,) - Indices to map from output triplet c->a<-b - to quadruplet d->b->a<-c. - out_agg: torch.Tensor, shape (num_triplets,) - Indices enumerating the intermediate edges of each output edge. - Used for creating a padded matrix and aggregating via matmul. - :rtype: Dictionary containing the entries - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/atom_update_block/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/atom_update_block/index.rst deleted file mode 100644 index b2ea43320..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/atom_update_block/index.rst +++ /dev/null @@ -1,85 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.layers.atom_update_block` -================================================================= - -.. py:module:: fairchem.core.models.gemnet_oc.layers.atom_update_block - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. 
- This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.layers.atom_update_block.AtomUpdateBlock - fairchem.core.models.gemnet_oc.layers.atom_update_block.OutputBlock - - - - -.. py:class:: AtomUpdateBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, activation=None) - - - Bases: :py:obj:`torch.nn.Module` - - Aggregate the message embeddings of the atoms - - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_rbf: Embedding size of the radial basis. - :type emb_size_rbf: int - :param nHidden: Number of residual blocks. - :type nHidden: int - :param activation: Name of the activation function to use in the dense layers. - :type activation: callable/str - - .. py:method:: get_mlp(units_in: int, units: int, nHidden: int, activation) - - - .. py:method:: forward(h: torch.Tensor, m, basis_rad, idx_atom) - - :returns: **h** -- Atom embedding. - :rtype: torch.Tensor, shape=(nAtoms, emb_size_atom) - - - -.. py:class:: OutputBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, nHidden_afteratom: int, activation: str | None = None, direct_forces: bool = True) - - - Bases: :py:obj:`AtomUpdateBlock` - - Combines the atom update block and subsequent final dense layer. - - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_rbf: Embedding size of the radial basis. - :type emb_size_rbf: int - :param nHidden: Number of residual blocks before adding the atom embedding. - :type nHidden: int - :param nHidden_afteratom: Number of residual blocks after adding the atom embedding. - :type nHidden_afteratom: int - :param activation: Name of the activation function to use in the dense layers. - :type activation: str - :param direct_forces: If true directly predict forces, i.e. without taking the gradient - of the energy potential. - :type direct_forces: bool - - .. py:method:: forward(h: torch.Tensor, m: torch.Tensor, basis_rad, idx_atom) - - :returns: * *torch.Tensor, shape=(nAtoms, emb_size_atom)* -- Output atom embeddings. - * *torch.Tensor, shape=(nEdges, emb_size_edge)* -- Output edge embeddings. - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/base_layers/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/base_layers/index.rst deleted file mode 100644 index 7ce2f33fc..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/base_layers/index.rst +++ /dev/null @@ -1,111 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.layers.base_layers` -=========================================================== - -.. py:module:: fairchem.core.models.gemnet_oc.layers.base_layers - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.layers.base_layers.Dense - fairchem.core.models.gemnet_oc.layers.base_layers.ScaledSiLU - fairchem.core.models.gemnet_oc.layers.base_layers.ResidualLayer - - - - -.. 
py:class:: Dense(in_features: int, out_features: int, bias: bool = False, activation: str | None = None) - - - Bases: :py:obj:`torch.nn.Module` - - Combines dense layer with scaling for silu activation. - - :param in_features: Input embedding size. - :type in_features: int - :param out_features: Output embedding size. - :type out_features: int - :param bias: True if use bias. - :type bias: bool - :param activation: Name of the activation function to use. - :type activation: str - - .. py:method:: reset_parameters(initializer=he_orthogonal_init) -> None - - - .. py:method:: forward(x) - - - -.. py:class:: ScaledSiLU - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x) - - - -.. py:class:: ResidualLayer(units: int, nLayers: int = 2, layer=Dense, **layer_kwargs) - - - Bases: :py:obj:`torch.nn.Module` - - Residual block with output scaled by 1/sqrt(2). - - :param units: Input and output embedding size. - :type units: int - :param nLayers: Number of dense layers. - :type nLayers: int - :param layer: Class for the layers inside the residual block. - :type layer: torch.nn.Module - :param layer_kwargs: Keyword arguments for initializing the layers. - :type layer_kwargs: str - - .. py:method:: forward(input) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/basis_utils/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/basis_utils/index.rst deleted file mode 100644 index 3ce52cef1..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/basis_utils/index.rst +++ /dev/null @@ -1,127 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.layers.basis_utils` -=========================================================== - -.. py:module:: fairchem.core.models.gemnet_oc.layers.basis_utils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.layers.basis_utils.Jn - fairchem.core.models.gemnet_oc.layers.basis_utils.Jn_zeros - fairchem.core.models.gemnet_oc.layers.basis_utils.spherical_bessel_formulas - fairchem.core.models.gemnet_oc.layers.basis_utils.bessel_basis - fairchem.core.models.gemnet_oc.layers.basis_utils.sph_harm_prefactor - fairchem.core.models.gemnet_oc.layers.basis_utils.associated_legendre_polynomials - fairchem.core.models.gemnet_oc.layers.basis_utils.real_sph_harm - fairchem.core.models.gemnet_oc.layers.basis_utils.get_sph_harm_basis - - - -.. 
py:function:: Jn(r: float, n: int) - - Numerical spherical Bessel function of order n. - - -.. py:function:: Jn_zeros(n: int, k: int) - - Compute the first k zeros of the spherical Bessel functions - up to order n (excluded). - - -.. py:function:: spherical_bessel_formulas(n: int) - - Computes the sympy formulas for the spherical Bessel functions - up to order n (excluded). - - -.. py:function:: bessel_basis(n: int, k: int) - - Compute the sympy formulas for the normalized and rescaled spherical Bessel - functions up to order n (excluded) and maximum frequency k (excluded). - - :returns: **bess_basis** -- Bessel basis formulas taking in a single argument x. - Has length n where each element has length k. -> In total n*k many. - :rtype: list - - -.. py:function:: sph_harm_prefactor(l_degree: int, m_order: int) - - Computes the constant pre-factor for the spherical harmonic - of degree l and order m. - - :param l_degree: Degree of the spherical harmonic. l >= 0 - :type l_degree: int - :param m_order: Order of the spherical harmonic. -l <= m <= l - :type m_order: int - - :returns: **factor** - :rtype: float - - -.. py:function:: associated_legendre_polynomials(L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True) - - Computes string formulas of the associated Legendre polynomials - up to degree L (excluded). - - :param L_maxdegree: Degree up to which to calculate the associated Legendre polynomials - (degree L is excluded). - :type L_maxdegree: int - :param zero_m_only: If True, only calculate the polynomials where m=0. - :type zero_m_only: bool - :param pos_m_only: If True, only calculate the polynomials where m>=0. - Overwritten by zero_m_only. - :type pos_m_only: bool - - :returns: **polynomials** -- Contains the sympy functions of the polynomials - (in total L many if zero_m_only is True, else L^2 many). - :rtype: list - - -.. py:function:: real_sph_harm(L_maxdegree: int, use_theta: bool, use_phi: bool = True, zero_m_only: bool = True) -> None - - Computes formula strings of the real part of the spherical harmonics - up to degree L (excluded). Variables are either spherical coordinates phi - and theta, or Cartesian coordinates x, y, z, on the UNIT SPHERE. - - :param L_maxdegree: Degree up to which to calculate the spherical harmonics - (degree L is excluded). - :type L_maxdegree: int - :param use_theta: - - True: Expects the input of the formula strings to contain theta. - - False: Expects the input of the formula strings to contain z. - :type use_theta: bool - :param use_phi: - - True: Expects the input of the formula strings to contain phi. - - False: Expects the input of the formula strings to contain x and y. - Does nothing if zero_m_only is True. - :type use_phi: bool - :param zero_m_only: If True, only calculate the harmonics where m=0. - :type zero_m_only: bool - - :returns: **Y_lm_real** -- Formula strings of the real part of the spherical - harmonics up to degree L (excluded). - In total L^2 many spherical harmonics exist up to degree L (excluded). - However, if zero_m_only is True, the total count - is reduced to L. - :rtype: list - - -.. py:function:: get_sph_harm_basis(L_maxdegree: int, zero_m_only: bool = True) - - Get a function calculating the spherical harmonics basis from z and phi. 
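A minimal sketch of how the sympy-based basis helpers above fit together, assuming the module is importable under the documented path ``fairchem.core.models.gemnet_oc.layers.basis_utils`` and relying only on the signatures listed in this section::

    from fairchem.core.models.gemnet_oc.layers.basis_utils import (
        Jn_zeros,
        bessel_basis,
        get_sph_harm_basis,
    )

    # First k = 2 zeros of the spherical Bessel functions up to order n = 3 (excluded).
    zeros = Jn_zeros(n=3, k=2)

    # Sympy formulas for the normalized, rescaled spherical Bessel basis:
    # a list of length n whose entries each hold k formulas in a single variable x.
    bess_basis = bessel_basis(n=3, k=2)

    # Callable evaluating the m = 0 spherical-harmonics basis from z and phi,
    # as described for get_sph_harm_basis above.
    sph_harm_basis = get_sph_harm_basis(L_maxdegree=4, zero_m_only=True)

Note that, per the docstrings above, these helpers return symbolic expressions and callables rather than tensors.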
- - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/efficient/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/efficient/index.rst deleted file mode 100644 index c8e5da08d..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/efficient/index.rst +++ /dev/null @@ -1,118 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.layers.efficient` -========================================================= - -.. py:module:: fairchem.core.models.gemnet_oc.layers.efficient - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.layers.efficient.BasisEmbedding - fairchem.core.models.gemnet_oc.layers.efficient.EfficientInteractionBilinear - - - - -.. py:class:: BasisEmbedding(num_radial: int, emb_size_interm: int, num_spherical: int | None = None) - - - Bases: :py:obj:`torch.nn.Module` - - Embed a basis (CBF, SBF), optionally using the efficient reformulation. - - :param num_radial: Number of radial basis functions. - :type num_radial: int - :param emb_size_interm: Intermediate embedding size of triplets/quadruplets. - :type emb_size_interm: int - :param num_spherical: Number of circular/spherical basis functions. - Only required if there is a circular/spherical basis. - :type num_spherical: int - - .. py:attribute:: weight - :type: torch.nn.Parameter - - - - .. py:method:: reset_parameters() -> None - - - .. py:method:: forward(rad_basis, sph_basis=None, idx_rad_outer=None, idx_rad_inner=None, idx_sph_outer=None, idx_sph_inner=None, num_atoms=None) - - :param rad_basis: Raw radial basis. - :type rad_basis: torch.Tensor, shape=(num_edges, num_radial or num_orders * num_radial) - :param sph_basis: Raw spherical or circular basis. - :type sph_basis: torch.Tensor, shape=(num_triplets or num_quadruplets, num_spherical) - :param idx_rad_outer: Atom associated with each radial basis value. - Optional, used for efficient edge aggregation. - :type idx_rad_outer: torch.Tensor, shape=(num_edges) - :param idx_rad_inner: Enumerates radial basis values per atom. - Optional, used for efficient edge aggregation. - :type idx_rad_inner: torch.Tensor, shape=(num_edges) - :param idx_sph_outer: Edge associated with each circular/spherical basis value. - Optional, used for efficient triplet/quadruplet aggregation. - :type idx_sph_outer: torch.Tensor, shape=(num_triplets or num_quadruplets) - :param idx_sph_inner: Enumerates circular/spherical basis values per edge. - Optional, used for efficient triplet/quadruplet aggregation. - :type idx_sph_inner: torch.Tensor, shape=(num_triplets or num_quadruplets) - :param num_atoms: Total number of atoms. - Optional, used for efficient edge aggregation. - :type num_atoms: int - - :returns: * **rad_W1** (*torch.Tensor, shape=(num_edges, emb_size_interm, num_spherical)*) - * **sph** (*torch.Tensor, shape=(num_edges, Kmax, num_spherical)*) -- Kmax = maximum number of neighbors of the edges - - - -.. py:class:: EfficientInteractionBilinear(emb_size_in: int, emb_size_interm: int, emb_size_out: int) - - - Bases: :py:obj:`torch.nn.Module` - - Efficient reformulation of the bilinear layer and subsequent summation. - - :param emb_size_in: Embedding size of input triplets/quadruplets. 
- :type emb_size_in: int - :param emb_size_interm: Intermediate embedding size of the basis transformation. - :type emb_size_interm: int - :param emb_size_out: Embedding size of output triplets/quadruplets. - :type emb_size_out: int - - .. py:method:: forward(basis, m, idx_agg_outer, idx_agg_inner, idx_agg2_outer=None, idx_agg2_inner=None, agg2_out_size=None) - - :param basis: - shapes=((num_edges, emb_size_interm, num_spherical), - (num_edges, num_spherical, Kmax)) - First element: Radial basis multiplied with weight matrix - Second element: Circular/spherical basis - :type basis: Tuple (torch.Tensor, torch.Tensor), - :param m: Input edge embeddings - :type m: torch.Tensor, shape=(num_edges, emb_size_in) - :param idx_agg_outer: Output edge aggregating this intermediate triplet/quadruplet edge. - :type idx_agg_outer: torch.Tensor, shape=(num_triplets or num_quadruplets) - :param idx_agg_inner: Enumerates intermediate edges per output edge. - :type idx_agg_inner: torch.Tensor, shape=(num_triplets or num_quadruplets) - :param idx_agg2_outer: Output atom aggregating this edge. - :type idx_agg2_outer: torch.Tensor, shape=(num_edges) - :param idx_agg2_inner: Enumerates edges per output atom. - :type idx_agg2_inner: torch.Tensor, shape=(num_edges) - :param agg2_out_size: Number of output embeddings when aggregating twice. Typically - the number of atoms. - :type agg2_out_size: int - - :returns: **m_ca** -- Aggregated edge/atom embeddings. - :rtype: torch.Tensor, shape=(num_edges, emb_size) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/embedding_block/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/embedding_block/index.rst deleted file mode 100644 index b53c9017c..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/embedding_block/index.rst +++ /dev/null @@ -1,74 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.layers.embedding_block` -=============================================================== - -.. py:module:: fairchem.core.models.gemnet_oc.layers.embedding_block - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.layers.embedding_block.AtomEmbedding - fairchem.core.models.gemnet_oc.layers.embedding_block.EdgeEmbedding - - - - -.. py:class:: AtomEmbedding(emb_size: int, num_elements: int) - - - Bases: :py:obj:`torch.nn.Module` - - Initial atom embeddings based on the atom type - - :param emb_size: Atom embeddings size - :type emb_size: int - - .. py:method:: forward(Z) -> torch.Tensor - - :returns: **h** -- Atom embeddings. - :rtype: torch.Tensor, shape=(nAtoms, emb_size) - - - -.. py:class:: EdgeEmbedding(atom_features: int, edge_features: int, out_features: int, activation: str | None = None) - - - Bases: :py:obj:`torch.nn.Module` - - Edge embedding based on the concatenation of atom embeddings - and a subsequent dense layer. - - :param atom_features: Embedding size of the atom embedding. - :type atom_features: int - :param edge_features: Embedding size of the input edge embedding. - :type edge_features: int - :param out_features: Embedding size after the dense layer. - :type out_features: int - :param activation: Activation function used in the dense layer. - :type activation: str - - .. 
py:method:: forward(h: torch.Tensor, m: torch.Tensor, edge_index) -> torch.Tensor - - :param h: Atom embeddings. - :type h: torch.Tensor, shape (num_atoms, atom_features) - :param m: Radial basis in embedding block, - edge embedding in interaction block. - :type m: torch.Tensor, shape (num_edges, edge_features) - - :returns: **m_st** -- Edge embeddings. - :rtype: torch.Tensor, shape=(nEdges, emb_size) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/force_scaler/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/force_scaler/index.rst deleted file mode 100644 index ba8de1bc2..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/force_scaler/index.rst +++ /dev/null @@ -1,49 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.layers.force_scaler` -============================================================ - -.. py:module:: fairchem.core.models.gemnet_oc.layers.force_scaler - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.layers.force_scaler.ForceScaler - - - - -.. py:class:: ForceScaler(init_scale: float = 2.0**8, growth_factor: float = 2.0, backoff_factor: float = 0.5, growth_interval: int = 2000, max_force_iters: int = 50, enabled: bool = True) - - - Scales up the energy and then scales down the forces - to prevent NaNs and infs in calculations using AMP. - Inspired by torch.cuda.amp.GradScaler. - - .. py:method:: scale(energy) - - - .. py:method:: unscale(forces) - - - .. py:method:: calc_forces(energy, pos) - - - .. py:method:: calc_forces_and_update(energy, pos) - - - .. py:method:: update() -> None - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/index.rst deleted file mode 100644 index ff09fd285..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/index.rst +++ /dev/null @@ -1,23 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.layers` -=============================================== - -.. py:module:: fairchem.core.models.gemnet_oc.layers - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - atom_update_block/index.rst - base_layers/index.rst - basis_utils/index.rst - efficient/index.rst - embedding_block/index.rst - force_scaler/index.rst - interaction_block/index.rst - radial_basis/index.rst - spherical_basis/index.rst - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/interaction_block/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/interaction_block/index.rst deleted file mode 100644 index 3e12cc37d..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/interaction_block/index.rst +++ /dev/null @@ -1,186 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.layers.interaction_block` -================================================================= - -.. py:module:: fairchem.core.models.gemnet_oc.layers.interaction_block - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. 
autoapisummary:: - - fairchem.core.models.gemnet_oc.layers.interaction_block.InteractionBlock - fairchem.core.models.gemnet_oc.layers.interaction_block.QuadrupletInteraction - fairchem.core.models.gemnet_oc.layers.interaction_block.TripletInteraction - fairchem.core.models.gemnet_oc.layers.interaction_block.PairInteraction - - - - -.. py:class:: InteractionBlock(emb_size_atom: int, emb_size_edge: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_quad_in: int, emb_size_quad_out: int, emb_size_a2a_in: int, emb_size_a2a_out: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_sbf: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, num_atom_emb_layers: int = 0, quad_interaction: bool = False, atom_edge_interaction: bool = False, edge_atom_interaction: bool = False, atom_interaction: bool = False, activation=None) - - - Bases: :py:obj:`torch.nn.Module` - - Interaction block for GemNet-Q/dQ. - - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_trip_in: (Down-projected) embedding size of the quadruplet edge embeddings - before the bilinear layer. - :type emb_size_trip_in: int - :param emb_size_trip_out: (Down-projected) embedding size of the quadruplet edge embeddings - after the bilinear layer. - :type emb_size_trip_out: int - :param emb_size_quad_in: (Down-projected) embedding size of the quadruplet edge embeddings - before the bilinear layer. - :type emb_size_quad_in: int - :param emb_size_quad_out: (Down-projected) embedding size of the quadruplet edge embeddings - after the bilinear layer. - :type emb_size_quad_out: int - :param emb_size_a2a_in: Embedding size in the atom interaction before the bilinear layer. - :type emb_size_a2a_in: int - :param emb_size_a2a_out: Embedding size in the atom interaction after the bilinear layer. - :type emb_size_a2a_out: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param emb_size_sbf: Embedding size of the spherical basis transformation (two angles). - :type emb_size_sbf: int - :param num_before_skip: Number of residual blocks before the first skip connection. - :type num_before_skip: int - :param num_after_skip: Number of residual blocks after the first skip connection. - :type num_after_skip: int - :param num_concat: Number of residual blocks after the concatenation. - :type num_concat: int - :param num_atom: Number of residual blocks in the atom embedding blocks. - :type num_atom: int - :param num_atom_emb_layers: Number of residual blocks for transforming atom embeddings. - :type num_atom_emb_layers: int - :param quad_interaction: Whether to use quadruplet interactions. - :type quad_interaction: bool - :param atom_edge_interaction: Whether to use atom-to-edge interactions. - :type atom_edge_interaction: bool - :param edge_atom_interaction: Whether to use edge-to-atom interactions. - :type edge_atom_interaction: bool - :param atom_interaction: Whether to use atom-to-atom interactions. - :type atom_interaction: bool - :param activation: Name of the activation function to use in the dense layers. - :type activation: str - - .. 
py:method:: forward(h, m, bases_qint, bases_e2e, bases_a2e, bases_e2a, basis_a2a_rad, basis_atom_update, edge_index_main, a2ee2a_graph, a2a_graph, id_swap, trip_idx_e2e, trip_idx_a2e, trip_idx_e2a, quad_idx) - - :returns: * **h** (*torch.Tensor, shape=(nEdges, emb_size_atom)*) -- Atom embeddings. - * **m** (*torch.Tensor, shape=(nEdges, emb_size_edge)*) -- Edge embeddings (c->a). - - - -.. py:class:: QuadrupletInteraction(emb_size_edge, emb_size_quad_in, emb_size_quad_out, emb_size_rbf, emb_size_cbf, emb_size_sbf, symmetric_mp=True, activation=None) - - - Bases: :py:obj:`torch.nn.Module` - - Quadruplet-based message passing block. - - :param emb_size_edge: Embedding size of the edges. - :type emb_size_edge: int - :param emb_size_quad_in: (Down-projected) embedding size of the quadruplet edge embeddings - before the bilinear layer. - :type emb_size_quad_in: int - :param emb_size_quad_out: (Down-projected) embedding size of the quadruplet edge embeddings - after the bilinear layer. - :type emb_size_quad_out: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param emb_size_sbf: Embedding size of the spherical basis transformation (two angles). - :type emb_size_sbf: int - :param symmetric_mp: Whether to use symmetric message passing and - update the edges in both directions. - :type symmetric_mp: bool - :param activation: Name of the activation function to use in the dense layers. - :type activation: str - - .. py:method:: forward(m, bases, idx, id_swap) - - :returns: **m** -- Edge embeddings (c->a). - :rtype: torch.Tensor, shape=(nEdges, emb_size_edge) - - - -.. py:class:: TripletInteraction(emb_size_in: int, emb_size_out: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_rbf: int, emb_size_cbf: int, symmetric_mp: bool = True, swap_output: bool = True, activation=None) - - - Bases: :py:obj:`torch.nn.Module` - - Triplet-based message passing block. - - :param emb_size_in: Embedding size of the input embeddings. - :type emb_size_in: int - :param emb_size_out: Embedding size of the output embeddings. - :type emb_size_out: int - :param emb_size_trip_in: (Down-projected) embedding size of the quadruplet edge embeddings - before the bilinear layer. - :type emb_size_trip_in: int - :param emb_size_trip_out: (Down-projected) embedding size of the quadruplet edge embeddings - after the bilinear layer. - :type emb_size_trip_out: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param emb_size_cbf: Embedding size of the circular basis transformation (one angle). - :type emb_size_cbf: int - :param symmetric_mp: Whether to use symmetric message passing and - update the edges in both directions. - :type symmetric_mp: bool - :param swap_output: Whether to swap the output embedding directions. - Only relevant if symmetric_mp is False. - :type swap_output: bool - :param activation: Name of the activation function to use in the dense layers. - :type activation: str - - .. py:method:: forward(m, bases, idx, id_swap, expand_idx=None, idx_agg2=None, idx_agg2_inner=None, agg2_out_size=None) - - :returns: **m** -- Edge embeddings. - :rtype: torch.Tensor, shape=(nEdges, emb_size_edge) - - - -.. py:class:: PairInteraction(emb_size_atom, emb_size_pair_in, emb_size_pair_out, emb_size_rbf, activation=None) - - - Bases: :py:obj:`torch.nn.Module` - - Pair-based message passing block. 
- - :param emb_size_atom: Embedding size of the atoms. - :type emb_size_atom: int - :param emb_size_pair_in: Embedding size of the atom pairs before the bilinear layer. - :type emb_size_pair_in: int - :param emb_size_pair_out: Embedding size of the atom pairs after the bilinear layer. - :type emb_size_pair_out: int - :param emb_size_rbf: Embedding size of the radial basis transformation. - :type emb_size_rbf: int - :param activation: Name of the activation function to use in the dense layers. - :type activation: str - - .. py:method:: forward(h, rad_basis, edge_index, target_neighbor_idx) - - :returns: **h** -- Atom embeddings. - :rtype: torch.Tensor, shape=(num_atoms, emb_size_atom) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/radial_basis/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/radial_basis/index.rst deleted file mode 100644 index 7ab5b069d..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/radial_basis/index.rst +++ /dev/null @@ -1,156 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.layers.radial_basis` -============================================================ - -.. py:module:: fairchem.core.models.gemnet_oc.layers.radial_basis - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.layers.radial_basis.PolynomialEnvelope - fairchem.core.models.gemnet_oc.layers.radial_basis.ExponentialEnvelope - fairchem.core.models.gemnet_oc.layers.radial_basis.GaussianBasis - fairchem.core.models.gemnet_oc.layers.radial_basis.SphericalBesselBasis - fairchem.core.models.gemnet_oc.layers.radial_basis.BernsteinBasis - fairchem.core.models.gemnet_oc.layers.radial_basis.RadialBasis - - - - -.. py:class:: PolynomialEnvelope(exponent: int) - - - Bases: :py:obj:`torch.nn.Module` - - Polynomial envelope function that ensures a smooth cutoff. - - :param exponent: Exponent of the envelope function. - :type exponent: int - - .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor - - - -.. py:class:: ExponentialEnvelope - - - Bases: :py:obj:`torch.nn.Module` - - Exponential envelope function that ensures a smooth cutoff, - as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. - SpookyNet: Learning Force Fields with Electronic Degrees of Freedom - and Nonlocal Effects - - .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor - - - -.. py:class:: GaussianBasis(start: float = 0.0, stop: float = 5.0, num_gaussians: int = 50, trainable: bool = False) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. 
note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(dist: torch.Tensor) -> torch.Tensor - - - -.. py:class:: SphericalBesselBasis(num_radial: int, cutoff: float) - - - Bases: :py:obj:`torch.nn.Module` - - First-order spherical Bessel basis - - :param num_radial: Number of basis functions. Controls the maximum frequency. - :type num_radial: int - :param cutoff: Cutoff distance in Angstrom. - :type cutoff: float - - .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor - - - -.. py:class:: BernsteinBasis(num_radial: int, pregamma_initial: float = 0.45264) - - - Bases: :py:obj:`torch.nn.Module` - - Bernstein polynomial basis, - as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. - SpookyNet: Learning Force Fields with Electronic Degrees of Freedom - and Nonlocal Effects - - :param num_radial: Number of basis functions. Controls the maximum frequency. - :type num_radial: int - :param pregamma_initial: Initial value of exponential coefficient gamma. - Default: gamma = 0.5 * a_0**-1 = 0.94486, - inverse softplus -> pregamma = log e**gamma - 1 = 0.45264 - :type pregamma_initial: float - - .. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor - - - -.. py:class:: RadialBasis(num_radial: int, cutoff: float, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None, scale_basis: bool = False) - - - Bases: :py:obj:`torch.nn.Module` - - :param num_radial: Number of basis functions. Controls the maximum frequency. - :type num_radial: int - :param cutoff: Cutoff distance in Angstrom. - :type cutoff: float - :param rbf: Basis function and its hyperparameters. - :type rbf: dict = {"name": "gaussian"} - :param envelope: Envelope function and its hyperparameters. - :type envelope: dict = {"name": "polynomial", "exponent": 5} - :param scale_basis: Whether to scale the basis values for better numerical stability. - :type scale_basis: bool - - .. py:method:: forward(d: torch.Tensor) -> torch.Tensor - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/spherical_basis/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/spherical_basis/index.rst deleted file mode 100644 index 20dabbef4..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/layers/spherical_basis/index.rst +++ /dev/null @@ -1,67 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.layers.spherical_basis` -=============================================================== - -.. py:module:: fairchem.core.models.gemnet_oc.layers.spherical_basis - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.layers.spherical_basis.CircularBasisLayer - fairchem.core.models.gemnet_oc.layers.spherical_basis.SphericalBasisLayer - - - - -.. py:class:: CircularBasisLayer(num_spherical: int, radial_basis: fairchem.core.models.gemnet_oc.layers.radial_basis.RadialBasis, cbf: dict, scale_basis: bool = False) - - - Bases: :py:obj:`torch.nn.Module` - - 2D Fourier Bessel Basis - - :param num_spherical: Number of basis functions. Controls the maximum frequency. 
- :type num_spherical: int - :param radial_basis: Radial basis function. - :type radial_basis: RadialBasis - :param cbf: Name and hyperparameters of the circular basis function. - :type cbf: dict - :param scale_basis: Whether to scale the basis values for better numerical stability. - :type scale_basis: bool - - .. py:method:: forward(D_ca, cosφ_cab) - - - -.. py:class:: SphericalBasisLayer(num_spherical: int, radial_basis: fairchem.core.models.gemnet_oc.layers.radial_basis.RadialBasis, sbf: dict, scale_basis: bool = False) - - - Bases: :py:obj:`torch.nn.Module` - - 3D Fourier Bessel Basis - - :param num_spherical: Number of basis functions. Controls the maximum frequency. - :type num_spherical: int - :param radial_basis: Radial basis functions. - :type radial_basis: RadialBasis - :param sbf: Name and hyperparameters of the spherical basis function. - :type sbf: dict - :param scale_basis: Whether to scale the basis values for better numerical stability. - :type scale_basis: bool - - .. py:method:: forward(D_ca, cosφ_cab, θ_cabd) - - - diff --git a/_sources/autoapi/fairchem/core/models/gemnet_oc/utils/index.rst b/_sources/autoapi/fairchem/core/models/gemnet_oc/utils/index.rst deleted file mode 100644 index 0ec6837bf..000000000 --- a/_sources/autoapi/fairchem/core/models/gemnet_oc/utils/index.rst +++ /dev/null @@ -1,176 +0,0 @@ -:py:mod:`fairchem.core.models.gemnet_oc.utils` -============================================== - -.. py:module:: fairchem.core.models.gemnet_oc.utils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.gemnet_oc.utils.ragged_range - fairchem.core.models.gemnet_oc.utils.repeat_blocks - fairchem.core.models.gemnet_oc.utils.masked_select_sparsetensor_flat - fairchem.core.models.gemnet_oc.utils.calculate_interatomic_vectors - fairchem.core.models.gemnet_oc.utils.inner_product_clamped - fairchem.core.models.gemnet_oc.utils.get_angle - fairchem.core.models.gemnet_oc.utils.vector_rejection - fairchem.core.models.gemnet_oc.utils.get_projected_angle - fairchem.core.models.gemnet_oc.utils.mask_neighbors - fairchem.core.models.gemnet_oc.utils.get_neighbor_order - fairchem.core.models.gemnet_oc.utils.get_inner_idx - fairchem.core.models.gemnet_oc.utils.get_edge_id - - - -.. py:function:: ragged_range(sizes) - - Multiple concatenated ranges. - - .. rubric:: Examples - - sizes = [1 4 2 3] - Return: [0 0 1 2 3 0 1 0 1 2] - - -.. py:function:: repeat_blocks(sizes, repeats, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) -> torch.Tensor - - Repeat blocks of indices. - Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements - - continuous_indexing: Whether to keep increasing the index after each block - start_idx: Starting index - block_inc: Number to increment by after each block, - either global or per block. Shape: len(sizes) - 1 - repeat_inc: Number to increment by after each repetition, - either global or per block - - .. 
rubric:: Examples - - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False - Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True - Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; - repeat_inc = 4 - Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; - start_idx = 5 - Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; - block_inc = 1 - Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7] - sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True - Return: [0 1 2 0 1 2 3 4 3 4 3 4] - sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True - Return: [0 1 0 1 5 6 5 6] - - -.. py:function:: masked_select_sparsetensor_flat(src, mask) -> torch_sparse.SparseTensor - - -.. py:function:: calculate_interatomic_vectors(R, id_s, id_t, offsets_st) - - Calculate the vectors connecting the given atom pairs, - considering offsets from periodic boundary conditions (PBC). - - :param R: Atom positions. - :type R: Tensor, shape = (nAtoms, 3) - :param id_s: Indices of the source atom of the edges. - :type id_s: Tensor, shape = (nEdges,) - :param id_t: Indices of the target atom of the edges. - :type id_t: Tensor, shape = (nEdges,) - :param offsets_st: PBC offsets of the edges. - Subtract this from the correct direction. - :type offsets_st: Tensor, shape = (nEdges,) - - :returns: **(D_st, V_st)** -- - - D_st: Tensor, shape = (nEdges,) - Distance from atom t to s. - V_st: Tensor, shape = (nEdges,) - Unit direction from atom t to s. - :rtype: tuple - - -.. py:function:: inner_product_clamped(x, y) -> torch.Tensor - - Calculate the inner product between the given normalized vectors, - giving a result between -1 and 1. - - -.. py:function:: get_angle(R_ac, R_ab) -> torch.Tensor - - Calculate angles between atoms c -> a <- b. - - :param R_ac: Vector from atom a to c. - :type R_ac: Tensor, shape = (N, 3) - :param R_ab: Vector from atom a to b. - :type R_ab: Tensor, shape = (N, 3) - - :returns: **angle_cab** -- Angle between atoms c <- a -> b. - :rtype: Tensor, shape = (N,) - - -.. py:function:: vector_rejection(R_ab, P_n) - - Project the vector R_ab onto a plane with normal vector P_n. - - :param R_ab: Vector from atom a to b. - :type R_ab: Tensor, shape = (N, 3) - :param P_n: Normal vector of a plane onto which to project R_ab. - :type P_n: Tensor, shape = (N, 3) - - :returns: **R_ab_proj** -- Projected vector (orthogonal to P_n). - :rtype: Tensor, shape = (N, 3) - - -.. py:function:: get_projected_angle(R_ab, P_n, eps: float = 0.0001) -> torch.Tensor - - Project the vector R_ab onto a plane with normal vector P_n, - then calculate the angle w.r.t. the (x [cross] P_n), - or (y [cross] P_n) if the former would be ill-defined/numerically unstable. - - :param R_ab: Vector from atom a to b. - :type R_ab: Tensor, shape = (N, 3) - :param P_n: Normal vector of a plane onto which to project R_ab. - :type P_n: Tensor, shape = (N, 3) - :param eps: Norm of projection below which to use the y-axis instead of x. - :type eps: float - - :returns: **angle_ab** -- Angle on plane w.r.t. x- or y-axis. - :rtype: Tensor, shape = (N) - - -.. py:function:: mask_neighbors(neighbors, edge_mask) - - -.. 
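A minimal sketch exercising two of the index and geometry helpers documented above. It assumes ``sizes`` and ``repeats`` are integer tensors, which the ``torch.Tensor`` return type of ``repeat_blocks`` suggests but the docstring does not state outright; the expected index pattern is copied from the first docstring example::

    import torch

    from fairchem.core.models.gemnet_oc.utils import get_angle, repeat_blocks

    # First repeat_blocks example from the docstring above (continuous_indexing=False).
    sizes = torch.tensor([1, 3, 2])
    repeats = torch.tensor([3, 2, 3])
    idx = repeat_blocks(sizes, repeats, continuous_indexing=False)
    # Documented result: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1]

    # Angle at atom a between the vectors a->c and a->b for a single triplet.
    R_ac = torch.tensor([[1.0, 0.0, 0.0]])
    R_ab = torch.tensor([[0.0, 1.0, 0.0]])
    angle_cab = get_angle(R_ac, R_ab)  # shape (1,); roughly pi/2 for orthogonal vectors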
py:function:: get_neighbor_order(num_atoms: int, index, atom_distance) -> torch.Tensor - - Give a mask that filters out edges so that each atom has at most - `max_num_neighbors_threshold` neighbors. - - -.. py:function:: get_inner_idx(idx, dim_size) - - Assign an inner index to each element (neighbor) with the same index. - For example, with idx=[0 0 0 1 1 1 1 2 2] this returns [0 1 2 0 1 2 3 0 1]. - These indices allow reshape neighbor indices into a dense matrix. - idx has to be sorted for this to work. - - -.. py:function:: get_edge_id(edge_idx, cell_offsets, num_atoms: int) - - diff --git a/_sources/autoapi/fairchem/core/models/index.rst b/_sources/autoapi/fairchem/core/models/index.rst deleted file mode 100644 index fd996b4a0..000000000 --- a/_sources/autoapi/fairchem/core/models/index.rst +++ /dev/null @@ -1,72 +0,0 @@ -:py:mod:`fairchem.core.models` -============================== - -.. py:module:: fairchem.core.models - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - equiformer_v2/index.rst - escn/index.rst - gemnet/index.rst - gemnet_gp/index.rst - gemnet_oc/index.rst - painn/index.rst - scn/index.rst - utils/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - base/index.rst - dimenet_plus_plus/index.rst - model_registry/index.rst - schnet/index.rst - - -Package Contents ----------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.model_name_to_local_file - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.available_pretrained_models - - -.. py:data:: available_pretrained_models - - - -.. py:function:: model_name_to_local_file(model_name: str, local_cache: str | pathlib.Path) -> str - - Download a pretrained checkpoint if it does not exist already - - :param model_name: the model name. See available_pretrained_checkpoints. - :type model_name: str - :param local_cache: path to local cache directory - :type local_cache: str or Path - - :returns: local path to checkpoint file - :rtype: str - - diff --git a/_sources/autoapi/fairchem/core/models/model_registry/index.rst b/_sources/autoapi/fairchem/core/models/model_registry/index.rst deleted file mode 100644 index a5ccdf72e..000000000 --- a/_sources/autoapi/fairchem/core/models/model_registry/index.rst +++ /dev/null @@ -1,57 +0,0 @@ -:py:mod:`fairchem.core.models.model_registry` -============================================= - -.. py:module:: fairchem.core.models.model_registry - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.model_registry.model_name_to_local_file - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.model_registry.MODEL_REGISTRY - fairchem.core.models.model_registry.available_pretrained_models - - -.. py:data:: MODEL_REGISTRY - - - -.. py:data:: available_pretrained_models - - - -.. py:function:: model_name_to_local_file(model_name: str, local_cache: str | pathlib.Path) -> str - - Download a pretrained checkpoint if it does not exist already - - :param model_name: the model name. See available_pretrained_checkpoints. 
- :type model_name: str - :param local_cache: path to local cache directory - :type local_cache: str or Path - - :returns: local path to checkpoint file - :rtype: str - - diff --git a/_sources/autoapi/fairchem/core/models/painn/index.rst b/_sources/autoapi/fairchem/core/models/painn/index.rst deleted file mode 100644 index d7b758d01..000000000 --- a/_sources/autoapi/fairchem/core/models/painn/index.rst +++ /dev/null @@ -1,73 +0,0 @@ -:py:mod:`fairchem.core.models.painn` -==================================== - -.. py:module:: fairchem.core.models.painn - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - painn/index.rst - utils/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.painn.PaiNN - - - - -.. py:class:: PaiNN(num_atoms: int, bond_feat_dim: int, num_targets: int, hidden_channels: int = 512, num_layers: int = 6, num_rbf: int = 128, cutoff: float = 12.0, max_neighbors: int = 50, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None, regress_forces: bool = True, direct_forces: bool = True, use_pbc: bool = True, otf_graph: bool = True, num_elements: int = 83, scale_file: str | None = None) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - PaiNN model based on the description in Schütt et al. (2021): - Equivariant message passing for the prediction of tensorial properties - and molecular spectra, https://arxiv.org/abs/2102.03150. - - .. py:property:: num_params - :type: int - - - .. py:method:: reset_parameters() -> None - - - .. py:method:: select_symmetric_edges(tensor, mask, reorder_idx, inverse_neg) -> torch.Tensor - - - .. py:method:: symmetrize_edges(edge_index, cell_offsets, neighbors, batch_idx, reorder_tensors, reorder_tensors_invneg) - - Symmetrize edges to ensure existence of counter-directional edges. - - Some edges are only present in one direction in the data, - since every atom has a maximum number of neighbors. - If `symmetric_edge_symmetrization` is False, - we only use i->j edges here. So we lose some j->i edges - and add others by making it symmetric. - If `symmetric_edge_symmetrization` is True, - we always use both directions. - - - .. py:method:: generate_graph_values(data) - - - .. py:method:: forward(data) - - - .. py:method:: __repr__() -> str - - Return repr(self). - - - diff --git a/_sources/autoapi/fairchem/core/models/painn/painn/index.rst b/_sources/autoapi/fairchem/core/models/painn/painn/index.rst deleted file mode 100644 index d20d28cdb..000000000 --- a/_sources/autoapi/fairchem/core/models/painn/painn/index.rst +++ /dev/null @@ -1,312 +0,0 @@ -:py:mod:`fairchem.core.models.painn.painn` -========================================== - -.. py:module:: fairchem.core.models.painn.painn - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. 
- - --- - - MIT License - - Copyright (c) 2021 www.compscience.org - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.painn.painn.PaiNN - fairchem.core.models.painn.painn.PaiNNMessage - fairchem.core.models.painn.painn.PaiNNUpdate - fairchem.core.models.painn.painn.PaiNNOutput - fairchem.core.models.painn.painn.GatedEquivariantBlock - - - - -.. py:class:: PaiNN(num_atoms: int, bond_feat_dim: int, num_targets: int, hidden_channels: int = 512, num_layers: int = 6, num_rbf: int = 128, cutoff: float = 12.0, max_neighbors: int = 50, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None, regress_forces: bool = True, direct_forces: bool = True, use_pbc: bool = True, otf_graph: bool = True, num_elements: int = 83, scale_file: str | None = None) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - PaiNN model based on the description in Schütt et al. (2021): - Equivariant message passing for the prediction of tensorial properties - and molecular spectra, https://arxiv.org/abs/2102.03150. - - .. py:property:: num_params - :type: int - - - .. py:method:: reset_parameters() -> None - - - .. py:method:: select_symmetric_edges(tensor, mask, reorder_idx, inverse_neg) -> torch.Tensor - - - .. py:method:: symmetrize_edges(edge_index, cell_offsets, neighbors, batch_idx, reorder_tensors, reorder_tensors_invneg) - - Symmetrize edges to ensure existence of counter-directional edges. - - Some edges are only present in one direction in the data, - since every atom has a maximum number of neighbors. - If `symmetric_edge_symmetrization` is False, - we only use i->j edges here. So we lose some j->i edges - and add others by making it symmetric. - If `symmetric_edge_symmetrization` is True, - we always use both directions. - - - .. py:method:: generate_graph_values(data) - - - .. py:method:: forward(data) - - - .. py:method:: __repr__() -> str - - Return repr(self). - - - -.. py:class:: PaiNNMessage(hidden_channels, num_rbf) - - - Bases: :py:obj:`torch_geometric.nn.MessagePassing` - - Base class for creating message passing layers of the form - - .. 
math:: - \mathbf{x}_i^{\prime} = \gamma_{\mathbf{\Theta}} \left( \mathbf{x}_i, - \bigoplus_{j \in \mathcal{N}(i)} \, \phi_{\mathbf{\Theta}} - \left(\mathbf{x}_i, \mathbf{x}_j,\mathbf{e}_{j,i}\right) \right), - - where :math:`\bigoplus` denotes a differentiable, permutation invariant - function, *e.g.*, sum, mean, min, max or mul, and - :math:`\gamma_{\mathbf{\Theta}}` and :math:`\phi_{\mathbf{\Theta}}` denote - differentiable functions such as MLPs. - See `here `__ for the accompanying tutorial. - - :param aggr: The aggregation scheme - to use, *e.g.*, :obj:`"add"`, :obj:`"sum"` :obj:`"mean"`, - :obj:`"min"`, :obj:`"max"` or :obj:`"mul"`. - In addition, can be any - :class:`~torch_geometric.nn.aggr.Aggregation` module (or any string - that automatically resolves to it). - If given as a list, will make use of multiple aggregations in which - different outputs will get concatenated in the last dimension. - If set to :obj:`None`, the :class:`MessagePassing` instantiation is - expected to implement its own aggregation logic via - :meth:`aggregate`. (default: :obj:`"add"`) - :type aggr: str or [str] or Aggregation, optional - :param aggr_kwargs: Arguments passed to the - respective aggregation function in case it gets automatically - resolved. (default: :obj:`None`) - :type aggr_kwargs: Dict[str, Any], optional - :param flow: The flow direction of message passing - (:obj:`"source_to_target"` or :obj:`"target_to_source"`). - (default: :obj:`"source_to_target"`) - :type flow: str, optional - :param node_dim: The axis along which to propagate. - (default: :obj:`-2`) - :type node_dim: int, optional - :param decomposed_layers: The number of feature decomposition - layers, as introduced in the `"Optimizing Memory Efficiency of - Graph Neural Networks on Edge Computing Platforms" - `_ paper. - Feature decomposition reduces the peak memory usage by slicing - the feature dimensions into separated feature decomposition layers - during GNN aggregation. - This method can accelerate GNN execution on CPU-based platforms - (*e.g.*, 2-3x speedup on the - :class:`~torch_geometric.datasets.Reddit` dataset) for common GNN - models such as :class:`~torch_geometric.nn.models.GCN`, - :class:`~torch_geometric.nn.models.GraphSAGE`, - :class:`~torch_geometric.nn.models.GIN`, etc. - However, this method is not applicable to all GNN operators - available, in particular for operators in which message computation - can not easily be decomposed, *e.g.* in attention-based GNNs. - The selection of the optimal value of :obj:`decomposed_layers` - depends both on the specific graph dataset and available hardware - resources. - A value of :obj:`2` is suitable in most cases. - Although the peak memory usage is directly associated with the - granularity of feature decomposition, the same is not necessarily - true for execution speedups. (default: :obj:`1`) - :type decomposed_layers: int, optional - - .. py:method:: reset_parameters() -> None - - Resets all learnable parameters of the module. - - - .. py:method:: forward(x, vec, edge_index, edge_rbf, edge_vector) - - Runs the forward pass of the module. - - - .. py:method:: message(xh_j, vec_j, rbfh_ij, r_ij) - - Constructs messages from node :math:`j` to node :math:`i` - in analogy to :math:`\phi_{\mathbf{\Theta}}` for each edge in - :obj:`edge_index`. - This function can take any argument as input which was initially - passed to :meth:`propagate`. 
- Furthermore, tensors passed to :meth:`propagate` can be mapped to the - respective nodes :math:`i` and :math:`j` by appending :obj:`_i` or - :obj:`_j` to the variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`. - - - .. py:method:: aggregate(features: tuple[torch.Tensor, torch.Tensor], index: torch.Tensor, dim_size: int) -> tuple[torch.Tensor, torch.Tensor] - - Aggregates messages from neighbors as - :math:`\bigoplus_{j \in \mathcal{N}(i)}`. - - Takes in the output of message computation as first argument and any - argument which was initially passed to :meth:`propagate`. - - By default, this function will delegate its call to the underlying - :class:`~torch_geometric.nn.aggr.Aggregation` module to reduce messages - as specified in :meth:`__init__` by the :obj:`aggr` argument. - - - .. py:method:: update(inputs: tuple[torch.Tensor, torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor] - - Updates node embeddings in analogy to - :math:`\gamma_{\mathbf{\Theta}}` for each node - :math:`i \in \mathcal{V}`. - Takes in the output of aggregation as first argument and any argument - which was initially passed to :meth:`propagate`. - - - -.. py:class:: PaiNNUpdate(hidden_channels) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: reset_parameters() -> None - - - .. py:method:: forward(x, vec) - - - -.. py:class:: PaiNNOutput(hidden_channels) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: reset_parameters() -> None - - - .. py:method:: forward(x, vec) - - - -.. py:class:: GatedEquivariantBlock(hidden_channels, out_channels) - - - Bases: :py:obj:`torch.nn.Module` - - Gated Equivariant Block as defined in Schütt et al. 
(2021): - Equivariant message passing for the prediction of tensorial properties and molecular spectra - - .. py:method:: reset_parameters() -> None - - - .. py:method:: forward(x, v) - - - diff --git a/_sources/autoapi/fairchem/core/models/painn/utils/index.rst b/_sources/autoapi/fairchem/core/models/painn/utils/index.rst deleted file mode 100644 index 7a304580d..000000000 --- a/_sources/autoapi/fairchem/core/models/painn/utils/index.rst +++ /dev/null @@ -1,64 +0,0 @@ -:py:mod:`fairchem.core.models.painn.utils` -========================================== - -.. py:module:: fairchem.core.models.painn.utils - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.painn.utils.repeat_blocks - fairchem.core.models.painn.utils.get_edge_id - - - -.. py:function:: repeat_blocks(sizes, repeats, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) -> torch.Tensor - - Repeat blocks of indices. - Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements - - continuous_indexing: Whether to keep increasing the index after each block - start_idx: Starting index - block_inc: Number to increment by after each block, - either global or per block. Shape: len(sizes) - 1 - repeat_inc: Number to increment by after each repetition, - either global or per block - - .. rubric:: Examples - - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False - Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True - Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; - repeat_inc = 4 - Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; - start_idx = 5 - Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10] - sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; - block_inc = 1 - Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7] - sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True - Return: [0 1 2 0 1 2 3 4 3 4 3 4] - sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True - Return: [0 1 0 1 5 6 5 6] - - -.. py:function:: get_edge_id(edge_idx, cell_offsets, num_atoms: int) - - diff --git a/_sources/autoapi/fairchem/core/models/schnet/index.rst b/_sources/autoapi/fairchem/core/models/schnet/index.rst deleted file mode 100644 index 42f2db5fe..000000000 --- a/_sources/autoapi/fairchem/core/models/schnet/index.rst +++ /dev/null @@ -1,98 +0,0 @@ -:py:mod:`fairchem.core.models.schnet` -===================================== - -.. py:module:: fairchem.core.models.schnet - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.schnet.SchNetWrap - - - - -.. 
py:class:: SchNetWrap(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, hidden_channels: int = 128, num_filters: int = 128, num_interactions: int = 6, num_gaussians: int = 50, cutoff: float = 10.0, readout: str = 'add') - - - Bases: :py:obj:`torch_geometric.nn.SchNet`, :py:obj:`fairchem.core.models.base.BaseModel` - - Wrapper around the continuous-filter convolutional neural network SchNet from the - `"SchNet: A Continuous-filter Convolutional Neural Network for Modeling - Quantum Interactions" `_. Each layer uses interaction - block of the form: - - .. math:: - \mathbf{x}^{\prime}_i = \sum_{j \in \mathcal{N}(i)} \mathbf{x}_j \odot - h_{\mathbf{\Theta}} ( \exp(-\gamma(\mathbf{e}_{j,i} - \mathbf{\mu}))), - - :param num_atoms: Unused argument - :type num_atoms: int - :param bond_feat_dim: Unused argument - :type bond_feat_dim: int - :param num_targets: Number of targets to predict. - :type num_targets: int - :param use_pbc: If set to :obj:`True`, account for periodic boundary conditions. - (default: :obj:`True`) - :type use_pbc: bool, optional - :param regress_forces: If set to :obj:`True`, predict forces by differentiating - energy with respect to positions. - (default: :obj:`True`) - :type regress_forces: bool, optional - :param otf_graph: If set to :obj:`True`, compute graph edges on the fly. - (default: :obj:`False`) - :type otf_graph: bool, optional - :param hidden_channels: Number of hidden channels. - (default: :obj:`128`) - :type hidden_channels: int, optional - :param num_filters: Number of filters to use. - (default: :obj:`128`) - :type num_filters: int, optional - :param num_interactions: Number of interaction blocks - (default: :obj:`6`) - :type num_interactions: int, optional - :param num_gaussians: The number of gaussians :math:`\mu`. - (default: :obj:`50`) - :type num_gaussians: int, optional - :param cutoff: Cutoff distance for interatomic interactions. - (default: :obj:`10.0`) - :type cutoff: float, optional - :param readout: Whether to apply :obj:`"add"` or - :obj:`"mean"` global aggregation. (default: :obj:`"add"`) - :type readout: string, optional - - .. py:property:: num_params - :type: int - - - .. py:method:: _forward(data) - - - .. py:method:: forward(data) - - :param z: Atomic number of each atom with shape - :obj:`[num_atoms]`. - :type z: torch.Tensor - :param pos: Coordinates of each atom with shape - :obj:`[num_atoms, 3]`. - :type pos: torch.Tensor - :param batch: Batch indices assigning each atom - to a separate molecule with shape :obj:`[num_atoms]`. - (default: :obj:`None`) - :type batch: torch.Tensor, optional - - - diff --git a/_sources/autoapi/fairchem/core/models/scn/index.rst b/_sources/autoapi/fairchem/core/models/scn/index.rst deleted file mode 100644 index 6fb7677ad..000000000 --- a/_sources/autoapi/fairchem/core/models/scn/index.rst +++ /dev/null @@ -1,131 +0,0 @@ -:py:mod:`fairchem.core.models.scn` -================================== - -.. py:module:: fairchem.core.models.scn - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - sampling/index.rst - scn/index.rst - smearing/index.rst - spherical_harmonics/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.scn.SphericalChannelNetwork - - - - -.. 
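A minimal construction sketch for the ``SchNetWrap`` wrapper documented in the entry above, relying only on the constructor signature shown there; ``num_atoms`` and ``bond_feat_dim`` are listed as unused, so placeholder values are passed, and in practice these models are usually built from a trainer config rather than by hand::

    from fairchem.core.models.schnet import SchNetWrap

    model = SchNetWrap(
        num_atoms=0,          # documented above as an unused argument
        bond_feat_dim=0,      # documented above as an unused argument
        num_targets=1,
        regress_forces=True,
        otf_graph=True,       # build the graph on the fly from positions and cell
        hidden_channels=128,
        num_gaussians=50,
        cutoff=10.0,
        readout="add",
    )
    # model(data) expects a torch_geometric-style batch carrying atomic numbers,
    # positions and, when use_pbc is True, periodic cell information.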
py:class:: SphericalChannelNetwork(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_num_neighbors: int = 20, cutoff: float = 8.0, max_num_elements: int = 90, num_interactions: int = 8, lmax: int = 6, mmax: int = 1, num_resolutions: int = 2, sphere_channels: int = 128, sphere_channels_reduce: int = 128, hidden_channels: int = 256, num_taps: int = -1, use_grid: bool = True, num_bands: int = 1, num_sphere_samples: int = 128, num_basis_functions: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False, direct_forces: bool = True) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - Spherical Channel Network - Paper: Spherical Channels for Modeling Atomic Interactions - - :param use_pbc: Use periodic boundary conditions - :type use_pbc: bool - :param regress_forces: Compute forces - :type regress_forces: bool - :param otf_graph: Compute graph On The Fly (OTF) - :type otf_graph: bool - :param max_num_neighbors: Maximum number of neighbors per atom - :type max_num_neighbors: int - :param cutoff: Maximum distance between neighboring atoms in Angstroms - :type cutoff: float - :param max_num_elements: Maximum atomic number - :type max_num_elements: int - :param num_interactions: Number of layers in the GNN - :type num_interactions: int - :param lmax: Maximum degree of the spherical harmonics (1 to 10) - :type lmax: int - :param mmax: Maximum order of the spherical harmonics (0 or 1) - :type mmax: int - :param num_resolutions: Number of resolutions used to compute messages; atoms farther away have lower resolution (1 or 2) - :type num_resolutions: int - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param sphere_channels_reduce: Number of spherical channels used during message passing (downsample or upsample) - :type sphere_channels_reduce: int - :param hidden_channels: Number of hidden units in message passing - :type hidden_channels: int - :param num_taps: Number of taps or rotations used during message passing (1 or otherwise set automatically based on mmax) - :type num_taps: int - :param use_grid: Use non-linear pointwise convolution during aggregation - :type use_grid: bool - :param num_bands: Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2) - :type num_bands: int - :param num_sphere_samples: Number of samples used to approximate the integration of the sphere in the output blocks - :type num_sphere_samples: int - :param num_basis_functions: Number of basis functions used for distance and atomic number blocks - :type num_basis_functions: int - :param distance_function: Basis function used for distances - :type distance_function: "gaussian", "sigmoid", "linearsigmoid", "silu" - :param basis_width_scalar: Width of distance basis function - :type basis_width_scalar: float - :param distance_resolution: Distance between distance basis functions in Angstroms - :type distance_resolution: float - :param show_timing_info: Show timing and memory info - :type show_timing_info: bool - - .. py:property:: num_params - :type: int - - - .. py:attribute:: energy_fc1 - :type: torch.nn.Linear - - - - .. py:attribute:: energy_fc2 - :type: torch.nn.Linear - - - - .. py:attribute:: energy_fc3 - :type: torch.nn.Linear - - - - .. py:attribute:: force_fc1 - :type: torch.nn.Linear - - - - .. py:attribute:: force_fc2 - :type: torch.nn.Linear - - - - .. 
py:attribute:: force_fc3 - :type: torch.nn.Linear - - - - .. py:method:: forward(data) - - - .. py:method:: _forward_helper(data) - - - .. py:method:: _init_edge_rot_mat(data, edge_index, edge_distance_vec) - - - .. py:method:: _rank_edge_distances(edge_distance, edge_index, max_num_neighbors: int) -> torch.Tensor - - - diff --git a/_sources/autoapi/fairchem/core/models/scn/sampling/index.rst b/_sources/autoapi/fairchem/core/models/scn/sampling/index.rst deleted file mode 100644 index a019b290a..000000000 --- a/_sources/autoapi/fairchem/core/models/scn/sampling/index.rst +++ /dev/null @@ -1,34 +0,0 @@ -:py:mod:`fairchem.core.models.scn.sampling` -=========================================== - -.. py:module:: fairchem.core.models.scn.sampling - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.scn.sampling.CalcSpherePoints - fairchem.core.models.scn.sampling.CalcSpherePointsRandom - - - -.. py:function:: CalcSpherePoints(num_points: int, device: str = 'cpu') -> torch.Tensor - - -.. py:function:: CalcSpherePointsRandom(num_points: int, device) -> torch.Tensor - - diff --git a/_sources/autoapi/fairchem/core/models/scn/scn/index.rst b/_sources/autoapi/fairchem/core/models/scn/scn/index.rst deleted file mode 100644 index 0533ed38c..000000000 --- a/_sources/autoapi/fairchem/core/models/scn/scn/index.rst +++ /dev/null @@ -1,250 +0,0 @@ -:py:mod:`fairchem.core.models.scn.scn` -====================================== - -.. py:module:: fairchem.core.models.scn.scn - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.scn.scn.SphericalChannelNetwork - fairchem.core.models.scn.scn.EdgeBlock - fairchem.core.models.scn.scn.MessageBlock - fairchem.core.models.scn.scn.DistanceBlock - - - - -.. 
py:class:: SphericalChannelNetwork(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_num_neighbors: int = 20, cutoff: float = 8.0, max_num_elements: int = 90, num_interactions: int = 8, lmax: int = 6, mmax: int = 1, num_resolutions: int = 2, sphere_channels: int = 128, sphere_channels_reduce: int = 128, hidden_channels: int = 256, num_taps: int = -1, use_grid: bool = True, num_bands: int = 1, num_sphere_samples: int = 128, num_basis_functions: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False, direct_forces: bool = True) - - - Bases: :py:obj:`fairchem.core.models.base.BaseModel` - - Spherical Channel Network - Paper: Spherical Channels for Modeling Atomic Interactions - - :param use_pbc: Use periodic boundary conditions - :type use_pbc: bool - :param regress_forces: Compute forces - :type regress_forces: bool - :param otf_graph: Compute graph On The Fly (OTF) - :type otf_graph: bool - :param max_num_neighbors: Maximum number of neighbors per atom - :type max_num_neighbors: int - :param cutoff: Maximum distance between neighboring atoms in Angstroms - :type cutoff: float - :param max_num_elements: Maximum atomic number - :type max_num_elements: int - :param num_interactions: Number of layers in the GNN - :type num_interactions: int - :param lmax: Maximum degree of the spherical harmonics (1 to 10) - :type lmax: int - :param mmax: Maximum order of the spherical harmonics (0 or 1) - :type mmax: int - :param num_resolutions: Number of resolutions used to compute messages; atoms farther away have lower resolution (1 or 2) - :type num_resolutions: int - :param sphere_channels: Number of spherical channels - :type sphere_channels: int - :param sphere_channels_reduce: Number of spherical channels used during message passing (downsample or upsample) - :type sphere_channels_reduce: int - :param hidden_channels: Number of hidden units in message passing - :type hidden_channels: int - :param num_taps: Number of taps or rotations used during message passing (1 or otherwise set automatically based on mmax) - :type num_taps: int - :param use_grid: Use non-linear pointwise convolution during aggregation - :type use_grid: bool - :param num_bands: Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2) - :type num_bands: int - :param num_sphere_samples: Number of samples used to approximate the integration of the sphere in the output blocks - :type num_sphere_samples: int - :param num_basis_functions: Number of basis functions used for distance and atomic number blocks - :type num_basis_functions: int - :param distance_function: Basis function used for distances - :type distance_function: "gaussian", "sigmoid", "linearsigmoid", "silu" - :param basis_width_scalar: Width of distance basis function - :type basis_width_scalar: float - :param distance_resolution: Distance between distance basis functions in Angstroms - :type distance_resolution: float - :param show_timing_info: Show timing and memory info - :type show_timing_info: bool - - .. py:property:: num_params - :type: int - - - .. py:attribute:: energy_fc1 - :type: torch.nn.Linear - - - - .. py:attribute:: energy_fc2 - :type: torch.nn.Linear - - - - .. py:attribute:: energy_fc3 - :type: torch.nn.Linear - - - - .. py:attribute:: force_fc1 - :type: torch.nn.Linear - - - - .. py:attribute:: force_fc2 - :type: torch.nn.Linear - - - - .. 
py:attribute:: force_fc3 - :type: torch.nn.Linear - - - - .. py:method:: forward(data) - - - .. py:method:: _forward_helper(data) - - - .. py:method:: _init_edge_rot_mat(data, edge_index, edge_distance_vec) - - - .. py:method:: _rank_edge_distances(edge_distance, edge_index, max_num_neighbors: int) -> torch.Tensor - - - -.. py:class:: EdgeBlock(num_resolutions: int, sphere_channels_reduce, hidden_channels_list, cutoff_list, sphharm_list, sphere_channels, distance_expansion, max_num_elements: int, num_basis_functions: int, num_gaussians: int, use_grid: bool, act) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x, atomic_numbers, edge_distance, edge_index, cutoff_index) - - - -.. py:class:: MessageBlock(sphere_channels_reduce, hidden_channels, num_basis_functions, sphharm, act) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x, x_edge, edge_index) - - - -.. py:class:: DistanceBlock(in_channels, num_basis_functions: int, distance_expansion, max_num_elements: int, act) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. 
note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(edge_distance, source_element, target_element) - - - diff --git a/_sources/autoapi/fairchem/core/models/scn/smearing/index.rst b/_sources/autoapi/fairchem/core/models/scn/smearing/index.rst deleted file mode 100644 index 9bbcfb760..000000000 --- a/_sources/autoapi/fairchem/core/models/scn/smearing/index.rst +++ /dev/null @@ -1,190 +0,0 @@ -:py:mod:`fairchem.core.models.scn.smearing` -=========================================== - -.. py:module:: fairchem.core.models.scn.smearing - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.scn.smearing.GaussianSmearing - fairchem.core.models.scn.smearing.SigmoidSmearing - fairchem.core.models.scn.smearing.LinearSigmoidSmearing - fairchem.core.models.scn.smearing.SiLUSmearing - - - - -.. py:class:: GaussianSmearing(start: float = -5.0, stop: float = 5.0, num_gaussians: int = 50, basis_width_scalar: float = 1.0) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(dist) -> torch.Tensor - - - -.. py:class:: SigmoidSmearing(start=-5.0, stop=5.0, num_sigmoid=50, basis_width_scalar=1.0) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(dist) -> torch.Tensor - - - -.. 
py:class:: LinearSigmoidSmearing(start: float = -5.0, stop: float = 5.0, num_sigmoid: int = 50, basis_width_scalar: float = 1.0) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(dist) -> torch.Tensor - - - -.. py:class:: SiLUSmearing(start: float = -5.0, stop: float = 5.0, num_output: int = 50, basis_width_scalar: float = 1.0) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(dist) - - - diff --git a/_sources/autoapi/fairchem/core/models/scn/spherical_harmonics/index.rst b/_sources/autoapi/fairchem/core/models/scn/spherical_harmonics/index.rst deleted file mode 100644 index 58310ef86..000000000 --- a/_sources/autoapi/fairchem/core/models/scn/spherical_harmonics/index.rst +++ /dev/null @@ -1,102 +0,0 @@ -:py:mod:`fairchem.core.models.scn.spherical_harmonics` -====================================================== - -.. py:module:: fairchem.core.models.scn.spherical_harmonics - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.scn.spherical_harmonics.SphericalHarmonicsHelper - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.scn.spherical_harmonics.wigner_D - fairchem.core.models.scn.spherical_harmonics._z_rot_mat - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.scn.spherical_harmonics._Jd - - -.. py:data:: _Jd - - - -.. 
py:class:: SphericalHarmonicsHelper(lmax: int, mmax: int, num_taps: int, num_bands: int) - - - Helper functions for spherical harmonics calculations and representations - - :param lmax: Maximum degree of the spherical harmonics - :type lmax: int - :param mmax: Maximum order of the spherical harmonics - :type mmax: int - :param num_taps: Number of taps or rotations (1 or otherwise set automatically based on mmax) - :type num_taps: int - :param num_bands: Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2) - :type num_bands: int - - .. py:method:: InitWignerDMatrix(edge_rot_mat) -> None - - - .. py:method:: InitYRotMapping() - - - .. py:method:: ToGrid(x, channels) -> torch.Tensor - - - .. py:method:: FromGrid(x_grid, channels) -> torch.Tensor - - - .. py:method:: CombineYRotations(x) -> torch.Tensor - - - .. py:method:: Rotate(x) -> torch.Tensor - - - .. py:method:: FlipGrid(grid, num_channels: int) -> torch.Tensor - - - .. py:method:: RotateInv(x) -> torch.Tensor - - - .. py:method:: RotateWigner(x, wigner) -> torch.Tensor - - - .. py:method:: RotationMatrix(rot_x: float, rot_y: float, rot_z: float) -> torch.Tensor - - - .. py:method:: RotationToWignerDMatrix(edge_rot_mat, start_lmax, end_lmax) - - - -.. py:function:: wigner_D(l, alpha, beta, gamma) - - -.. py:function:: _z_rot_mat(angle, l) - - diff --git a/_sources/autoapi/fairchem/core/models/utils/activations/index.rst b/_sources/autoapi/fairchem/core/models/utils/activations/index.rst deleted file mode 100644 index 8f533bcc0..000000000 --- a/_sources/autoapi/fairchem/core/models/utils/activations/index.rst +++ /dev/null @@ -1,67 +0,0 @@ -:py:mod:`fairchem.core.models.utils.activations` -================================================ - -.. py:module:: fairchem.core.models.utils.activations - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.utils.activations.Act - - - - -.. py:class:: Act(act: str, slope: float = 0.05) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. 
py:method:: forward(input: torch.Tensor) -> torch.Tensor - - - diff --git a/_sources/autoapi/fairchem/core/models/utils/basis/index.rst b/_sources/autoapi/fairchem/core/models/utils/basis/index.rst deleted file mode 100644 index 52e43d5d0..000000000 --- a/_sources/autoapi/fairchem/core/models/utils/basis/index.rst +++ /dev/null @@ -1,328 +0,0 @@ -:py:mod:`fairchem.core.models.utils.basis` -========================================== - -.. py:module:: fairchem.core.models.utils.basis - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.models.utils.basis.Sine - fairchem.core.models.utils.basis.SIREN - fairchem.core.models.utils.basis.SINESmearing - fairchem.core.models.utils.basis.GaussianSmearing - fairchem.core.models.utils.basis.FourierSmearing - fairchem.core.models.utils.basis.Basis - fairchem.core.models.utils.basis.SphericalSmearing - - - - -.. py:class:: Sine(w0: float = 30.0) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x: torch.Tensor) -> torch.Tensor - - - -.. py:class:: SIREN(layers: list[int], num_in_features: int, out_features: int, w0: float = 30.0, initializer: str | None = 'siren', c: float = 6) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(X: torch.Tensor) -> torch.Tensor - - - -.. py:class:: SINESmearing(num_in_features: int, num_freqs: int = 40, use_cosine: bool = False) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. 
- - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x: torch.Tensor) -> torch.Tensor - - - -.. py:class:: GaussianSmearing(num_in_features: int, start: int = 0, end: int = 1, num_freqs: int = 50) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x: torch.Tensor) -> torch.Tensor - - - -.. py:class:: FourierSmearing(num_in_features: int, num_freqs: int = 40, use_cosine: bool = False) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(x: torch.Tensor) -> torch.Tensor - - - -.. py:class:: Basis(num_in_features: int, num_freqs: int = 50, basis_type: str = 'powersine', act: str = 'ssp', sph: SphericalSmearing | None = None) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. 
You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:attribute:: smearing - :type: SINESmearing | FourierSmearing | GaussianSmearing | torch.nn.Sequential - - - - .. py:method:: forward(x: torch.Tensor, edge_attr_sph: torch.Tensor | None = None) - - - -.. py:class:: SphericalSmearing(max_n: int = 10, option: str = 'all') - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:attribute:: m - :type: numpy.typing.NDArray[numpy.int_] - - - - .. py:attribute:: n - :type: numpy.typing.NDArray[numpy.int_] - - - - .. py:method:: forward(xyz: torch.Tensor) -> torch.Tensor - - - diff --git a/_sources/autoapi/fairchem/core/models/utils/index.rst b/_sources/autoapi/fairchem/core/models/utils/index.rst deleted file mode 100644 index 1d6ff8ba7..000000000 --- a/_sources/autoapi/fairchem/core/models/utils/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -:py:mod:`fairchem.core.models.utils` -==================================== - -.. py:module:: fairchem.core.models.utils - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - activations/index.rst - basis/index.rst - - diff --git a/_sources/autoapi/fairchem/core/modules/evaluator/index.rst b/_sources/autoapi/fairchem/core/modules/evaluator/index.rst deleted file mode 100644 index 08326c3da..000000000 --- a/_sources/autoapi/fairchem/core/modules/evaluator/index.rst +++ /dev/null @@ -1,122 +0,0 @@ -:py:mod:`fairchem.core.modules.evaluator` -========================================= - -.. py:module:: fairchem.core.modules.evaluator - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.evaluator.Evaluator - - - -Functions -~~~~~~~~~ - -.. 
autoapisummary:: - - fairchem.core.modules.evaluator.forcesx_mae - fairchem.core.modules.evaluator.forcesx_mse - fairchem.core.modules.evaluator.forcesy_mae - fairchem.core.modules.evaluator.forcesy_mse - fairchem.core.modules.evaluator.forcesz_mae - fairchem.core.modules.evaluator.forcesz_mse - fairchem.core.modules.evaluator.energy_forces_within_threshold - fairchem.core.modules.evaluator.energy_within_threshold - fairchem.core.modules.evaluator.average_distance_within_threshold - fairchem.core.modules.evaluator.min_diff - fairchem.core.modules.evaluator.cosine_similarity - fairchem.core.modules.evaluator.mae - fairchem.core.modules.evaluator.mse - fairchem.core.modules.evaluator.magnitude_error - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.evaluator.NONE - - -.. py:data:: NONE - - - -.. py:class:: Evaluator(task: str | None = None, eval_metrics: dict | None = None) - - - .. py:attribute:: task_metrics - :type: ClassVar[dict[str, str]] - - - - .. py:attribute:: task_primary_metric - :type: ClassVar[dict[str, str | None]] - - - - .. py:method:: eval(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], prev_metrics=None) - - - .. py:method:: update(key, stat, metrics) - - - -.. py:function:: forcesx_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) - - -.. py:function:: forcesx_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) - - -.. py:function:: forcesy_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) - - -.. py:function:: forcesy_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) - - -.. py:function:: forcesz_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) - - -.. py:function:: forcesz_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) - - -.. py:function:: energy_forces_within_threshold(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) -> dict[str, float | int] - - -.. py:function:: energy_within_threshold(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) -> dict[str, float | int] - - -.. py:function:: average_distance_within_threshold(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) -> dict[str, float | int] - - -.. py:function:: min_diff(pred_pos: torch.Tensor, dft_pos: torch.Tensor, cell: torch.Tensor, pbc: torch.Tensor) - - -.. py:function:: cosine_similarity(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) - - -.. py:function:: mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) -> dict[str, float | int] - - -.. py:function:: mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) -> dict[str, float | int] - - -.. 
py:function:: magnitude_error(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE, p: int = 2) -> dict[str, float | int] - - diff --git a/_sources/autoapi/fairchem/core/modules/exponential_moving_average/index.rst b/_sources/autoapi/fairchem/core/modules/exponential_moving_average/index.rst deleted file mode 100644 index e8381d965..000000000 --- a/_sources/autoapi/fairchem/core/modules/exponential_moving_average/index.rst +++ /dev/null @@ -1,100 +0,0 @@ -:py:mod:`fairchem.core.modules.exponential_moving_average` -========================================================== - -.. py:module:: fairchem.core.modules.exponential_moving_average - -.. autoapi-nested-parse:: - - Copied (and improved) from: - https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py (MIT license) - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.exponential_moving_average.ExponentialMovingAverage - - - - -.. py:class:: ExponentialMovingAverage(parameters: collections.abc.Iterable[torch.nn.Parameter], decay: float, use_num_updates: bool = False) - - - Maintains (exponential) moving average of a set of parameters. - - :param parameters: Iterable of `torch.nn.Parameter` (typically from - `model.parameters()`). - :param decay: The exponential decay. - :param use_num_updates: Whether to use number of updates when computing - averages. - - .. py:method:: _get_parameters(parameters: collections.abc.Iterable[torch.nn.Parameter] | None) -> collections.abc.Iterable[torch.nn.Parameter] - - - .. py:method:: update(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) -> None - - Update currently maintained parameters. - - Call this every time the parameters are updated, such as the result of - the `optimizer.step()` call. - - :param parameters: Iterable of `torch.nn.Parameter`; usually the same set of - parameters used to initialize this object. If `None`, the - parameters with which this `ExponentialMovingAverage` was - initialized will be used. - - - .. py:method:: copy_to(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) -> None - - Copy current parameters into given collection of parameters. - - :param parameters: Iterable of `torch.nn.Parameter`; the parameters to be - updated with the stored moving averages. If `None`, the - parameters with which this `ExponentialMovingAverage` was - initialized will be used. - - - .. py:method:: store(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) -> None - - Save the current parameters for restoring later. - - :param parameters: Iterable of `torch.nn.Parameter`; the parameters to be - temporarily stored. If `None`, the parameters of with which this - `ExponentialMovingAverage` was initialized will be used. - - - .. py:method:: restore(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) -> None - - Restore the parameters stored with the `store` method. - Useful to validate the model with EMA parameters without affecting the - original optimization process. Store the parameters before the - `copy_to` method. After validation (or model saving), use this to - restore the former parameters. - - :param parameters: Iterable of `torch.nn.Parameter`; the parameters to be - updated with the stored parameters. If `None`, the - parameters with which this `ExponentialMovingAverage` was - initialized will be used. - - - .. 
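A minimal usage sketch for the exponential moving average class documented above, assuming a hypothetical ``model``, ``optimizer``, data ``loader``, and helper functions (all placeholders), and following the update/store/copy_to/restore pattern described in the method docstrings::

    from fairchem.core.modules.exponential_moving_average import ExponentialMovingAverage

    # shadow copies of the model parameters, decayed towards the live weights
    ema = ExponentialMovingAverage(model.parameters(), decay=0.999)

    for batch in loader:
        optimizer.zero_grad()
        loss = compute_loss(model, batch)  # placeholder loss computation
        loss.backward()
        optimizer.step()
        ema.update()  # refresh the moving averages after each optimizer step

    # evaluate with the averaged weights, then restore the raw training weights
    ema.store()
    ema.copy_to()
    validate(model)  # placeholder validation routine
    ema.restore()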
py:method:: state_dict() -> dict - - Returns the state of the ExponentialMovingAverage as a dict. - - - .. py:method:: load_state_dict(state_dict: dict) -> None - - Loads the ExponentialMovingAverage state. - - :param state_dict: EMA state. Should be an object returned - from a call to :meth:`state_dict`. - :type state_dict: dict - - - diff --git a/_sources/autoapi/fairchem/core/modules/index.rst b/_sources/autoapi/fairchem/core/modules/index.rst deleted file mode 100644 index 1b41f0d94..000000000 --- a/_sources/autoapi/fairchem/core/modules/index.rst +++ /dev/null @@ -1,37 +0,0 @@ -:py:mod:`fairchem.core.modules` -=============================== - -.. py:module:: fairchem.core.modules - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - scaling/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - evaluator/index.rst - exponential_moving_average/index.rst - loss/index.rst - normalizer/index.rst - scheduler/index.rst - transforms/index.rst - - diff --git a/_sources/autoapi/fairchem/core/modules/loss/index.rst b/_sources/autoapi/fairchem/core/modules/loss/index.rst deleted file mode 100644 index 32b443a00..000000000 --- a/_sources/autoapi/fairchem/core/modules/loss/index.rst +++ /dev/null @@ -1,141 +0,0 @@ -:py:mod:`fairchem.core.modules.loss` -==================================== - -.. py:module:: fairchem.core.modules.loss - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.loss.L2MAELoss - fairchem.core.modules.loss.AtomwiseL2Loss - fairchem.core.modules.loss.DDPLoss - - - - -.. py:class:: L2MAELoss(reduction: str = 'mean') - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(input: torch.Tensor, target: torch.Tensor) - - - -.. py:class:: AtomwiseL2Loss(reduction: str = 'mean') - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. 
You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(input: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor) - - - -.. py:class:: DDPLoss(loss_fn, loss_name: str = 'mae', reduction: str = 'mean') - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:method:: forward(input: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor | None = None, batch_size: int | None = None) - - - diff --git a/_sources/autoapi/fairchem/core/modules/normalizer/index.rst b/_sources/autoapi/fairchem/core/modules/normalizer/index.rst deleted file mode 100644 index 7ee248843..000000000 --- a/_sources/autoapi/fairchem/core/modules/normalizer/index.rst +++ /dev/null @@ -1,48 +0,0 @@ -:py:mod:`fairchem.core.modules.normalizer` -========================================== - -.. py:module:: fairchem.core.modules.normalizer - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.normalizer.Normalizer - - - - -.. py:class:: Normalizer(tensor: torch.Tensor | None = None, mean=None, std=None, device=None) - - - Normalize a Tensor and restore it later. - - .. py:method:: to(device) -> None - - - .. py:method:: norm(tensor: torch.Tensor) -> torch.Tensor - - - .. py:method:: denorm(normed_tensor: torch.Tensor) -> torch.Tensor - - - .. py:method:: state_dict() - - - .. 
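A short sketch of how the normalizer documented above might be used, assuming (as the constructor signature suggests) that passing ``tensor`` estimates the mean and standard deviation from it; the example values are illustrative only::

    import torch
    from fairchem.core.modules.normalizer import Normalizer

    energies = torch.tensor([-1.2, -0.7, -3.4, -2.1])  # illustrative reference targets
    normalizer = Normalizer(tensor=energies)   # mean/std assumed to be estimated from the tensor
    scaled = normalizer.norm(energies)         # normalized targets for training
    restored = normalizer.denorm(scaled)       # back to the original scale
    state = normalizer.state_dict()            # mean/std for checkpointing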
py:method:: load_state_dict(state_dict) -> None - - - diff --git a/_sources/autoapi/fairchem/core/modules/scaling/compat/index.rst b/_sources/autoapi/fairchem/core/modules/scaling/compat/index.rst deleted file mode 100644 index 4f462336d..000000000 --- a/_sources/autoapi/fairchem/core/modules/scaling/compat/index.rst +++ /dev/null @@ -1,43 +0,0 @@ -:py:mod:`fairchem.core.modules.scaling.compat` -============================================== - -.. py:module:: fairchem.core.modules.scaling.compat - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.scaling.compat._load_scale_dict - fairchem.core.modules.scaling.compat.load_scales_compat - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.scaling.compat.ScaleDict - - -.. py:data:: ScaleDict - - - -.. py:function:: _load_scale_dict(scale_file: str | ScaleDict | None) - - Loads scale factors from either: - - a JSON file mapping scale factor names to scale values - - a python dictionary pickled object (loaded using `torch.load`) mapping scale factor names to scale values - - a dictionary mapping scale factor names to scale values - - -.. py:function:: load_scales_compat(module: torch.nn.Module, scale_file: str | ScaleDict | None) -> None - - diff --git a/_sources/autoapi/fairchem/core/modules/scaling/fit/index.rst b/_sources/autoapi/fairchem/core/modules/scaling/fit/index.rst deleted file mode 100644 index 028d2607b..000000000 --- a/_sources/autoapi/fairchem/core/modules/scaling/fit/index.rst +++ /dev/null @@ -1,30 +0,0 @@ -:py:mod:`fairchem.core.modules.scaling.fit` -=========================================== - -.. py:module:: fairchem.core.modules.scaling.fit - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.scaling.fit._prefilled_input - fairchem.core.modules.scaling.fit._train_batch - fairchem.core.modules.scaling.fit.main - - - -.. py:function:: _prefilled_input(prompt: str, prefill: str = '') -> str - - -.. py:function:: _train_batch(trainer: fairchem.core.trainers.base_trainer.BaseTrainer, batch) -> None - - -.. py:function:: main(*, num_batches: int = 16) -> None - - diff --git a/_sources/autoapi/fairchem/core/modules/scaling/index.rst b/_sources/autoapi/fairchem/core/modules/scaling/index.rst deleted file mode 100644 index e5542d5c4..000000000 --- a/_sources/autoapi/fairchem/core/modules/scaling/index.rst +++ /dev/null @@ -1,116 +0,0 @@ -:py:mod:`fairchem.core.modules.scaling` -======================================= - -.. py:module:: fairchem.core.modules.scaling - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - compat/index.rst - fit/index.rst - scale_factor/index.rst - util/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.scaling.ScaleFactor - - - - -.. py:class:: ScaleFactor(name: str | None = None, enforce_consistency: bool = True) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. 
You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:property:: fitted - :type: bool - - - .. py:attribute:: scale_factor - :type: torch.Tensor - - - - .. py:attribute:: name - :type: str | None - - - - .. py:attribute:: index_fn - :type: IndexFn | None - - - - .. py:attribute:: stats - :type: _Stats | None - - - - .. py:method:: _enforce_consistency(state_dict, prefix, _local_metadata, _strict, _missing_keys, _unexpected_keys, _error_msgs) -> None - - - .. py:method:: reset_() -> None - - - .. py:method:: set_(scale: float | torch.Tensor) -> None - - - .. py:method:: initialize_(*, index_fn: IndexFn | None = None) -> None - - - .. py:method:: fit_context_() - - - .. py:method:: fit_() - - - .. py:method:: _observe(x: torch.Tensor, ref: torch.Tensor | None = None) -> None - - - .. py:method:: forward(x: torch.Tensor, *, ref: torch.Tensor | None = None) -> torch.Tensor - - - diff --git a/_sources/autoapi/fairchem/core/modules/scaling/scale_factor/index.rst b/_sources/autoapi/fairchem/core/modules/scaling/scale_factor/index.rst deleted file mode 100644 index bdde37911..000000000 --- a/_sources/autoapi/fairchem/core/modules/scaling/scale_factor/index.rst +++ /dev/null @@ -1,159 +0,0 @@ -:py:mod:`fairchem.core.modules.scaling.scale_factor` -==================================================== - -.. py:module:: fairchem.core.modules.scaling.scale_factor - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.scaling.scale_factor._Stats - fairchem.core.modules.scaling.scale_factor.ScaleFactor - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.scaling.scale_factor._check_consistency - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.scaling.scale_factor.IndexFn - - -.. py:class:: _Stats - - - Bases: :py:obj:`TypedDict` - - dict() -> new empty dictionary - dict(mapping) -> new dictionary initialized from a mapping object's - (key, value) pairs - dict(iterable) -> new dictionary initialized as if via: - d = {} - for k, v in iterable: - d[k] = v - dict(**kwargs) -> new dictionary initialized with the name=value pairs - in the keyword argument list. For example: dict(one=1, two=2) - - .. py:attribute:: variance_in - :type: float - - - - .. py:attribute:: variance_out - :type: float - - - - .. py:attribute:: n_samples - :type: int - - - - -.. py:data:: IndexFn - - - -.. py:function:: _check_consistency(old: torch.Tensor, new: torch.Tensor, key: str) -> None - - -.. py:class:: ScaleFactor(name: str | None = None, enforce_consistency: bool = True) - - - Bases: :py:obj:`torch.nn.Module` - - Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. 
You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call :meth:`to`, etc. - - .. note:: - As per the example above, an ``__init__()`` call to the parent class - must be made before assignment on the child. - - :ivar training: Boolean represents whether this module is in training or - evaluation mode. - :vartype training: bool - - .. py:property:: fitted - :type: bool - - - .. py:attribute:: scale_factor - :type: torch.Tensor - - - - .. py:attribute:: name - :type: str | None - - - - .. py:attribute:: index_fn - :type: IndexFn | None - - - - .. py:attribute:: stats - :type: _Stats | None - - - - .. py:method:: _enforce_consistency(state_dict, prefix, _local_metadata, _strict, _missing_keys, _unexpected_keys, _error_msgs) -> None - - - .. py:method:: reset_() -> None - - - .. py:method:: set_(scale: float | torch.Tensor) -> None - - - .. py:method:: initialize_(*, index_fn: IndexFn | None = None) -> None - - - .. py:method:: fit_context_() - - - .. py:method:: fit_() - - - .. py:method:: _observe(x: torch.Tensor, ref: torch.Tensor | None = None) -> None - - - .. py:method:: forward(x: torch.Tensor, *, ref: torch.Tensor | None = None) -> torch.Tensor - - - diff --git a/_sources/autoapi/fairchem/core/modules/scaling/util/index.rst b/_sources/autoapi/fairchem/core/modules/scaling/util/index.rst deleted file mode 100644 index 156d2c241..000000000 --- a/_sources/autoapi/fairchem/core/modules/scaling/util/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -:py:mod:`fairchem.core.modules.scaling.util` -============================================ - -.. py:module:: fairchem.core.modules.scaling.util - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.scaling.util.ensure_fitted - - - -.. py:function:: ensure_fitted(module: torch.nn.Module, warn: bool = False) -> None - - diff --git a/_sources/autoapi/fairchem/core/modules/scheduler/index.rst b/_sources/autoapi/fairchem/core/modules/scheduler/index.rst deleted file mode 100644 index 7fdd4c914..000000000 --- a/_sources/autoapi/fairchem/core/modules/scheduler/index.rst +++ /dev/null @@ -1,46 +0,0 @@ -:py:mod:`fairchem.core.modules.scheduler` -========================================= - -.. py:module:: fairchem.core.modules.scheduler - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.scheduler.LRScheduler - - - - -.. py:class:: LRScheduler(optimizer, config) - - - Learning rate scheduler class for torch.optim learning rate schedulers - - .. rubric:: Notes - - If no learning rate scheduler is specified in the config, the default - scheduler is warmup_lr_lambda (fairchem.core.common.utils) rather than no scheduler; - this is for backward-compatibility reasons. To run without an lr scheduler, - specify scheduler: "Null" in the optim section of the config. - - :param optimizer: torch optim object - :type optimizer: obj - :param config: Optim dict from the input config - :type config: dict - - .. py:method:: step(metrics=None, epoch=None) -> None - - - .. py:method:: filter_kwargs(config) - - - .. 
py:method:: get_lr() - - - diff --git a/_sources/autoapi/fairchem/core/modules/transforms/index.rst b/_sources/autoapi/fairchem/core/modules/transforms/index.rst deleted file mode 100644 index 2da555a56..000000000 --- a/_sources/autoapi/fairchem/core/modules/transforms/index.rst +++ /dev/null @@ -1,37 +0,0 @@ -:py:mod:`fairchem.core.modules.transforms` -========================================== - -.. py:module:: fairchem.core.modules.transforms - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.transforms.DataTransforms - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.modules.transforms.decompose_tensor - - - -.. py:class:: DataTransforms(config) - - - .. py:method:: __call__(data_object) - - - -.. py:function:: decompose_tensor(data_object, config) -> torch_geometric.data.Data - - diff --git a/_sources/autoapi/fairchem/core/preprocessing/atoms_to_graphs/index.rst b/_sources/autoapi/fairchem/core/preprocessing/atoms_to_graphs/index.rst deleted file mode 100644 index df8127591..000000000 --- a/_sources/autoapi/fairchem/core/preprocessing/atoms_to_graphs/index.rst +++ /dev/null @@ -1,206 +0,0 @@ -:py:mod:`fairchem.core.preprocessing.atoms_to_graphs` -===================================================== - -.. py:module:: fairchem.core.preprocessing.atoms_to_graphs - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.preprocessing.atoms_to_graphs.AtomsToGraphs - - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.preprocessing.atoms_to_graphs.AseAtomsAdaptor - fairchem.core.preprocessing.atoms_to_graphs.shell - - -.. py:data:: AseAtomsAdaptor - - - -.. py:data:: shell - - - -.. py:class:: AtomsToGraphs(max_neigh: int = 200, radius: int = 6, r_energy: bool = False, r_forces: bool = False, r_distances: bool = False, r_edges: bool = True, r_fixed: bool = True, r_pbc: bool = False, r_stress: bool = False, r_data_keys: collections.abc.Sequence[str] | None = None) - - - A class to help convert periodic atomic structures to graphs. - - The AtomsToGraphs class takes in periodic atomic structures in form of ASE atoms objects and converts - them into graph representations for use in PyTorch. The primary purpose of this class is to determine the - nearest neighbors within some radius around each individual atom, taking into account PBC, and set the - pair index and distance between atom pairs appropriately. Lastly, atomic properties and the graph information - are put into a PyTorch geometric data object for use with PyTorch. - - :param max_neigh: Maximum number of neighbors to consider. - :type max_neigh: int - :param radius: Cutoff radius in Angstroms to search for neighbors. - :type radius: int or float - :param r_energy: Return the energy with other properties. Default is False, so the energy will not be returned. - :type r_energy: bool - :param r_forces: Return the forces with other properties. Default is False, so the forces will not be returned. - :type r_forces: bool - :param r_stress: Return the stress with other properties. Default is False, so the stress will not be returned. - :type r_stress: bool - :param r_distances: Return the distances with other properties. 
- :type r_distances: bool - :param Default is False: - :param so the distances will not be returned.: - :param r_edges: Return interatomic edges with other properties. Default is True, so edges will be returned. - :type r_edges: bool - :param r_fixed: Return a binary vector with flags for fixed (1) vs free (0) atoms. - :type r_fixed: bool - :param Default is True: - :param so the fixed indices will be returned.: - :param r_pbc: Return the periodic boundary conditions with other properties. - :type r_pbc: bool - :param Default is False: - :param so the periodic boundary conditions will not be returned.: - :param r_data_keys: Return values corresponding to given keys in atoms.info data with other - :type r_data_keys: sequence of str, optional - :param properties. Default is None: - :param so no data will be returned as properties.: - - .. attribute:: max_neigh - - Maximum number of neighbors to consider. - - :type: int - - .. attribute:: radius - - Cutoff radius in Angstroms to search for neighbors. - - :type: int or float - - .. attribute:: r_energy - - Return the energy with other properties. Default is False, so the energy will not be returned. - - :type: bool - - .. attribute:: r_forces - - Return the forces with other properties. Default is False, so the forces will not be returned. - - :type: bool - - .. attribute:: r_stress - - Return the stress with other properties. Default is False, so the stress will not be returned. - - :type: bool - - .. attribute:: r_distances - - Return the distances with other properties. - - :type: bool - - .. attribute:: Default is False, so the distances will not be returned. - - - - .. attribute:: r_edges - - Return interatomic edges with other properties. Default is True, so edges will be returned. - - :type: bool - - .. attribute:: r_fixed - - Return a binary vector with flags for fixed (1) vs free (0) atoms. - - :type: bool - - .. attribute:: Default is True, so the fixed indices will be returned. - - - - .. attribute:: r_pbc - - Return the periodic boundary conditions with other properties. - - :type: bool - - .. attribute:: Default is False, so the periodic boundary conditions will not be returned. - - - - .. attribute:: r_data_keys - - Return values corresponding to given keys in atoms.info data with other - - :type: sequence of str, optional - - .. attribute:: properties. Default is None, so no data will be returned as properties. - - - - .. py:method:: _get_neighbors_pymatgen(atoms: ase.Atoms) - - Performs nearest neighbor search and returns edge index, distances, - and cell offsets - - - .. py:method:: _reshape_features(c_index, n_index, n_distance, offsets) - - Stacks center and neighbor index and reshapes distances, - takes in np.arrays and returns torch tensors - - - .. py:method:: convert(atoms: ase.Atoms, sid=None) - - Convert a single atomic structure to a graph. - - :param atoms: An ASE atoms object. - :type atoms: ase.atoms.Atoms - :param sid: An identifier that can be used to track the structure in downstream - :type sid: uniquely identifying object - :param tasks. Common sids used in OCP datasets include unique strings or integers.: - - :returns: A torch geometric data object with positions, atomic_numbers, tags, - and optionally, energy, forces, distances, edges, and periodic boundary conditions. - Optional properties can be included by setting r_property=True when constructing the class. - :rtype: data (torch_geometric.data.Data) - - - ..
py:method:: convert_all(atoms_collection, processed_file_path: str | None = None, collate_and_save=False, disable_tqdm=False) - - Convert all atoms objects in a list or in an ase.db to graphs. - - :param atoms_collection: - :type atoms_collection: list of ase.atoms.Atoms or ase.db.sqlite.SQLite3Database - :param Either a list of ASE atoms objects or an ASE database.: - :param processed_file_path: - :type processed_file_path: str - :param A string of the path to where the processed file will be written. Default is None.: - :param collate_and_save: A boolean to collate and save or not. Default is False, so will not write a file. - :type collate_and_save: bool - - :returns: A list of torch geometric data objects containing molecular graph info and properties. - :rtype: data_list (list of torch_geometric.data.Data) - - - diff --git a/_sources/autoapi/fairchem/core/preprocessing/index.rst b/_sources/autoapi/fairchem/core/preprocessing/index.rst deleted file mode 100644 index 4f1d1ad37..000000000 --- a/_sources/autoapi/fairchem/core/preprocessing/index.rst +++ /dev/null @@ -1,198 +0,0 @@ -:py:mod:`fairchem.core.preprocessing` -===================================== - -.. py:module:: fairchem.core.preprocessing - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - atoms_to_graphs/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.preprocessing.AtomsToGraphs - - - - -.. py:class:: AtomsToGraphs(max_neigh: int = 200, radius: int = 6, r_energy: bool = False, r_forces: bool = False, r_distances: bool = False, r_edges: bool = True, r_fixed: bool = True, r_pbc: bool = False, r_stress: bool = False, r_data_keys: collections.abc.Sequence[str] | None = None) - - - A class to help convert periodic atomic structures to graphs. - - The AtomsToGraphs class takes in periodic atomic structures in form of ASE atoms objects and converts - them into graph representations for use in PyTorch. The primary purpose of this class is to determine the - nearest neighbors within some radius around each individual atom, taking into account PBC, and set the - pair index and distance between atom pairs appropriately. Lastly, atomic properties and the graph information - are put into a PyTorch geometric data object for use with PyTorch. - - :param max_neigh: Maximum number of neighbors to consider. - :type max_neigh: int - :param radius: Cutoff radius in Angstroms to search for neighbors. - :type radius: int or float - :param r_energy: Return the energy with other properties. Default is False, so the energy will not be returned. - :type r_energy: bool - :param r_forces: Return the forces with other properties. Default is False, so the forces will not be returned. - :type r_forces: bool - :param r_stress: Return the stress with other properties. Default is False, so the stress will not be returned. - :type r_stress: bool - :param r_distances: Return the distances with other properties. - :type r_distances: bool - :param Default is False: - :param so the distances will not be returned.: - :param r_edges: Return interatomic edges with other properties. Default is True, so edges will be returned. - :type r_edges: bool - :param r_fixed: Return a binary vector with flags for fixed (1) vs free (0) atoms. 
- :type r_fixed: bool - :param Default is True: - :param so the fixed indices will be returned.: - :param r_pbc: Return the periodic boundary conditions with other properties. - :type r_pbc: bool - :param Default is False: - :param so the periodic boundary conditions will not be returned.: - :param r_data_keys: Return values corresponding to given keys in atoms.info data with other - :type r_data_keys: sequence of str, optional - :param properties. Default is None: - :param so no data will be returned as properties.: - - .. attribute:: max_neigh - - Maximum number of neighbors to consider. - - :type: int - - .. attribute:: radius - - Cutoff radius in Angstroms to search for neighbors. - - :type: int or float - - .. attribute:: r_energy - - Return the energy with other properties. Default is False, so the energy will not be returned. - - :type: bool - - .. attribute:: r_forces - - Return the forces with other properties. Default is False, so the forces will not be returned. - - :type: bool - - .. attribute:: r_stress - - Return the stress with other properties. Default is False, so the stress will not be returned. - - :type: bool - - .. attribute:: r_distances - - Return the distances with other properties. - - :type: bool - - .. attribute:: Default is False, so the distances will not be returned. - - - - .. attribute:: r_edges - - Return interatomic edges with other properties. Default is True, so edges will be returned. - - :type: bool - - .. attribute:: r_fixed - - Return a binary vector with flags for fixed (1) vs free (0) atoms. - - :type: bool - - .. attribute:: Default is True, so the fixed indices will be returned. - - - - .. attribute:: r_pbc - - Return the periodic boundary conditions with other properties. - - :type: bool - - .. attribute:: Default is False, so the periodic boundary conditions will not be returned. - - - - .. attribute:: r_data_keys - - Return values corresponding to given keys in atoms.info data with other - - :type: sequence of str, optional - - .. attribute:: properties. Default is None, so no data will be returned as properties. - - - - .. py:method:: _get_neighbors_pymatgen(atoms: ase.Atoms) - - Performs nearest neighbor search and returns edge index, distances, - and cell offsets - - - .. py:method:: _reshape_features(c_index, n_index, n_distance, offsets) - - Stacks center and neighbor index and reshapes distances, - takes in np.arrays and returns torch tensors - - - .. py:method:: convert(atoms: ase.Atoms, sid=None) - - Convert a single atomic structure to a graph. - - :param atoms: An ASE atoms object. - :type atoms: ase.atoms.Atoms - :param sid: An identifier that can be used to track the structure in downstream - :type sid: uniquely identifying object - :param tasks. Common sids used in OCP datasets include unique strings or integers.: - - :returns: A torch geometric data object with positions, atomic_numbers, tags, - and optionally, energy, forces, distances, edges, and periodic boundary conditions. - Optional properties can be included by setting r_property=True when constructing the class. - :rtype: data (torch_geometric.data.Data) - - - .. py:method:: convert_all(atoms_collection, processed_file_path: str | None = None, collate_and_save=False, disable_tqdm=False) - - Convert all atoms objects in a list or in an ase.db to graphs. - - :param atoms_collection: - :type atoms_collection: list of ase.atoms.Atoms or ase.db.sqlite.SQLite3Database - :param Either a list of ASE atoms objects or an ASE database.: - :param processed_file_path: - :type processed_file_path: str - :param A string of the path to where the processed file will be written. Default is None.: - :param collate_and_save: A boolean to collate and save or not. Default is False, so will not write a file. - :type collate_and_save: bool - - :returns: A list of torch geometric data objects containing molecular graph info and properties. - :rtype: data_list (list of torch_geometric.data.Data) - - -
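A short, self-contained conversion sketch (the Cu bulk structure, the ``sid`` string, and the constructor values below are illustrative assumptions, not recommended settings)::

    from ase.build import bulk
    from fairchem.core.preprocessing import AtomsToGraphs

    atoms = bulk("Cu", "fcc", a=3.58) * (2, 2, 2)  # toy periodic structure

    a2g = AtomsToGraphs(
        max_neigh=50,    # cap on neighbors per atom
        radius=6,        # neighbor cutoff in Angstroms
        r_energy=False,  # this toy structure has no reference energy/forces
        r_forces=False,
        r_edges=True,    # precompute edge indices and cell offsets
        r_fixed=True,
        r_pbc=True,
    )

    data = a2g.convert(atoms, sid="cu-bulk-0")  # one torch_geometric.data.Data object
    data_list = a2g.convert_all([atoms])        # list of Data objects
    print(data)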
diff --git a/_sources/autoapi/fairchem/core/scripts/download_data/index.rst b/_sources/autoapi/fairchem/core/scripts/download_data/index.rst deleted file mode 100644 index 97c09fa34..000000000 --- a/_sources/autoapi/fairchem/core/scripts/download_data/index.rst +++ /dev/null @@ -1,67 +0,0 @@ -:py:mod:`fairchem.core.scripts.download_data` ============================================= - -.. py:module:: fairchem.core.scripts.download_data - - -Module Contents --------------- - - -Functions ~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.download_data.get_data - fairchem.core.scripts.download_data.uncompress_data - fairchem.core.scripts.download_data.preprocess_data - fairchem.core.scripts.download_data.verify_count - fairchem.core.scripts.download_data.cleanup - - - -Attributes ~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.download_data.DOWNLOAD_LINKS_s2ef - fairchem.core.scripts.download_data.DOWNLOAD_LINKS_is2re - fairchem.core.scripts.download_data.S2EF_COUNTS - fairchem.core.scripts.download_data.parser - - -.. py:data:: DOWNLOAD_LINKS_s2ef - :type: dict[str, dict[str, str]] - - - -.. py:data:: DOWNLOAD_LINKS_is2re - :type: dict[str, str] - - - -.. py:data:: S2EF_COUNTS - - - -.. py:function:: get_data(datadir: str, task: str, split: str | None, del_intmd_files: bool) -> None - - -.. py:function:: uncompress_data(compressed_dir: str) -> str - - -.. py:function:: preprocess_data(uncompressed_dir: str, output_path: str) -> None - - -.. py:function:: verify_count(output_path: str, task: str, split: str) -> None - - -.. py:function:: cleanup(filename: str, dirname: str) -> None - - -.. py:data:: parser - - - diff --git a/_sources/autoapi/fairchem/core/scripts/gif_maker_parallelized/index.rst b/_sources/autoapi/fairchem/core/scripts/gif_maker_parallelized/index.rst deleted file mode 100644 index 8649498a5..000000000 --- a/_sources/autoapi/fairchem/core/scripts/gif_maker_parallelized/index.rst +++ /dev/null @@ -1,57 +0,0 @@ -:py:mod:`fairchem.core.scripts.gif_maker_parallelized` ====================================================== - -.. py:module:: fairchem.core.scripts.gif_maker_parallelized - -.. autoapi-nested-parse:: - - Script to generate gifs from traj - - Note: - This is just a quick way to generate gifs and visualizations from traj; there are many parameters and settings in the code that people can vary to make visualizations better. We have chosen these settings as they seem to work fine for most of our systems. - - Requirements: - - povray - ffmpeg - ase==3.21 - - - -Module Contents --------------- - - -Functions ~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.gif_maker_parallelized.pov_from_atoms - fairchem.core.scripts.gif_maker_parallelized.parallelize_generation - fairchem.core.scripts.gif_maker_parallelized.get_parser - - - -Attributes ~~~~~~~~~~ - -..
autoapisummary:: - - fairchem.core.scripts.gif_maker_parallelized.parser - - -.. py:function:: pov_from_atoms(mp_args) -> None - - -.. py:function:: parallelize_generation(traj_path, out_path: str, n_procs) -> None - - -.. py:function:: get_parser() -> argparse.ArgumentParser - - -.. py:data:: parser - :type: argparse.ArgumentParser - - - diff --git a/_sources/autoapi/fairchem/core/scripts/hpo/index.rst b/_sources/autoapi/fairchem/core/scripts/hpo/index.rst deleted file mode 100644 index 7045188e3..000000000 --- a/_sources/autoapi/fairchem/core/scripts/hpo/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -:py:mod:`fairchem.core.scripts.hpo` -=================================== - -.. py:module:: fairchem.core.scripts.hpo - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - run_tune/index.rst - run_tune_pbt/index.rst - - diff --git a/_sources/autoapi/fairchem/core/scripts/hpo/run_tune/index.rst b/_sources/autoapi/fairchem/core/scripts/hpo/run_tune/index.rst deleted file mode 100644 index 2ce7fcdba..000000000 --- a/_sources/autoapi/fairchem/core/scripts/hpo/run_tune/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -:py:mod:`fairchem.core.scripts.hpo.run_tune` -============================================ - -.. py:module:: fairchem.core.scripts.hpo.run_tune - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.hpo.run_tune.ocp_trainable - fairchem.core.scripts.hpo.run_tune.main - - - -.. py:function:: ocp_trainable(config, checkpoint_dir=None) -> None - - -.. py:function:: main() -> None - - diff --git a/_sources/autoapi/fairchem/core/scripts/hpo/run_tune_pbt/index.rst b/_sources/autoapi/fairchem/core/scripts/hpo/run_tune_pbt/index.rst deleted file mode 100644 index f5c4241c8..000000000 --- a/_sources/autoapi/fairchem/core/scripts/hpo/run_tune_pbt/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -:py:mod:`fairchem.core.scripts.hpo.run_tune_pbt` -================================================ - -.. py:module:: fairchem.core.scripts.hpo.run_tune_pbt - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.hpo.run_tune_pbt.ocp_trainable - fairchem.core.scripts.hpo.run_tune_pbt.main - - - -.. py:function:: ocp_trainable(config, checkpoint_dir=None) -> None - - -.. py:function:: main() -> None - - diff --git a/_sources/autoapi/fairchem/core/scripts/index.rst b/_sources/autoapi/fairchem/core/scripts/index.rst deleted file mode 100644 index 0a19fdfc7..000000000 --- a/_sources/autoapi/fairchem/core/scripts/index.rst +++ /dev/null @@ -1,39 +0,0 @@ -:py:mod:`fairchem.core.scripts` -=============================== - -.. py:module:: fairchem.core.scripts - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - hpo/index.rst - - -Submodules ----------- -.. 
toctree:: - :titlesonly: - :maxdepth: 1 - - download_data/index.rst - gif_maker_parallelized/index.rst - make_challenge_submission_file/index.rst - make_lmdb_sizes/index.rst - make_submission_file/index.rst - preprocess_ef/index.rst - preprocess_relaxed/index.rst - uncompress/index.rst - - diff --git a/_sources/autoapi/fairchem/core/scripts/make_challenge_submission_file/index.rst b/_sources/autoapi/fairchem/core/scripts/make_challenge_submission_file/index.rst deleted file mode 100644 index 30ddb2bee..000000000 --- a/_sources/autoapi/fairchem/core/scripts/make_challenge_submission_file/index.rst +++ /dev/null @@ -1,54 +0,0 @@ -:py:mod:`fairchem.core.scripts.make_challenge_submission_file` -============================================================== - -.. py:module:: fairchem.core.scripts.make_challenge_submission_file - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - ONLY for use in the NeurIPS 2021 Open Catalyst Challenge. For all other submissions - please use make_submission_file.py. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.make_challenge_submission_file.write_is2re_relaxations - fairchem.core.scripts.make_challenge_submission_file.write_predictions - fairchem.core.scripts.make_challenge_submission_file.main - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.make_challenge_submission_file.parser - - -.. py:function:: write_is2re_relaxations(path: str, filename: str, hybrid) -> None - - -.. py:function:: write_predictions(path: str, filename: str) -> None - - -.. py:function:: main(args: argparse.Namespace) -> None - - -.. py:data:: parser - - - diff --git a/_sources/autoapi/fairchem/core/scripts/make_lmdb_sizes/index.rst b/_sources/autoapi/fairchem/core/scripts/make_lmdb_sizes/index.rst deleted file mode 100644 index 67981923c..000000000 --- a/_sources/autoapi/fairchem/core/scripts/make_lmdb_sizes/index.rst +++ /dev/null @@ -1,44 +0,0 @@ -:py:mod:`fairchem.core.scripts.make_lmdb_sizes` -=============================================== - -.. py:module:: fairchem.core.scripts.make_lmdb_sizes - -.. autoapi-nested-parse:: - - This script provides the functionality to generate metadata.npz files necessary - for load_balancing the DataLoader. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.make_lmdb_sizes.get_data - fairchem.core.scripts.make_lmdb_sizes.main - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.make_lmdb_sizes.parser - - -.. py:function:: get_data(index) - - -.. py:function:: main(args) -> None - - -.. py:data:: parser - - - diff --git a/_sources/autoapi/fairchem/core/scripts/make_submission_file/index.rst b/_sources/autoapi/fairchem/core/scripts/make_submission_file/index.rst deleted file mode 100644 index f7a104d41..000000000 --- a/_sources/autoapi/fairchem/core/scripts/make_submission_file/index.rst +++ /dev/null @@ -1,55 +0,0 @@ -:py:mod:`fairchem.core.scripts.make_submission_file` -==================================================== - -.. py:module:: fairchem.core.scripts.make_submission_file - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. 
- - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.make_submission_file.write_is2re_relaxations - fairchem.core.scripts.make_submission_file.write_predictions - fairchem.core.scripts.make_submission_file.main - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.make_submission_file.SPLITS - fairchem.core.scripts.make_submission_file.parser - - -.. py:data:: SPLITS - - - -.. py:function:: write_is2re_relaxations(args) -> None - - -.. py:function:: write_predictions(args) -> None - - -.. py:function:: main(args: argparse.Namespace) -> None - - -.. py:data:: parser - - - diff --git a/_sources/autoapi/fairchem/core/scripts/preprocess_ef/index.rst b/_sources/autoapi/fairchem/core/scripts/preprocess_ef/index.rst deleted file mode 100644 index 85c99b9eb..000000000 --- a/_sources/autoapi/fairchem/core/scripts/preprocess_ef/index.rst +++ /dev/null @@ -1,49 +0,0 @@ -:py:mod:`fairchem.core.scripts.preprocess_ef` -============================================= - -.. py:module:: fairchem.core.scripts.preprocess_ef - -.. autoapi-nested-parse:: - - Creates LMDB files with extracted graph features from provided *.extxyz files - for the S2EF task. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.preprocess_ef.write_images_to_lmdb - fairchem.core.scripts.preprocess_ef.main - fairchem.core.scripts.preprocess_ef.get_parser - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.preprocess_ef.parser - - -.. py:function:: write_images_to_lmdb(mp_arg) - - -.. py:function:: main(args: argparse.Namespace) -> None - - -.. py:function:: get_parser() -> argparse.ArgumentParser - - -.. py:data:: parser - :type: argparse.ArgumentParser - - - diff --git a/_sources/autoapi/fairchem/core/scripts/preprocess_relaxed/index.rst b/_sources/autoapi/fairchem/core/scripts/preprocess_relaxed/index.rst deleted file mode 100644 index 87f280cc7..000000000 --- a/_sources/autoapi/fairchem/core/scripts/preprocess_relaxed/index.rst +++ /dev/null @@ -1,44 +0,0 @@ -:py:mod:`fairchem.core.scripts.preprocess_relaxed` -================================================== - -.. py:module:: fairchem.core.scripts.preprocess_relaxed - -.. autoapi-nested-parse:: - - Creates LMDB files with extracted graph features from provided *.extxyz files - for the S2EF task. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.preprocess_relaxed.write_images_to_lmdb - fairchem.core.scripts.preprocess_relaxed.main - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.preprocess_relaxed.parser - - -.. py:function:: write_images_to_lmdb(mp_arg) -> None - - -.. py:function:: main(args, split) -> None - - -.. py:data:: parser - - - diff --git a/_sources/autoapi/fairchem/core/scripts/uncompress/index.rst b/_sources/autoapi/fairchem/core/scripts/uncompress/index.rst deleted file mode 100644 index 6299ee61d..000000000 --- a/_sources/autoapi/fairchem/core/scripts/uncompress/index.rst +++ /dev/null @@ -1,53 +0,0 @@ -:py:mod:`fairchem.core.scripts.uncompress` -========================================== - -.. py:module:: fairchem.core.scripts.uncompress - -.. autoapi-nested-parse:: - - Uncompresses downloaded S2EF datasets to be used by the LMDB preprocessing - script - preprocess_ef.py - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. 
autoapisummary:: - - fairchem.core.scripts.uncompress.read_lzma - fairchem.core.scripts.uncompress.decompress_list_of_files - fairchem.core.scripts.uncompress.get_parser - fairchem.core.scripts.uncompress.main - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.scripts.uncompress.parser - - -.. py:function:: read_lzma(inpfile: str, outfile: str) -> None - - -.. py:function:: decompress_list_of_files(ip_op_pair: tuple[str, str]) -> None - - -.. py:function:: get_parser() -> argparse.ArgumentParser - - -.. py:function:: main(args: argparse.Namespace) -> None - - -.. py:data:: parser - :type: argparse.ArgumentParser - - - diff --git a/_sources/autoapi/fairchem/core/tasks/index.rst b/_sources/autoapi/fairchem/core/tasks/index.rst deleted file mode 100644 index cc9648135..000000000 --- a/_sources/autoapi/fairchem/core/tasks/index.rst +++ /dev/null @@ -1,70 +0,0 @@ -:py:mod:`fairchem.core.tasks` -============================= - -.. py:module:: fairchem.core.tasks - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - task/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tasks.PredictTask - fairchem.core.tasks.RelaxationTask - fairchem.core.tasks.TrainTask - fairchem.core.tasks.ValidateTask - - - - -.. py:class:: PredictTask(config) - - - Bases: :py:obj:`BaseTask` - - .. py:method:: run() -> None - - - -.. py:class:: RelaxationTask(config) - - - Bases: :py:obj:`BaseTask` - - .. py:method:: run() -> None - - - -.. py:class:: TrainTask(config) - - - Bases: :py:obj:`BaseTask` - - .. py:method:: _process_error(e: RuntimeError) -> None - - - .. py:method:: run() -> None - - - -.. py:class:: ValidateTask(config) - - - Bases: :py:obj:`BaseTask` - - .. py:method:: run() -> None - - - diff --git a/_sources/autoapi/fairchem/core/tasks/task/index.rst b/_sources/autoapi/fairchem/core/tasks/task/index.rst deleted file mode 100644 index 08784d796..000000000 --- a/_sources/autoapi/fairchem/core/tasks/task/index.rst +++ /dev/null @@ -1,81 +0,0 @@ -:py:mod:`fairchem.core.tasks.task` -================================== - -.. py:module:: fairchem.core.tasks.task - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tasks.task.BaseTask - fairchem.core.tasks.task.TrainTask - fairchem.core.tasks.task.PredictTask - fairchem.core.tasks.task.ValidateTask - fairchem.core.tasks.task.RelaxationTask - - - - -.. py:class:: BaseTask(config) - - - .. py:method:: setup(trainer) -> None - - - .. py:method:: run() - :abstractmethod: - - - -.. py:class:: TrainTask(config) - - - Bases: :py:obj:`BaseTask` - - .. py:method:: _process_error(e: RuntimeError) -> None - - - .. py:method:: run() -> None - - - -.. py:class:: PredictTask(config) - - - Bases: :py:obj:`BaseTask` - - .. py:method:: run() -> None - - - -.. py:class:: ValidateTask(config) - - - Bases: :py:obj:`BaseTask` - - .. py:method:: run() -> None - - - -.. py:class:: RelaxationTask(config) - - - Bases: :py:obj:`BaseTask` - - .. 
py:method:: run() -> None - - - diff --git a/_sources/autoapi/fairchem/core/tests/common/test_ase_calculator/index.rst b/_sources/autoapi/fairchem/core/tests/common/test_ase_calculator/index.rst deleted file mode 100644 index b08f38e72..000000000 --- a/_sources/autoapi/fairchem/core/tests/common/test_ase_calculator/index.rst +++ /dev/null @@ -1,46 +0,0 @@ -:py:mod:`fairchem.core.tests.common.test_ase_calculator` -======================================================== - -.. py:module:: fairchem.core.tests.common.test_ase_calculator - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.common.test_ase_calculator.atoms - fairchem.core.tests.common.test_ase_calculator.checkpoint_path - fairchem.core.tests.common.test_ase_calculator.test_calculator_setup - fairchem.core.tests.common.test_ase_calculator.test_relaxation_final_energy - fairchem.core.tests.common.test_ase_calculator.test_random_seed_final_energy - - - -.. py:function:: atoms() -> ase.Atoms - - -.. py:function:: checkpoint_path(request, tmp_path) - - -.. py:function:: test_calculator_setup(checkpoint_path) - - -.. py:function:: test_relaxation_final_energy(atoms, tmp_path, snapshot) -> None - - -.. py:function:: test_random_seed_final_energy(atoms, tmp_path) - - diff --git a/_sources/autoapi/fairchem/core/tests/common/test_data_parallel_batch_sampler/index.rst b/_sources/autoapi/fairchem/core/tests/common/test_data_parallel_batch_sampler/index.rst deleted file mode 100644 index 08cf57e33..000000000 --- a/_sources/autoapi/fairchem/core/tests/common/test_data_parallel_batch_sampler/index.rst +++ /dev/null @@ -1,112 +0,0 @@ -:py:mod:`fairchem.core.tests.common.test_data_parallel_batch_sampler` -===================================================================== - -.. py:module:: fairchem.core.tests.common.test_data_parallel_batch_sampler - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. 
autoapisummary:: - - fairchem.core.tests.common.test_data_parallel_batch_sampler._temp_file - fairchem.core.tests.common.test_data_parallel_batch_sampler.valid_path_dataset - fairchem.core.tests.common.test_data_parallel_batch_sampler.invalid_path_dataset - fairchem.core.tests.common.test_data_parallel_batch_sampler.invalid_dataset - fairchem.core.tests.common.test_data_parallel_batch_sampler.test_lowercase - fairchem.core.tests.common.test_data_parallel_batch_sampler.test_invalid_mode - fairchem.core.tests.common.test_data_parallel_batch_sampler.test_invalid_dataset - fairchem.core.tests.common.test_data_parallel_batch_sampler.test_invalid_path_dataset - fairchem.core.tests.common.test_data_parallel_batch_sampler.test_valid_dataset - fairchem.core.tests.common.test_data_parallel_batch_sampler.test_disabled - fairchem.core.tests.common.test_data_parallel_batch_sampler.test_single_node - fairchem.core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_noshuffle - fairchem.core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_vs_distributed_sampler - fairchem.core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler - fairchem.core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_numreplicas - fairchem.core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_numreplicas_drop_last - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.common.test_data_parallel_batch_sampler.DATA - fairchem.core.tests.common.test_data_parallel_batch_sampler.SIZE_ATOMS - fairchem.core.tests.common.test_data_parallel_batch_sampler.SIZE_NEIGHBORS - fairchem.core.tests.common.test_data_parallel_batch_sampler.T_co - - -.. py:data:: DATA - :value: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - - - -.. py:data:: SIZE_ATOMS - :value: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - - - -.. py:data:: SIZE_NEIGHBORS - :value: [4, 4, 4, 4, 4, 4, 4, 4, 4, 4] - - - -.. py:data:: T_co - - - -.. py:function:: _temp_file(name: str) - - -.. py:function:: valid_path_dataset() - - -.. py:function:: invalid_path_dataset() - - -.. py:function:: invalid_dataset() - - -.. py:function:: test_lowercase(invalid_dataset) -> None - - -.. py:function:: test_invalid_mode(invalid_dataset) -> None - - -.. py:function:: test_invalid_dataset(invalid_dataset) -> None - - -.. py:function:: test_invalid_path_dataset(invalid_path_dataset) -> None - - -.. py:function:: test_valid_dataset(valid_path_dataset) -> None - - -.. py:function:: test_disabled(valid_path_dataset) -> None - - -.. py:function:: test_single_node(valid_path_dataset) -> None - - -.. py:function:: test_stateful_distributed_sampler_noshuffle(valid_path_dataset) -> None - - -.. py:function:: test_stateful_distributed_sampler_vs_distributed_sampler(valid_path_dataset) -> None - - -.. py:function:: test_stateful_distributed_sampler(valid_path_dataset) -> None - - -.. py:function:: test_stateful_distributed_sampler_numreplicas(valid_path_dataset) -> None - - -.. 
py:function:: test_stateful_distributed_sampler_numreplicas_drop_last(valid_path_dataset) -> None - - diff --git a/_sources/autoapi/fairchem/core/tests/common/test_yaml_loader/index.rst b/_sources/autoapi/fairchem/core/tests/common/test_yaml_loader/index.rst deleted file mode 100644 index a387b4074..000000000 --- a/_sources/autoapi/fairchem/core/tests/common/test_yaml_loader/index.rst +++ /dev/null @@ -1,34 +0,0 @@ -:py:mod:`fairchem.core.tests.common.test_yaml_loader` -===================================================== - -.. py:module:: fairchem.core.tests.common.test_yaml_loader - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.common.test_yaml_loader.invalid_yaml_config - fairchem.core.tests.common.test_yaml_loader.valid_yaml_config - fairchem.core.tests.common.test_yaml_loader.test_invalid_config - fairchem.core.tests.common.test_yaml_loader.test_valid_config - - - -.. py:function:: invalid_yaml_config() - - -.. py:function:: valid_yaml_config() - - -.. py:function:: test_invalid_config(invalid_yaml_config) - - -.. py:function:: test_valid_config(valid_yaml_config) - - diff --git a/_sources/autoapi/fairchem/core/tests/conftest/index.rst b/_sources/autoapi/fairchem/core/tests/conftest/index.rst deleted file mode 100644 index 1a2997a2c..000000000 --- a/_sources/autoapi/fairchem/core/tests/conftest/index.rst +++ /dev/null @@ -1,110 +0,0 @@ -:py:mod:`fairchem.core.tests.conftest` -====================================== - -.. py:module:: fairchem.core.tests.conftest - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.conftest.Approx - fairchem.core.tests.conftest._ApproxNumpyFormatter - fairchem.core.tests.conftest.ApproxExtension - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.conftest._try_parse_approx - fairchem.core.tests.conftest.snapshot - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.conftest.DEFAULT_RTOL - fairchem.core.tests.conftest.DEFAULT_ATOL - - -.. py:data:: DEFAULT_RTOL - :value: 0.001 - - - -.. py:data:: DEFAULT_ATOL - :value: 0.001 - - - -.. py:class:: Approx(data: numpy.ndarray | list, *, rtol: float | None = None, atol: float | None = None) - - - Wrapper object for approximately compared numpy arrays. - - .. py:method:: __repr__() -> str - - Return repr(self). - - - -.. py:class:: _ApproxNumpyFormatter(data) - - - .. py:method:: __repr__() -> str - - Return repr(self). - - - -.. py:function:: _try_parse_approx(data: syrupy.types.SerializableData) -> Approx | None - - Parse the string representation of an Approx object. - We can just use eval here, since we know the string is safe. - - -.. py:class:: ApproxExtension - - - Bases: :py:obj:`syrupy.extensions.amber.AmberSnapshotExtension` - - By default, syrupy uses the __repr__ of the expected (snapshot) and actual values - to serialize them into strings. Then, it compares the strings to see if they match. - - However, this behavior is not ideal for comparing floats/ndarrays. For example, - if we have a snapshot with a float value of 0.1, and the actual value is 0.10000000000000001, - then the strings will not match, even though the values are effectively equal. 
- - To work around this, we override the serialize method to serialize the expected value - into a special representation. Then, we override the matches function (which originally does a - simple string comparison) to parse the expected and actual values into numpy arrays. - Finally, we compare the arrays using np.allclose. - - .. py:method:: matches(*, serialized_data: syrupy.types.SerializableData, snapshot_data: syrupy.types.SerializableData) -> bool - - - .. py:method:: serialize(data, **kwargs) - - - -.. py:function:: snapshot(snapshot) - - diff --git a/_sources/autoapi/fairchem/core/tests/datasets/test_ase_datasets/index.rst b/_sources/autoapi/fairchem/core/tests/datasets/test_ase_datasets/index.rst deleted file mode 100644 index 5e5d4cfec..000000000 --- a/_sources/autoapi/fairchem/core/tests/datasets/test_ase_datasets/index.rst +++ /dev/null @@ -1,63 +0,0 @@ -:py:mod:`fairchem.core.tests.datasets.test_ase_datasets` ======================================================== - -.. py:module:: fairchem.core.tests.datasets.test_ase_datasets - - -Module Contents --------------- - - -Functions ~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.datasets.test_ase_datasets.ase_dataset - fairchem.core.tests.datasets.test_ase_datasets.test_ase_dataset - fairchem.core.tests.datasets.test_ase_datasets.test_ase_read_dataset - fairchem.core.tests.datasets.test_ase_datasets.test_ase_metadata_guesser - fairchem.core.tests.datasets.test_ase_datasets.test_db_add_delete - fairchem.core.tests.datasets.test_ase_datasets.test_ase_multiread_dataset - fairchem.core.tests.datasets.test_ase_datasets.test_empty_dataset - - - -Attributes ~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.datasets.test_ase_datasets.structures - fairchem.core.tests.datasets.test_ase_datasets.calc - - -.. py:data:: structures - - - -.. py:data:: calc - - - -.. py:function:: ase_dataset(request, tmp_path_factory) - - -.. py:function:: test_ase_dataset(ase_dataset) - - -.. py:function:: test_ase_read_dataset(tmp_path) -> None - - -.. py:function:: test_ase_metadata_guesser(ase_dataset) -> None - - -.. py:function:: test_db_add_delete(tmp_path) -> None - - -.. py:function:: test_ase_multiread_dataset(tmp_path) -> None - - -.. py:function:: test_empty_dataset(tmp_path) - - diff --git a/_sources/autoapi/fairchem/core/tests/datasets/test_ase_lmdb/index.rst b/_sources/autoapi/fairchem/core/tests/datasets/test_ase_lmdb/index.rst deleted file mode 100644 index 0fb5fde70..000000000 --- a/_sources/autoapi/fairchem/core/tests/datasets/test_ase_lmdb/index.rst +++ /dev/null @@ -1,82 +0,0 @@ -:py:mod:`fairchem.core.tests.datasets.test_ase_lmdb` ==================================================== - -.. py:module:: fairchem.core.tests.datasets.test_ase_lmdb - - -Module Contents --------------- - - -Functions ~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.datasets.test_ase_lmdb.generate_random_structure - fairchem.core.tests.datasets.test_ase_lmdb.ase_lmbd_path - fairchem.core.tests.datasets.test_ase_lmdb.test_aselmdb_write - fairchem.core.tests.datasets.test_ase_lmdb.test_aselmdb_count - fairchem.core.tests.datasets.test_ase_lmdb.test_aselmdb_delete - fairchem.core.tests.datasets.test_ase_lmdb.test_aselmdb_randomreads - fairchem.core.tests.datasets.test_ase_lmdb.test_aselmdb_constraintread - fairchem.core.tests.datasets.test_ase_lmdb.test_update_keyvalue_pair - fairchem.core.tests.datasets.test_ase_lmdb.test_update_atoms - fairchem.core.tests.datasets.test_ase_lmdb.test_metadata - - - -Attributes ~~~~~~~~~~ - -..
autoapisummary:: - - fairchem.core.tests.datasets.test_ase_lmdb.N_WRITES - fairchem.core.tests.datasets.test_ase_lmdb.N_READS - fairchem.core.tests.datasets.test_ase_lmdb.test_structures - - -.. py:data:: N_WRITES - :value: 100 - - - -.. py:data:: N_READS - :value: 200 - - - -.. py:data:: test_structures - - - -.. py:function:: generate_random_structure() - - -.. py:function:: ase_lmbd_path(tmp_path_factory) - - -.. py:function:: test_aselmdb_write(ase_lmbd_path) -> None - - -.. py:function:: test_aselmdb_count(ase_lmbd_path) -> None - - -.. py:function:: test_aselmdb_delete(ase_lmbd_path) -> None - - -.. py:function:: test_aselmdb_randomreads(ase_lmbd_path) -> None - - -.. py:function:: test_aselmdb_constraintread(ase_lmbd_path) -> None - - -.. py:function:: test_update_keyvalue_pair(ase_lmbd_path) -> None - - -.. py:function:: test_update_atoms(ase_lmbd_path) -> None - - -.. py:function:: test_metadata(ase_lmbd_path) -> None - - diff --git a/_sources/autoapi/fairchem/core/tests/datasets/test_utils/index.rst b/_sources/autoapi/fairchem/core/tests/datasets/test_utils/index.rst deleted file mode 100644 index ce7efb46d..000000000 --- a/_sources/autoapi/fairchem/core/tests/datasets/test_utils/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -:py:mod:`fairchem.core.tests.datasets.test_utils` -================================================= - -.. py:module:: fairchem.core.tests.datasets.test_utils - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.datasets.test_utils.pyg_data - fairchem.core.tests.datasets.test_utils.test_rename_data_object_keys - - - -.. py:function:: pyg_data() - - -.. py:function:: test_rename_data_object_keys(pyg_data) - - diff --git a/_sources/autoapi/fairchem/core/tests/evaluator/test_evaluator/index.rst b/_sources/autoapi/fairchem/core/tests/evaluator/test_evaluator/index.rst deleted file mode 100644 index fbbf99048..000000000 --- a/_sources/autoapi/fairchem/core/tests/evaluator/test_evaluator/index.rst +++ /dev/null @@ -1,80 +0,0 @@ -:py:mod:`fairchem.core.tests.evaluator.test_evaluator` -====================================================== - -.. py:module:: fairchem.core.tests.evaluator.test_evaluator - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.evaluator.test_evaluator.TestMetrics - fairchem.core.tests.evaluator.test_evaluator.TestS2EFEval - fairchem.core.tests.evaluator.test_evaluator.TestIS2RSEval - fairchem.core.tests.evaluator.test_evaluator.TestIS2REEval - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.evaluator.test_evaluator.load_evaluator_s2ef - fairchem.core.tests.evaluator.test_evaluator.load_evaluator_is2rs - fairchem.core.tests.evaluator.test_evaluator.load_evaluator_is2re - - - -.. py:function:: load_evaluator_s2ef(request) -> None - - -.. py:function:: load_evaluator_is2rs(request) -> None - - -.. py:function:: load_evaluator_is2re(request) -> None - - -.. py:class:: TestMetrics - - - .. py:method:: test_cosine_similarity() -> None - - - .. py:method:: test_magnitude_error() -> None - - - -.. py:class:: TestS2EFEval - - - .. py:method:: test_metrics_exist() -> None - - - -.. py:class:: TestIS2RSEval - - - .. py:method:: test_metrics_exist() -> None - - - -.. py:class:: TestIS2REEval - - - .. 
py:method:: test_metrics_exist() -> None - - - diff --git a/_sources/autoapi/fairchem/core/tests/index.rst b/_sources/autoapi/fairchem/core/tests/index.rst deleted file mode 100644 index 5d7a0ac78..000000000 --- a/_sources/autoapi/fairchem/core/tests/index.rst +++ /dev/null @@ -1,32 +0,0 @@ -:py:mod:`fairchem.core.tests` -============================= - -.. py:module:: fairchem.core.tests - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - preprocessing/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - conftest/index.rst - - diff --git a/_sources/autoapi/fairchem/core/tests/models/test_dimenetpp/index.rst b/_sources/autoapi/fairchem/core/tests/models/test_dimenetpp/index.rst deleted file mode 100644 index fb326096f..000000000 --- a/_sources/autoapi/fairchem/core/tests/models/test_dimenetpp/index.rst +++ /dev/null @@ -1,52 +0,0 @@ -:py:mod:`fairchem.core.tests.models.test_dimenetpp` -=================================================== - -.. py:module:: fairchem.core.tests.models.test_dimenetpp - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.models.test_dimenetpp.TestDimeNet - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.models.test_dimenetpp.load_data - fairchem.core.tests.models.test_dimenetpp.load_model - - - -.. py:function:: load_data(request) -> None - - -.. py:function:: load_model(request) -> None - - -.. py:class:: TestDimeNet - - - .. py:method:: test_rotation_invariance() -> None - - - .. py:method:: test_energy_force_shape(snapshot) -> None - - - diff --git a/_sources/autoapi/fairchem/core/tests/models/test_equiformer_v2/index.rst b/_sources/autoapi/fairchem/core/tests/models/test_equiformer_v2/index.rst deleted file mode 100644 index 9840ba2d2..000000000 --- a/_sources/autoapi/fairchem/core/tests/models/test_equiformer_v2/index.rst +++ /dev/null @@ -1,57 +0,0 @@ -:py:mod:`fairchem.core.tests.models.test_equiformer_v2` -======================================================= - -.. py:module:: fairchem.core.tests.models.test_equiformer_v2 - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.models.test_equiformer_v2.TestEquiformerV2 - fairchem.core.tests.models.test_equiformer_v2.TestMPrimaryLPrimary - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.models.test_equiformer_v2.load_data - fairchem.core.tests.models.test_equiformer_v2.load_model - - - -.. py:function:: load_data(request) - - -.. py:function:: load_model(request) - - -.. py:class:: TestEquiformerV2 - - - .. py:method:: test_energy_force_shape(snapshot) - - - -.. py:class:: TestMPrimaryLPrimary - - - .. 
py:method:: test_mprimary_lprimary_mappings() - - - diff --git a/_sources/autoapi/fairchem/core/tests/models/test_escn/index.rst b/_sources/autoapi/fairchem/core/tests/models/test_escn/index.rst deleted file mode 100644 index 65485a157..000000000 --- a/_sources/autoapi/fairchem/core/tests/models/test_escn/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -:py:mod:`fairchem.core.tests.models.test_escn` -============================================== - -.. py:module:: fairchem.core.tests.models.test_escn - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.models.test_escn.TestMPrimaryLPrimary - - - - -.. py:class:: TestMPrimaryLPrimary - - - .. py:method:: test_mprimary_lprimary_mappings() - - - diff --git a/_sources/autoapi/fairchem/core/tests/models/test_gemnet/index.rst b/_sources/autoapi/fairchem/core/tests/models/test_gemnet/index.rst deleted file mode 100644 index 862604058..000000000 --- a/_sources/autoapi/fairchem/core/tests/models/test_gemnet/index.rst +++ /dev/null @@ -1,52 +0,0 @@ -:py:mod:`fairchem.core.tests.models.test_gemnet` -================================================ - -.. py:module:: fairchem.core.tests.models.test_gemnet - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.models.test_gemnet.TestGemNetT - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.models.test_gemnet.load_data - fairchem.core.tests.models.test_gemnet.load_model - - - -.. py:function:: load_data(request) -> None - - -.. py:function:: load_model(request) -> None - - -.. py:class:: TestGemNetT - - - .. py:method:: test_rotation_invariance() -> None - - - .. py:method:: test_energy_force_shape(snapshot) -> None - - - diff --git a/_sources/autoapi/fairchem/core/tests/models/test_gemnet_oc/index.rst b/_sources/autoapi/fairchem/core/tests/models/test_gemnet_oc/index.rst deleted file mode 100644 index 5f9f49be8..000000000 --- a/_sources/autoapi/fairchem/core/tests/models/test_gemnet_oc/index.rst +++ /dev/null @@ -1,52 +0,0 @@ -:py:mod:`fairchem.core.tests.models.test_gemnet_oc` -=================================================== - -.. py:module:: fairchem.core.tests.models.test_gemnet_oc - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.models.test_gemnet_oc.TestGemNetOC - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.models.test_gemnet_oc.load_data - fairchem.core.tests.models.test_gemnet_oc.load_model - - - -.. py:function:: load_data(request) -> None - - -.. py:function:: load_model(request) -> None - - -.. py:class:: TestGemNetOC - - - .. py:method:: test_rotation_invariance() -> None - - - .. 
py:method:: test_energy_force_shape(snapshot) -> None - - - diff --git a/_sources/autoapi/fairchem/core/tests/models/test_gemnet_oc_scaling_mismatch/index.rst b/_sources/autoapi/fairchem/core/tests/models/test_gemnet_oc_scaling_mismatch/index.rst deleted file mode 100644 index 827d079f0..000000000 --- a/_sources/autoapi/fairchem/core/tests/models/test_gemnet_oc_scaling_mismatch/index.rst +++ /dev/null @@ -1,43 +0,0 @@ -:py:mod:`fairchem.core.tests.models.test_gemnet_oc_scaling_mismatch` -==================================================================== - -.. py:module:: fairchem.core.tests.models.test_gemnet_oc_scaling_mismatch - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.models.test_gemnet_oc_scaling_mismatch.TestGemNetOC - - - - -.. py:class:: TestGemNetOC - - - .. py:method:: test_no_scaling_mismatch() -> None - - - .. py:method:: test_scaling_mismatch() -> None - - - .. py:method:: test_no_file_exists() -> None - - - .. py:method:: test_not_fitted() -> None - - - diff --git a/_sources/autoapi/fairchem/core/tests/models/test_schnet/index.rst b/_sources/autoapi/fairchem/core/tests/models/test_schnet/index.rst deleted file mode 100644 index 89deac8d2..000000000 --- a/_sources/autoapi/fairchem/core/tests/models/test_schnet/index.rst +++ /dev/null @@ -1,52 +0,0 @@ -:py:mod:`fairchem.core.tests.models.test_schnet` -================================================ - -.. py:module:: fairchem.core.tests.models.test_schnet - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.models.test_schnet.TestSchNet - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.models.test_schnet.load_data - fairchem.core.tests.models.test_schnet.load_model - - - -.. py:function:: load_data(request) -> None - - -.. py:function:: load_model(request) -> None - - -.. py:class:: TestSchNet - - - .. py:method:: test_rotation_invariance() -> None - - - .. py:method:: test_energy_force_shape(snapshot) -> None - - - diff --git a/_sources/autoapi/fairchem/core/tests/preprocessing/index.rst b/_sources/autoapi/fairchem/core/tests/preprocessing/index.rst deleted file mode 100644 index 0dad4e5ec..000000000 --- a/_sources/autoapi/fairchem/core/tests/preprocessing/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -:py:mod:`fairchem.core.tests.preprocessing` -=========================================== - -.. py:module:: fairchem.core.tests.preprocessing - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Submodules ----------- -.. 
toctree:: - :titlesonly: - :maxdepth: 1 - - test_atoms_to_graphs/index.rst - test_pbc/index.rst - test_radius_graph_pbc/index.rst - - diff --git a/_sources/autoapi/fairchem/core/tests/preprocessing/test_atoms_to_graphs/index.rst b/_sources/autoapi/fairchem/core/tests/preprocessing/test_atoms_to_graphs/index.rst deleted file mode 100644 index b821c1477..000000000 --- a/_sources/autoapi/fairchem/core/tests/preprocessing/test_atoms_to_graphs/index.rst +++ /dev/null @@ -1,51 +0,0 @@ -:py:mod:`fairchem.core.tests.preprocessing.test_atoms_to_graphs` -================================================================ - -.. py:module:: fairchem.core.tests.preprocessing.test_atoms_to_graphs - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.preprocessing.test_atoms_to_graphs.TestAtomsToGraphs - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.preprocessing.test_atoms_to_graphs.atoms_to_graphs_internals - - - -.. py:function:: atoms_to_graphs_internals(request) -> None - - -.. py:class:: TestAtomsToGraphs - - - .. py:method:: test_gen_neighbors_pymatgen() -> None - - - .. py:method:: test_convert() -> None - - - .. py:method:: test_convert_all() -> None - - - diff --git a/_sources/autoapi/fairchem/core/tests/preprocessing/test_pbc/index.rst b/_sources/autoapi/fairchem/core/tests/preprocessing/test_pbc/index.rst deleted file mode 100644 index 355ad43fb..000000000 --- a/_sources/autoapi/fairchem/core/tests/preprocessing/test_pbc/index.rst +++ /dev/null @@ -1,45 +0,0 @@ -:py:mod:`fairchem.core.tests.preprocessing.test_pbc` -==================================================== - -.. py:module:: fairchem.core.tests.preprocessing.test_pbc - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.preprocessing.test_pbc.TestPBC - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.preprocessing.test_pbc.load_data - - - -.. py:function:: load_data(request) -> None - - -.. py:class:: TestPBC - - - .. py:method:: test_pbc_distances() -> None - - - diff --git a/_sources/autoapi/fairchem/core/tests/preprocessing/test_radius_graph_pbc/index.rst b/_sources/autoapi/fairchem/core/tests/preprocessing/test_radius_graph_pbc/index.rst deleted file mode 100644 index 5037a0e7a..000000000 --- a/_sources/autoapi/fairchem/core/tests/preprocessing/test_radius_graph_pbc/index.rst +++ /dev/null @@ -1,55 +0,0 @@ -:py:mod:`fairchem.core.tests.preprocessing.test_radius_graph_pbc` -================================================================= - -.. py:module:: fairchem.core.tests.preprocessing.test_radius_graph_pbc - -.. autoapi-nested-parse:: - - Copyright (c) Facebook, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.tests.preprocessing.test_radius_graph_pbc.TestRadiusGraphPBC - - - -Functions -~~~~~~~~~ - -.. 
autoapisummary:: - - fairchem.core.tests.preprocessing.test_radius_graph_pbc.load_data - fairchem.core.tests.preprocessing.test_radius_graph_pbc.check_features_match - - - -.. py:function:: load_data(request) -> None - - -.. py:function:: check_features_match(edge_index_1, cell_offsets_1, edge_index_2, cell_offsets_2) -> bool - - -.. py:class:: TestRadiusGraphPBC - - - .. py:method:: test_radius_graph_pbc() -> None - - - .. py:method:: test_bulk() -> None - - - .. py:method:: test_molecule() -> None - - - diff --git a/_sources/autoapi/fairchem/core/trainers/base_trainer/index.rst b/_sources/autoapi/fairchem/core/trainers/base_trainer/index.rst deleted file mode 100644 index 1af814235..000000000 --- a/_sources/autoapi/fairchem/core/trainers/base_trainer/index.rst +++ /dev/null @@ -1,103 +0,0 @@ -:py:mod:`fairchem.core.trainers.base_trainer` -============================================= - -.. py:module:: fairchem.core.trainers.base_trainer - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.trainers.base_trainer.BaseTrainer - - - - -.. py:class:: BaseTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier: str, timestamp_id: str | None = None, run_dir: str | None = None, is_debug: bool = False, print_every: int = 100, seed: int | None = None, logger: str = 'wandb', local_rank: int = 0, amp: bool = False, cpu: bool = False, name: str = 'ocp', slurm=None, noddp: bool = False) - - - Bases: :py:obj:`abc.ABC` - - Helper class that provides a standard way to create an ABC using - inheritance. - - .. py:property:: _unwrapped_model - - - .. py:method:: train(disable_eval_tqdm: bool = False) -> None - :abstractmethod: - - Run model training iterations. - - - .. py:method:: _get_timestamp(device: torch.device, suffix: str | None) -> str - :staticmethod: - - - .. py:method:: load() -> None - - - .. py:method:: set_seed(seed) -> None - - - .. py:method:: load_seed_from_config() -> None - - - .. py:method:: load_logger() -> None - - - .. py:method:: get_sampler(dataset, batch_size: int, shuffle: bool) -> fairchem.core.common.data_parallel.BalancedBatchSampler - - - .. py:method:: get_dataloader(dataset, sampler) -> torch.utils.data.DataLoader - - - .. py:method:: load_datasets() -> None - - - .. py:method:: load_task() - - - .. py:method:: load_model() -> None - - - .. py:method:: load_checkpoint(checkpoint_path: str, checkpoint: dict | None = None) -> None - - - .. py:method:: load_loss() -> None - - - .. py:method:: load_optimizer() -> None - - - .. py:method:: load_extras() -> None - - - .. py:method:: save(metrics=None, checkpoint_file: str = 'checkpoint.pt', training_state: bool = True) -> str | None - - - .. py:method:: update_best(primary_metric, val_metrics, disable_eval_tqdm: bool = True) -> None - - - .. py:method:: validate(split: str = 'val', disable_tqdm: bool = False) - - - .. py:method:: _backward(loss) -> None - - - .. 
py:method:: save_results(predictions: dict[str, numpy.typing.NDArray], results_file: str | None, keys: collections.abc.Sequence[str] | None = None) -> None - - - diff --git a/_sources/autoapi/fairchem/core/trainers/index.rst b/_sources/autoapi/fairchem/core/trainers/index.rst deleted file mode 100644 index 4ea569b49..000000000 --- a/_sources/autoapi/fairchem/core/trainers/index.rst +++ /dev/null @@ -1,184 +0,0 @@ -:py:mod:`fairchem.core.trainers` -================================ - -.. py:module:: fairchem.core.trainers - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - base_trainer/index.rst - ocp_trainer/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.trainers.BaseTrainer - fairchem.core.trainers.OCPTrainer - - - - -.. py:class:: BaseTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier: str, timestamp_id: str | None = None, run_dir: str | None = None, is_debug: bool = False, print_every: int = 100, seed: int | None = None, logger: str = 'wandb', local_rank: int = 0, amp: bool = False, cpu: bool = False, name: str = 'ocp', slurm=None, noddp: bool = False) - - - Bases: :py:obj:`abc.ABC` - - Helper class that provides a standard way to create an ABC using - inheritance. - - .. py:property:: _unwrapped_model - - - .. py:method:: train(disable_eval_tqdm: bool = False) -> None - :abstractmethod: - - Run model training iterations. - - - .. py:method:: _get_timestamp(device: torch.device, suffix: str | None) -> str - :staticmethod: - - - .. py:method:: load() -> None - - - .. py:method:: set_seed(seed) -> None - - - .. py:method:: load_seed_from_config() -> None - - - .. py:method:: load_logger() -> None - - - .. py:method:: get_sampler(dataset, batch_size: int, shuffle: bool) -> fairchem.core.common.data_parallel.BalancedBatchSampler - - - .. py:method:: get_dataloader(dataset, sampler) -> torch.utils.data.DataLoader - - - .. py:method:: load_datasets() -> None - - - .. py:method:: load_task() - - - .. py:method:: load_model() -> None - - - .. py:method:: load_checkpoint(checkpoint_path: str, checkpoint: dict | None = None) -> None - - - .. py:method:: load_loss() -> None - - - .. py:method:: load_optimizer() -> None - - - .. py:method:: load_extras() -> None - - - .. py:method:: save(metrics=None, checkpoint_file: str = 'checkpoint.pt', training_state: bool = True) -> str | None - - - .. py:method:: update_best(primary_metric, val_metrics, disable_eval_tqdm: bool = True) -> None - - - .. py:method:: validate(split: str = 'val', disable_tqdm: bool = False) - - - .. py:method:: _backward(loss) -> None - - - .. py:method:: save_results(predictions: dict[str, numpy.typing.NDArray], results_file: str | None, keys: collections.abc.Sequence[str] | None = None) -> None - - - -.. py:class:: OCPTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp') - - - Bases: :py:obj:`fairchem.core.trainers.base_trainer.BaseTrainer` - - Trainer class for the Structure to Energy & Force (S2EF) and Initial State to - Relaxed State (IS2RS) tasks. - - .. note:: - - Examples of configurations for task, model, dataset and optimizer - can be found in `configs/ocp_s2ef `_ - and `configs/ocp_is2rs `_. - - :param task: Task configuration. - :type task: dict - :param model: Model configuration. 
- :type model: dict - :param outputs: Output property configuration. - :type outputs: dict - :param dataset: Dataset configuration. The dataset needs to be a SinglePointLMDB dataset. - :type dataset: dict - :param optimizer: Optimizer configuration. - :type optimizer: dict - :param loss_fns: Loss function configuration. - :type loss_fns: dict - :param eval_metrics: Evaluation metrics configuration. - :type eval_metrics: dict - :param identifier: Experiment identifier that is appended to log directory. - :type identifier: str - :param run_dir: Path to the run directory where logs are to be saved. - (default: :obj:`None`) - :type run_dir: str, optional - :param is_debug: Run in debug mode. - (default: :obj:`False`) - :type is_debug: bool, optional - :param print_every: Frequency of printing logs. - (default: :obj:`100`) - :type print_every: int, optional - :param seed: Random number seed. - (default: :obj:`None`) - :type seed: int, optional - :param logger: Type of logger to be used. - (default: :obj:`wandb`) - :type logger: str, optional - :param local_rank: Local rank of the process, only applicable for distributed training. - (default: :obj:`0`) - :type local_rank: int, optional - :param amp: Run using automatic mixed precision. - (default: :obj:`False`) - :type amp: bool, optional - :param slurm: Slurm configuration. Currently just for keeping track. - (default: :obj:`{}`) - :type slurm: dict - :param noddp: Run model without DDP. - :type noddp: bool, optional - - .. py:method:: train(disable_eval_tqdm: bool = False) -> None - - Run model training iterations. - - - .. py:method:: _forward(batch) - - - .. py:method:: _compute_loss(out, batch) - - - .. py:method:: _compute_metrics(out, batch, evaluator, metrics=None) - - - .. py:method:: predict(data_loader, per_image: bool = True, results_file: str | None = None, disable_tqdm: bool = False) - - - .. py:method:: run_relaxations(split='val') - - - diff --git a/_sources/autoapi/fairchem/core/trainers/ocp_trainer/index.rst b/_sources/autoapi/fairchem/core/trainers/ocp_trainer/index.rst deleted file mode 100644 index 751037009..000000000 --- a/_sources/autoapi/fairchem/core/trainers/ocp_trainer/index.rst +++ /dev/null @@ -1,105 +0,0 @@ -:py:mod:`fairchem.core.trainers.ocp_trainer` -============================================ - -.. py:module:: fairchem.core.trainers.ocp_trainer - -.. autoapi-nested-parse:: - - Copyright (c) Meta, Inc. and its affiliates. - - This source code is licensed under the MIT license found in the - LICENSE file in the root directory of this source tree. - - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.core.trainers.ocp_trainer.OCPTrainer - - - - -.. py:class:: OCPTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp') - - - Bases: :py:obj:`fairchem.core.trainers.base_trainer.BaseTrainer` - - Trainer class for the Structure to Energy & Force (S2EF) and Initial State to - Relaxed State (IS2RS) tasks. - - .. note:: - - Examples of configurations for task, model, dataset and optimizer - can be found in `configs/ocp_s2ef `_ - and `configs/ocp_is2rs `_. - - :param task: Task configuration. - :type task: dict - :param model: Model configuration. - :type model: dict - :param outputs: Output property configuration. - :type outputs: dict - :param dataset: Dataset configuration. 
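To make the `OCPTrainer` entry above more concrete, here is a schematic construction and training call. The nested config dictionaries are placeholders that must be filled from a real fairchem config before this will run; it is illustrative, not a runnable recipe.

.. code-block:: python

    # Schematic sketch of the OCPTrainer documented above; the empty dicts
    # are placeholders for real task/model/dataset/optimizer configuration.
    from fairchem.core.trainers import OCPTrainer

    configs = dict(
        task={},          # task configuration
        model={},         # model configuration
        outputs={},       # output property configuration
        dataset={},       # dataset configuration (SinglePointLMDB-style)
        optimizer={},     # optimizer configuration
        loss_fns={},      # loss function configuration
        eval_metrics={},  # evaluation metrics configuration
    )

    trainer = OCPTrainer(
        **configs,
        identifier="my-experiment",
        logger="tensorboard",
        amp=False,
        cpu=True,
    )

    trainer.train()                          # run training iterations
    metrics = trainer.validate(split="val")  # evaluate on the validation split
    # trainer.predict(...) and trainer.run_relaxations() cover inference and
    # IS2RS-style relaxations, per the method summary above.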
The dataset needs to be a SinglePointLMDB dataset. - :type dataset: dict - :param optimizer: Optimizer configuration. - :type optimizer: dict - :param loss_fns: Loss function configuration. - :type loss_fns: dict - :param eval_metrics: Evaluation metrics configuration. - :type eval_metrics: dict - :param identifier: Experiment identifier that is appended to log directory. - :type identifier: str - :param run_dir: Path to the run directory where logs are to be saved. - (default: :obj:`None`) - :type run_dir: str, optional - :param is_debug: Run in debug mode. - (default: :obj:`False`) - :type is_debug: bool, optional - :param print_every: Frequency of printing logs. - (default: :obj:`100`) - :type print_every: int, optional - :param seed: Random number seed. - (default: :obj:`None`) - :type seed: int, optional - :param logger: Type of logger to be used. - (default: :obj:`wandb`) - :type logger: str, optional - :param local_rank: Local rank of the process, only applicable for distributed training. - (default: :obj:`0`) - :type local_rank: int, optional - :param amp: Run using automatic mixed precision. - (default: :obj:`False`) - :type amp: bool, optional - :param slurm: Slurm configuration. Currently just for keeping track. - (default: :obj:`{}`) - :type slurm: dict - :param noddp: Run model without DDP. - :type noddp: bool, optional - - .. py:method:: train(disable_eval_tqdm: bool = False) -> None - - Run model training iterations. - - - .. py:method:: _forward(batch) - - - .. py:method:: _compute_loss(out, batch) - - - .. py:method:: _compute_metrics(out, batch, evaluator, metrics=None) - - - .. py:method:: predict(data_loader, per_image: bool = True, results_file: str | None = None, disable_tqdm: bool = False) - - - .. py:method:: run_relaxations(split='val') - - - diff --git a/_sources/autoapi/fairchem/data/oc/core/adsorbate/index.rst b/_sources/autoapi/fairchem/data/oc/core/adsorbate/index.rst deleted file mode 100644 index 67e0fcbc6..000000000 --- a/_sources/autoapi/fairchem/data/oc/core/adsorbate/index.rst +++ /dev/null @@ -1,75 +0,0 @@ -:py:mod:`fairchem.data.oc.core.adsorbate` -========================================= - -.. py:module:: fairchem.data.oc.core.adsorbate - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.core.adsorbate.Adsorbate - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.core.adsorbate.randomly_rotate_adsorbate - - - -.. py:class:: Adsorbate(adsorbate_atoms: ase.Atoms = None, adsorbate_id_from_db: int = None, adsorbate_smiles_from_db: str = None, adsorbate_db_path: str = ADSORBATES_PKL_PATH, adsorbate_db: Dict[int, Tuple[Any, Ellipsis]] = None, adsorbate_binding_indices: list = None) - - - Initializes an adsorbate object in one of 4 ways: - - Directly pass in an ase.Atoms object. - For this, you should also provide the index of the binding atom. - - Pass in index of adsorbate to select from adsorbate database. - - Pass in the SMILES string of the adsorbate to select from the database. - - Randomly sample an adsorbate from the adsorbate database. - - :param adsorbate_atoms: Adsorbate structure. - :type adsorbate_atoms: ase.Atoms - :param adsorbate_id_from_db: Index of adsorbate to select. - :type adsorbate_id_from_db: int - :param adsorbate_smiles_from_db: A SMILES string of the desired adsorbate. - :type adsorbate_smiles_from_db: str - :param adsorbate_db_path: Path to adsorbate database. 
- :type adsorbate_db_path: str - :param adsorbate_binding_indices: The index/indices of the adsorbate atoms which are expected to bind. - :type adsorbate_binding_indices: list - - .. py:method:: __len__() - - - .. py:method:: __str__() - - Return str(self). - - - .. py:method:: __repr__() - - Return repr(self). - - - .. py:method:: _get_adsorbate_from_random(adsorbate_db) - - - .. py:method:: _load_adsorbate(adsorbate: Tuple[Any, Ellipsis]) -> None - - Saves the fields from an adsorbate stored in a database. Fields added - after the first revision are conditionally added for backwards - compatibility with older database files. - - - -.. py:function:: randomly_rotate_adsorbate(adsorbate_atoms: ase.Atoms, mode: str = 'random', binding_idx: int = None) - - diff --git a/_sources/autoapi/fairchem/data/oc/core/adsorbate_slab_config/index.rst b/_sources/autoapi/fairchem/data/oc/core/adsorbate_slab_config/index.rst deleted file mode 100644 index 1cc17a55e..000000000 --- a/_sources/autoapi/fairchem/data/oc/core/adsorbate_slab_config/index.rst +++ /dev/null @@ -1,214 +0,0 @@ -:py:mod:`fairchem.data.oc.core.adsorbate_slab_config` -===================================================== - -.. py:module:: fairchem.data.oc.core.adsorbate_slab_config - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.core.adsorbate_slab_config.AdsorbateSlabConfig - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.core.adsorbate_slab_config.get_random_sites_on_triangle - fairchem.data.oc.core.adsorbate_slab_config.custom_tile_atoms - fairchem.data.oc.core.adsorbate_slab_config.get_interstitial_distances - fairchem.data.oc.core.adsorbate_slab_config.there_is_overlap - - - -.. py:class:: AdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbate: fairchem.data.oc.core.Adsorbate, num_sites: int = 100, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1, mode: str = 'random') - - - Initializes a list of adsorbate-catalyst systems for a given Adsorbate and Slab. - - :param slab: Slab object. - :type slab: Slab - :param adsorbate: Adsorbate object. - :type adsorbate: Adsorbate - :param num_sites: Number of sites to sample. - :type num_sites: int - :param num_augmentations_per_site: Number of augmentations of the adsorbate per site. Total number of - generated structures will be `num_sites` * `num_augmentations_per_site`. - :type num_augmentations_per_site: int - :param interstitial_gap: Minimum distance in Angstroms between adsorbate and slab atoms. - :type interstitial_gap: float - :param mode: "random", "heuristic", or "random_site_heuristic_placement". - This affects surface site sampling and adsorbate placement on each site. - - In "random", we do a Delaunay triangulation of the surface atoms, then - sample sites uniformly at random within each triangle. When placing the - adsorbate, we randomly rotate it along xyz, and place it such that the - center of mass is at the site. - - In "heuristic", we use Pymatgen's AdsorbateSiteFinder to find the most - energetically favorable sites, i.e., ontop, bridge, or hollow sites. - When placing the adsorbate, we randomly rotate it along z with only - slight rotation along x and y, and place it such that the binding atom - is at the site. - - In "random_site_heuristic_placement", we do a Delaunay triangulation of - the surface atoms, then sample sites uniformly at random within each - triangle. 
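The `Adsorbate` entry above lists four initialization routes; a hedged sketch of each follows (the database id and SMILES string are illustrative assumptions and depend on the packaged database file):

.. code-block:: python

    import ase.build

    from fairchem.data.oc.core import Adsorbate

    # 1) Directly from an ase.Atoms object, with the binding atom index.
    co = ase.build.molecule("CO")
    ads_from_atoms = Adsorbate(adsorbate_atoms=co, adsorbate_binding_indices=[0])

    # 2) By integer index into the packaged adsorbate database.
    ads_by_id = Adsorbate(adsorbate_id_from_db=1)

    # 3) By SMILES string looked up in the same database (exact strings
    #    depend on the database file shipped with the package).
    ads_by_smiles = Adsorbate(adsorbate_smiles_from_db="*CO")

    # 4) With no arguments, a random adsorbate is sampled from the database.
    ads_random = Adsorbate()

    print(ads_by_id, len(ads_by_id))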
When placing the adsorbate, we randomly rotate it along z with - only slight rotation along x and y, and place it such that the binding - atom is at the site. - - In all cases, the adsorbate is placed at the closest position of no - overlap with the slab plus `interstitial_gap` along the surface normal. - :type mode: str - - .. py:method:: get_binding_sites(num_sites: int) - - Returns up to `num_sites` sites given the surface atoms' positions. - - - .. py:method:: place_adsorbate_on_site(adsorbate: fairchem.data.oc.core.Adsorbate, site: numpy.ndarray, interstitial_gap: float = 0.1) - - Place the adsorbate at the given binding site. - - - .. py:method:: place_adsorbate_on_sites(sites: list, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1) - - Place the adsorbate at the given binding sites. - - - .. py:method:: _get_scaled_normal(adsorbate_c: ase.Atoms, slab_c: ase.Atoms, site: numpy.ndarray, unit_normal: numpy.ndarray, interstitial_gap: float = 0.1) - - Get the scaled normal that gives a proximate configuration without atomic - overlap by: - 1. Projecting the adsorbate and surface atoms onto the surface plane. - 2. Identify all adsorbate atom - surface atom combinations for which - an itersection when translating along the normal would occur. - This is where the distance between the projected points is less than - r_surface_atom + r_adsorbate_atom - 3. Explicitly solve for the scaled normal at which the distance between - surface atom and adsorbate atom = r_surface_atom + r_adsorbate_atom + - interstitial_gap. This exploits the superposition of vectors and the - distance formula, so it requires root finding. - - Assumes that the adsorbate's binding atom or center-of-mass (depending - on mode) is already placed at the site. - - :param adsorbate_c: A copy of the adsorbate with coordinates at the site - :type adsorbate_c: ase.Atoms - :param slab_c: A copy of the slab - :type slab_c: ase.Atoms - :param site: the coordinate of the site - :type site: np.ndarray - :param adsorbate_atoms: the translated adsorbate - :type adsorbate_atoms: ase.Atoms - :param unit_normal: the unit vector normal to the surface - :type unit_normal: np.ndarray - :param interstitial_gap: the desired distance between the covalent radii of the - closest surface and adsorbate atom - :type interstitial_gap: float - - :returns: the magnitude of the normal vector for placement - :rtype: (float) - - - .. py:method:: _find_combos_to_check(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray, interstitial_gap: float) - - Find the pairs of surface and adsorbate atoms that would have an intersection event - while traversing the normal vector. For each pair, return pertanent information for - finding the point of intersection. - :param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site - :type adsorbate_c2: ase.Atoms - :param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered - about the site - :type slab_c2: ase.Atoms - :param unit_normal: the unit vector normal to the surface - :type unit_normal: np.ndarray - :param interstitial_gap: the desired distance between the covalent radii of the - closest surface and adsorbate atom - :type interstitial_gap: float - - :returns: - - each entry in the list corresponds to one pair to check. With the - following information: - [(adsorbate_idx, slab_idx), r_adsorbate_atom + r_slab_atom, slab_atom_position] - :rtype: (list[lists]) - - - .. 
py:method:: _get_projected_points(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray) - - Find the x and y coordinates of each atom projected onto the surface plane. - :param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site - :type adsorbate_c2: ase.Atoms - :param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered - about the site - :type slab_c2: ase.Atoms - :param unit_normal: the unit vector normal to the surface - :type unit_normal: np.ndarray - - :returns: {"ads": [[x1, y1], [x2, y2], ...], "slab": [[x1, y1], [x2, y2], ...],} - :rtype: (dict) - - - .. py:method:: get_metadata_dict(ind) - - Returns a dict containing the atoms object and metadata for - one specified config, used for writing to files. - - - -.. py:function:: get_random_sites_on_triangle(vertices: numpy.ndarray, num_sites: int = 10) - - Sample `num_sites` random sites uniformly on a given 3D triangle. - Following Sec. 4.2 from https://www.cs.princeton.edu/~funk/tog02.pdf. - - -.. py:function:: custom_tile_atoms(atoms: ase.Atoms) - - Tile the atoms so that the center tile has the indices and positions of the - untiled structure. - - :param atoms: the atoms object to be tiled - :type atoms: ase.Atoms - - :returns: - - the tiled atoms which has been repeated 3 times in - the x and y directions but maintains the original indices on the central - unit cell. - :rtype: (ase.Atoms) - - -.. py:function:: get_interstitial_distances(adsorbate_slab_config: ase.Atoms) - - Check to see if there is any atomic overlap between surface atoms - and adsorbate atoms. - - :param adsorbate_slab_configuration: an slab atoms object with an - adsorbate placed - :type adsorbate_slab_configuration: ase.Atoms - - :returns: True if there is atomic overlap, otherwise False - :rtype: (bool) - - -.. py:function:: there_is_overlap(adsorbate_slab_config: ase.Atoms) - - Check to see if there is any atomic overlap between surface atoms - and adsorbate atoms. - - :param adsorbate_slab_configuration: an slab atoms object with an - adsorbate placed - :type adsorbate_slab_configuration: ase.Atoms - - :returns: True if there is atomic overlap, otherwise False - :rtype: (bool) - - diff --git a/_sources/autoapi/fairchem/data/oc/core/bulk/index.rst b/_sources/autoapi/fairchem/data/oc/core/bulk/index.rst deleted file mode 100644 index 82d6500fa..000000000 --- a/_sources/autoapi/fairchem/data/oc/core/bulk/index.rst +++ /dev/null @@ -1,72 +0,0 @@ -:py:mod:`fairchem.data.oc.core.bulk` -==================================== - -.. py:module:: fairchem.data.oc.core.bulk - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.core.bulk.Bulk - - - - -.. py:class:: Bulk(bulk_atoms: ase.Atoms = None, bulk_id_from_db: int = None, bulk_src_id_from_db: str = None, bulk_db_path: str = BULK_PKL_PATH, bulk_db: List[Dict[str, Any]] = None) - - - Initializes a bulk object in one of 4 ways: - - Directly pass in an ase.Atoms object. - - Pass in index of bulk to select from bulk database. - - Pass in the src_id of the bulk to select from the bulk database. - - Randomly sample a bulk from bulk database if no other option is passed. - - :param bulk_atoms: Bulk structure. - :type bulk_atoms: ase.Atoms - :param bulk_id_from_db: Index of bulk in database pkl to select. - :type bulk_id_from_db: int - :param bulk_src_id_from_db: Src id of bulk to select (e.g. "mp-30"). - :type bulk_src_id_from_db: int - :param bulk_db_path: Path to bulk database. 
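Tying together the `AdsorbateSlabConfig` class and placement helpers documented above, a schematic placement flow looks roughly like this (it assumes the packaged bulk and adsorbate databases are available):

.. code-block:: python

    from fairchem.data.oc.core import Adsorbate, AdsorbateSlabConfig, Bulk, Slab

    bulk = Bulk(bulk_src_id_from_db="mp-30")           # src id from the Bulk docstring example
    slab = Slab.from_bulk_get_random_slab(bulk=bulk)   # sample one termination
    adsorbate = Adsorbate(adsorbate_id_from_db=1)      # illustrative database index

    configs = AdsorbateSlabConfig(
        slab,
        adsorbate,
        num_sites=10,
        num_augmentations_per_site=1,
        interstitial_gap=0.1,
        mode="random_site_heuristic_placement",
    )

    # Metadata (atoms object plus placement info) for the first configuration.
    print(configs.get_metadata_dict(0))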
- :type bulk_db_path: str - :param bulk_db: Already-loaded database. - :type bulk_db: List[Dict[str, Any]] - - .. py:method:: _get_bulk_from_random(bulk_db) - - - .. py:method:: set_source_dataset_id(src_id: str) - - - .. py:method:: set_bulk_id_from_db(bulk_id_from_db: int) - - - .. py:method:: get_slabs(max_miller=2, precomputed_slabs_dir=None) - - Returns a list of possible slabs for this bulk instance. - - - .. py:method:: __len__() - - - .. py:method:: __str__() - - Return str(self). - - - .. py:method:: __repr__() - - Return repr(self). - - - .. py:method:: __eq__(other) -> bool - - Return self==value. - - - diff --git a/_sources/autoapi/fairchem/data/oc/core/index.rst b/_sources/autoapi/fairchem/data/oc/core/index.rst deleted file mode 100644 index 690f9b1ef..000000000 --- a/_sources/autoapi/fairchem/data/oc/core/index.rst +++ /dev/null @@ -1,403 +0,0 @@ -:py:mod:`fairchem.data.oc.core` -=============================== - -.. py:module:: fairchem.data.oc.core - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - adsorbate/index.rst - adsorbate_slab_config/index.rst - bulk/index.rst - multi_adsorbate_slab_config/index.rst - slab/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.core.Bulk - fairchem.data.oc.core.Slab - fairchem.data.oc.core.Adsorbate - fairchem.data.oc.core.AdsorbateSlabConfig - fairchem.data.oc.core.MultipleAdsorbateSlabConfig - - - - -.. py:class:: Bulk(bulk_atoms: ase.Atoms = None, bulk_id_from_db: int = None, bulk_src_id_from_db: str = None, bulk_db_path: str = BULK_PKL_PATH, bulk_db: List[Dict[str, Any]] = None) - - - Initializes a bulk object in one of 4 ways: - - Directly pass in an ase.Atoms object. - - Pass in index of bulk to select from bulk database. - - Pass in the src_id of the bulk to select from the bulk database. - - Randomly sample a bulk from bulk database if no other option is passed. - - :param bulk_atoms: Bulk structure. - :type bulk_atoms: ase.Atoms - :param bulk_id_from_db: Index of bulk in database pkl to select. - :type bulk_id_from_db: int - :param bulk_src_id_from_db: Src id of bulk to select (e.g. "mp-30"). - :type bulk_src_id_from_db: int - :param bulk_db_path: Path to bulk database. - :type bulk_db_path: str - :param bulk_db: Already-loaded database. - :type bulk_db: List[Dict[str, Any]] - - .. py:method:: _get_bulk_from_random(bulk_db) - - - .. py:method:: set_source_dataset_id(src_id: str) - - - .. py:method:: set_bulk_id_from_db(bulk_id_from_db: int) - - - .. py:method:: get_slabs(max_miller=2, precomputed_slabs_dir=None) - - Returns a list of possible slabs for this bulk instance. - - - .. py:method:: __len__() - - - .. py:method:: __str__() - - Return str(self). - - - .. py:method:: __repr__() - - Return repr(self). - - - .. py:method:: __eq__(other) -> bool - - Return self==value. - - - -.. py:class:: Slab(bulk=None, slab_atoms: ase.Atoms = None, millers: tuple = None, shift: float = None, top: bool = None, oriented_bulk: pymatgen.core.structure.Structure = None, min_ab: float = 0.8) - - - Initializes a slab object, i.e. a particular slab tiled along xyz, in - one of 2 ways: - - Pass in a Bulk object and a slab 5-tuple containing - (atoms, miller, shift, top, oriented bulk). - - Pass in a Bulk object and randomly sample a slab. - - :param bulk: Corresponding Bulk object. - :type bulk: Bulk - :param slab_atoms: Slab atoms, tiled and tagged - :type slab_atoms: ase.Atoms - :param millers: Miller indices of slab. 
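The `Bulk` entry above likewise supports four initialization routes; a short sketch:

.. code-block:: python

    from ase.build import bulk as ase_bulk

    from fairchem.data.oc.core import Bulk

    b_from_atoms = Bulk(bulk_atoms=ase_bulk("Cu", "fcc", a=3.61))  # 1) explicit ase.Atoms
    b_by_id = Bulk(bulk_id_from_db=0)                              # 2) index into the bulk pkl
    b_by_src = Bulk(bulk_src_id_from_db="mp-30")                   # 3) source id (docstring example)
    b_random = Bulk()                                              # 4) random sample

    # Enumerate candidate slabs for one of these bulks.
    slabs = b_by_src.get_slabs(max_miller=2)
    print(len(slabs))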
- :type millers: tuple - :param shift: Shift of slab. - :type shift: float - :param top: Whether slab is top or bottom. - :type top: bool - :param min_ab: To confirm that the tiled structure spans this distance - :type min_ab: float - - .. py:method:: from_bulk_get_random_slab(bulk=None, max_miller=2, min_ab=8.0, save_path=None) - :classmethod: - - - .. py:method:: from_bulk_get_specific_millers(specific_millers, bulk=None, min_ab=8.0, save_path=None) - :classmethod: - - - .. py:method:: from_bulk_get_all_slabs(bulk=None, max_miller=2, min_ab=8.0, save_path=None) - :classmethod: - - - .. py:method:: from_precomputed_slabs_pkl(bulk=None, precomputed_slabs_pkl=None, max_miller=2, min_ab=8.0) - :classmethod: - - - .. py:method:: from_atoms(atoms: ase.Atoms = None, bulk=None, **kwargs) - :classmethod: - - - .. py:method:: has_surface_tagged() - - - .. py:method:: get_metadata_dict() - - - .. py:method:: __len__() - - - .. py:method:: __str__() - - Return str(self). - - - .. py:method:: __repr__() - - Return repr(self). - - - .. py:method:: __eq__(other) - - Return self==value. - - - -.. py:class:: Adsorbate(adsorbate_atoms: ase.Atoms = None, adsorbate_id_from_db: int = None, adsorbate_smiles_from_db: str = None, adsorbate_db_path: str = ADSORBATES_PKL_PATH, adsorbate_db: Dict[int, Tuple[Any, Ellipsis]] = None, adsorbate_binding_indices: list = None) - - - Initializes an adsorbate object in one of 4 ways: - - Directly pass in an ase.Atoms object. - For this, you should also provide the index of the binding atom. - - Pass in index of adsorbate to select from adsorbate database. - - Pass in the SMILES string of the adsorbate to select from the database. - - Randomly sample an adsorbate from the adsorbate database. - - :param adsorbate_atoms: Adsorbate structure. - :type adsorbate_atoms: ase.Atoms - :param adsorbate_id_from_db: Index of adsorbate to select. - :type adsorbate_id_from_db: int - :param adsorbate_smiles_from_db: A SMILES string of the desired adsorbate. - :type adsorbate_smiles_from_db: str - :param adsorbate_db_path: Path to adsorbate database. - :type adsorbate_db_path: str - :param adsorbate_binding_indices: The index/indices of the adsorbate atoms which are expected to bind. - :type adsorbate_binding_indices: list - - .. py:method:: __len__() - - - .. py:method:: __str__() - - Return str(self). - - - .. py:method:: __repr__() - - Return repr(self). - - - .. py:method:: _get_adsorbate_from_random(adsorbate_db) - - - .. py:method:: _load_adsorbate(adsorbate: Tuple[Any, Ellipsis]) -> None - - Saves the fields from an adsorbate stored in a database. Fields added - after the first revision are conditionally added for backwards - compatibility with older database files. - - - -.. py:class:: AdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbate: fairchem.data.oc.core.Adsorbate, num_sites: int = 100, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1, mode: str = 'random') - - - Initializes a list of adsorbate-catalyst systems for a given Adsorbate and Slab. - - :param slab: Slab object. - :type slab: Slab - :param adsorbate: Adsorbate object. - :type adsorbate: Adsorbate - :param num_sites: Number of sites to sample. - :type num_sites: int - :param num_augmentations_per_site: Number of augmentations of the adsorbate per site. Total number of - generated structures will be `num_sites` * `num_augmentations_per_site`. - :type num_augmentations_per_site: int - :param interstitial_gap: Minimum distance in Angstroms between adsorbate and slab atoms. 
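The `Slab` class documented above exposes several classmethod constructors; a hedged usage sketch (assuming, as the names suggest, that the enumeration constructors return lists of `Slab` objects):

.. code-block:: python

    from fairchem.data.oc.core import Bulk, Slab

    bulk = Bulk(bulk_src_id_from_db="mp-30")

    # Only the (1, 1, 1) termination(s) of this bulk.
    slabs_111 = Slab.from_bulk_get_specific_millers([(1, 1, 1)], bulk=bulk)

    # All symmetrically distinct slabs up to a maximum Miller index.
    all_slabs = Slab.from_bulk_get_all_slabs(bulk=bulk, max_miller=2)

    for slab in all_slabs:                 # assumed to be a list of Slab objects
        assert slab.has_surface_tagged()   # surface atoms carry tag 1
        print(slab, slab.get_metadata_dict())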
- :type interstitial_gap: float - :param mode: "random", "heuristic", or "random_site_heuristic_placement". - This affects surface site sampling and adsorbate placement on each site. - - In "random", we do a Delaunay triangulation of the surface atoms, then - sample sites uniformly at random within each triangle. When placing the - adsorbate, we randomly rotate it along xyz, and place it such that the - center of mass is at the site. - - In "heuristic", we use Pymatgen's AdsorbateSiteFinder to find the most - energetically favorable sites, i.e., ontop, bridge, or hollow sites. - When placing the adsorbate, we randomly rotate it along z with only - slight rotation along x and y, and place it such that the binding atom - is at the site. - - In "random_site_heuristic_placement", we do a Delaunay triangulation of - the surface atoms, then sample sites uniformly at random within each - triangle. When placing the adsorbate, we randomly rotate it along z with - only slight rotation along x and y, and place it such that the binding - atom is at the site. - - In all cases, the adsorbate is placed at the closest position of no - overlap with the slab plus `interstitial_gap` along the surface normal. - :type mode: str - - .. py:method:: get_binding_sites(num_sites: int) - - Returns up to `num_sites` sites given the surface atoms' positions. - - - .. py:method:: place_adsorbate_on_site(adsorbate: fairchem.data.oc.core.Adsorbate, site: numpy.ndarray, interstitial_gap: float = 0.1) - - Place the adsorbate at the given binding site. - - - .. py:method:: place_adsorbate_on_sites(sites: list, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1) - - Place the adsorbate at the given binding sites. - - - .. py:method:: _get_scaled_normal(adsorbate_c: ase.Atoms, slab_c: ase.Atoms, site: numpy.ndarray, unit_normal: numpy.ndarray, interstitial_gap: float = 0.1) - - Get the scaled normal that gives a proximate configuration without atomic - overlap by: - 1. Projecting the adsorbate and surface atoms onto the surface plane. - 2. Identify all adsorbate atom - surface atom combinations for which - an itersection when translating along the normal would occur. - This is where the distance between the projected points is less than - r_surface_atom + r_adsorbate_atom - 3. Explicitly solve for the scaled normal at which the distance between - surface atom and adsorbate atom = r_surface_atom + r_adsorbate_atom + - interstitial_gap. This exploits the superposition of vectors and the - distance formula, so it requires root finding. - - Assumes that the adsorbate's binding atom or center-of-mass (depending - on mode) is already placed at the site. - - :param adsorbate_c: A copy of the adsorbate with coordinates at the site - :type adsorbate_c: ase.Atoms - :param slab_c: A copy of the slab - :type slab_c: ase.Atoms - :param site: the coordinate of the site - :type site: np.ndarray - :param adsorbate_atoms: the translated adsorbate - :type adsorbate_atoms: ase.Atoms - :param unit_normal: the unit vector normal to the surface - :type unit_normal: np.ndarray - :param interstitial_gap: the desired distance between the covalent radii of the - closest surface and adsorbate atom - :type interstitial_gap: float - - :returns: the magnitude of the normal vector for placement - :rtype: (float) - - - .. 
py:method:: _find_combos_to_check(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray, interstitial_gap: float) - - Find the pairs of surface and adsorbate atoms that would have an intersection event - while traversing the normal vector. For each pair, return pertanent information for - finding the point of intersection. - :param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site - :type adsorbate_c2: ase.Atoms - :param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered - about the site - :type slab_c2: ase.Atoms - :param unit_normal: the unit vector normal to the surface - :type unit_normal: np.ndarray - :param interstitial_gap: the desired distance between the covalent radii of the - closest surface and adsorbate atom - :type interstitial_gap: float - - :returns: - - each entry in the list corresponds to one pair to check. With the - following information: - [(adsorbate_idx, slab_idx), r_adsorbate_atom + r_slab_atom, slab_atom_position] - :rtype: (list[lists]) - - - .. py:method:: _get_projected_points(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray) - - Find the x and y coordinates of each atom projected onto the surface plane. - :param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site - :type adsorbate_c2: ase.Atoms - :param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered - about the site - :type slab_c2: ase.Atoms - :param unit_normal: the unit vector normal to the surface - :type unit_normal: np.ndarray - - :returns: {"ads": [[x1, y1], [x2, y2], ...], "slab": [[x1, y1], [x2, y2], ...],} - :rtype: (dict) - - - .. py:method:: get_metadata_dict(ind) - - Returns a dict containing the atoms object and metadata for - one specified config, used for writing to files. - - - -.. py:class:: MultipleAdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbates: List[fairchem.data.oc.core.Adsorbate], num_sites: int = 100, num_configurations: int = 1, interstitial_gap: float = 0.1, mode: str = 'random_site_heuristic_placement') - - - Bases: :py:obj:`fairchem.data.oc.core.AdsorbateSlabConfig` - - Class to represent a slab with multiple adsorbates on it. This class only - returns a fixed combination of adsorbates placed on the surface. Unlike - AdsorbateSlabConfig which enumerates all possible adsorbate placements, this - problem gets combinatorially large. - - :param slab: Slab object. - :type slab: Slab - :param adsorbates: List of adsorbate objects to place on the slab. - :type adsorbates: List[Adsorbate] - :param num_sites: Number of sites to sample. - :type num_sites: int - :param num_configurations: Number of configurations to generate per slab+adsorbate(s) combination. - This corresponds to selecting different site combinations to place - the adsorbates on. - :type num_configurations: int - :param interstitial_gap: Minimum distance, in Angstroms, between adsorbate and slab atoms as - well as the inter-adsorbate distance. - :type interstitial_gap: float - :param mode: "random", "heuristic", or "random_site_heuristic_placement". - This affects surface site sampling and adsorbate placement on each site. - - In "random", we do a Delaunay triangulation of the surface atoms, then - sample sites uniformly at random within each triangle. When placing the - adsorbate, we randomly rotate it along xyz, and place it such that the - center of mass is at the site. 
- - In "heuristic", we use Pymatgen's AdsorbateSiteFinder to find the most - energetically favorable sites, i.e., ontop, bridge, or hollow sites. - When placing the adsorbate, we randomly rotate it along z with only - slight rotation along x and y, and place it such that the binding atom - is at the site. - - In "random_site_heuristic_placement", we do a Delaunay triangulation of - the surface atoms, then sample sites uniformly at random within each - triangle. When placing the adsorbate, we randomly rotate it along z with - only slight rotation along x and y, and place it such that the binding - atom is at the site. - - In all cases, the adsorbate is placed at the closest position of no - overlap with the slab plus `interstitial_gap` along the surface normal. - :type mode: str - - .. py:method:: place_adsorbates_on_sites(sites: list, num_configurations: int = 1, interstitial_gap: float = 0.1) - - Place the adsorbate at the given binding sites. - - This method generates a fixed number of configurations where sites are - selected to ensure that adsorbate binding indices are at least a fair - distance away from each other (covalent radii + interstitial gap). - While this helps prevent adsorbate overlap it does not gaurantee it - since non-binding adsorbate atoms can overlap if the right combination - of angles is sampled. - - - .. py:method:: get_metadata_dict(ind) - - Returns a dict containing the atoms object and metadata for - one specified config, used for writing to files. - - - diff --git a/_sources/autoapi/fairchem/data/oc/core/multi_adsorbate_slab_config/index.rst b/_sources/autoapi/fairchem/data/oc/core/multi_adsorbate_slab_config/index.rst deleted file mode 100644 index 639eab0af..000000000 --- a/_sources/autoapi/fairchem/data/oc/core/multi_adsorbate_slab_config/index.rst +++ /dev/null @@ -1,101 +0,0 @@ -:py:mod:`fairchem.data.oc.core.multi_adsorbate_slab_config` -=========================================================== - -.. py:module:: fairchem.data.oc.core.multi_adsorbate_slab_config - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.core.multi_adsorbate_slab_config.MultipleAdsorbateSlabConfig - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.core.multi_adsorbate_slab_config.update_distance_map - - - -.. py:class:: MultipleAdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbates: List[fairchem.data.oc.core.Adsorbate], num_sites: int = 100, num_configurations: int = 1, interstitial_gap: float = 0.1, mode: str = 'random_site_heuristic_placement') - - - Bases: :py:obj:`fairchem.data.oc.core.AdsorbateSlabConfig` - - Class to represent a slab with multiple adsorbates on it. This class only - returns a fixed combination of adsorbates placed on the surface. Unlike - AdsorbateSlabConfig which enumerates all possible adsorbate placements, this - problem gets combinatorially large. - - :param slab: Slab object. - :type slab: Slab - :param adsorbates: List of adsorbate objects to place on the slab. - :type adsorbates: List[Adsorbate] - :param num_sites: Number of sites to sample. - :type num_sites: int - :param num_configurations: Number of configurations to generate per slab+adsorbate(s) combination. - This corresponds to selecting different site combinations to place - the adsorbates on. - :type num_configurations: int - :param interstitial_gap: Minimum distance, in Angstroms, between adsorbate and slab atoms as - well as the inter-adsorbate distance. 
- :type interstitial_gap: float - :param mode: "random", "heuristic", or "random_site_heuristic_placement". - This affects surface site sampling and adsorbate placement on each site. - - In "random", we do a Delaunay triangulation of the surface atoms, then - sample sites uniformly at random within each triangle. When placing the - adsorbate, we randomly rotate it along xyz, and place it such that the - center of mass is at the site. - - In "heuristic", we use Pymatgen's AdsorbateSiteFinder to find the most - energetically favorable sites, i.e., ontop, bridge, or hollow sites. - When placing the adsorbate, we randomly rotate it along z with only - slight rotation along x and y, and place it such that the binding atom - is at the site. - - In "random_site_heuristic_placement", we do a Delaunay triangulation of - the surface atoms, then sample sites uniformly at random within each - triangle. When placing the adsorbate, we randomly rotate it along z with - only slight rotation along x and y, and place it such that the binding - atom is at the site. - - In all cases, the adsorbate is placed at the closest position of no - overlap with the slab plus `interstitial_gap` along the surface normal. - :type mode: str - - .. py:method:: place_adsorbates_on_sites(sites: list, num_configurations: int = 1, interstitial_gap: float = 0.1) - - Place the adsorbate at the given binding sites. - - This method generates a fixed number of configurations where sites are - selected to ensure that adsorbate binding indices are at least a fair - distance away from each other (covalent radii + interstitial gap). - While this helps prevent adsorbate overlap it does not gaurantee it - since non-binding adsorbate atoms can overlap if the right combination - of angles is sampled. - - - .. py:method:: get_metadata_dict(ind) - - Returns a dict containing the atoms object and metadata for - one specified config, used for writing to files. - - - -.. py:function:: update_distance_map(prev_distance_map, site_idx, adsorbate, pseudo_atoms) - - Given a new site and the adsorbate we plan on placing there, - update the distance mapping to reflect the new distances from sites to nearest adsorbates. - We incorporate the covalent radii of the placed adsorbate binding atom in our distance - calculation to prevent atom overlap. - - diff --git a/_sources/autoapi/fairchem/data/oc/core/slab/index.rst b/_sources/autoapi/fairchem/data/oc/core/slab/index.rst deleted file mode 100644 index 099c95b91..000000000 --- a/_sources/autoapi/fairchem/data/oc/core/slab/index.rst +++ /dev/null @@ -1,309 +0,0 @@ -:py:mod:`fairchem.data.oc.core.slab` -==================================== - -.. py:module:: fairchem.data.oc.core.slab - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.core.slab.Slab - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.core.slab.tile_and_tag_atoms - fairchem.data.oc.core.slab.set_fixed_atom_constraints - fairchem.data.oc.core.slab.tag_surface_atoms - fairchem.data.oc.core.slab.tile_atoms - fairchem.data.oc.core.slab.find_surface_atoms_by_height - fairchem.data.oc.core.slab.find_surface_atoms_with_voronoi_given_height - fairchem.data.oc.core.slab.calculate_center_of_mass - fairchem.data.oc.core.slab.calculate_coordination_of_bulk_atoms - fairchem.data.oc.core.slab.compute_slabs - fairchem.data.oc.core.slab.flip_struct - fairchem.data.oc.core.slab.is_structure_invertible - fairchem.data.oc.core.slab.standardize_bulk - - - -.. 
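A brief sketch of the multi-adsorbate placement described in the `MultipleAdsorbateSlabConfig` entries above (database indices are illustrative assumptions):

.. code-block:: python

    from fairchem.data.oc.core import (
        Adsorbate,
        Bulk,
        MultipleAdsorbateSlabConfig,
        Slab,
    )

    bulk = Bulk(bulk_src_id_from_db="mp-30")
    slab = Slab.from_bulk_get_random_slab(bulk=bulk)
    adsorbates = [Adsorbate(adsorbate_id_from_db=i) for i in (0, 1)]

    multi = MultipleAdsorbateSlabConfig(
        slab,
        adsorbates,
        num_sites=100,
        num_configurations=2,         # distinct site combinations to generate
        interstitial_gap=0.1,
        mode="random_site_heuristic_placement",
    )
    print(multi.get_metadata_dict(0))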
py:class:: Slab(bulk=None, slab_atoms: ase.Atoms = None, millers: tuple = None, shift: float = None, top: bool = None, oriented_bulk: pymatgen.core.structure.Structure = None, min_ab: float = 0.8) - - - Initializes a slab object, i.e. a particular slab tiled along xyz, in - one of 2 ways: - - Pass in a Bulk object and a slab 5-tuple containing - (atoms, miller, shift, top, oriented bulk). - - Pass in a Bulk object and randomly sample a slab. - - :param bulk: Corresponding Bulk object. - :type bulk: Bulk - :param slab_atoms: Slab atoms, tiled and tagged - :type slab_atoms: ase.Atoms - :param millers: Miller indices of slab. - :type millers: tuple - :param shift: Shift of slab. - :type shift: float - :param top: Whether slab is top or bottom. - :type top: bool - :param min_ab: To confirm that the tiled structure spans this distance - :type min_ab: float - - .. py:method:: from_bulk_get_random_slab(bulk=None, max_miller=2, min_ab=8.0, save_path=None) - :classmethod: - - - .. py:method:: from_bulk_get_specific_millers(specific_millers, bulk=None, min_ab=8.0, save_path=None) - :classmethod: - - - .. py:method:: from_bulk_get_all_slabs(bulk=None, max_miller=2, min_ab=8.0, save_path=None) - :classmethod: - - - .. py:method:: from_precomputed_slabs_pkl(bulk=None, precomputed_slabs_pkl=None, max_miller=2, min_ab=8.0) - :classmethod: - - - .. py:method:: from_atoms(atoms: ase.Atoms = None, bulk=None, **kwargs) - :classmethod: - - - .. py:method:: has_surface_tagged() - - - .. py:method:: get_metadata_dict() - - - .. py:method:: __len__() - - - .. py:method:: __str__() - - Return str(self). - - - .. py:method:: __repr__() - - Return repr(self). - - - .. py:method:: __eq__(other) - - Return self==value. - - - -.. py:function:: tile_and_tag_atoms(unit_slab_struct: pymatgen.core.structure.Structure, bulk_atoms: ase.Atoms, min_ab: float = 8) - - This function combines the next three functions that tile, tag, - and constrain the atoms. - - :param unit_slab_struct: The untiled slab structure - :type unit_slab_struct: Structure - :param bulk_atoms: Atoms of the corresponding bulk structure, used for tagging - :type bulk_atoms: ase.Atoms - :param min_ab: The minimum distance in x and y spanned by the tiled structure. - :type min_ab: float - - :returns: **atoms_tiled** -- A copy of the slab atoms that is tiled, tagged, and constrained - :rtype: ase.Atoms - - -.. py:function:: set_fixed_atom_constraints(atoms) - - This function fixes sub-surface atoms of a surface. Also works on systems - that have surface + adsorbate(s), as long as the bulk atoms are tagged with - `0`, surface atoms are tagged with `1`, and the adsorbate atoms are tagged - with `2` or above. - - This is used for both surface atoms and the combined surface+adsorbate. - - :param atoms: Atoms object of the slab or slab+adsorbate system, with bulk atoms - tagged as `0`, surface atoms tagged as `1`, and adsorbate atoms tagged - as `2` or above. - :type atoms: ase.Atoms - - :returns: **atoms** -- A deep copy of the `atoms` argument, but where the appropriate - atoms are constrained. - :rtype: ase.Atoms - - -.. py:function:: tag_surface_atoms(slab_atoms: ase.Atoms = None, bulk_atoms: ase.Atoms = None) - - Sets the tags of an `ase.Atoms` object. Any atom that we consider a "bulk" - atom will have a tag of 0, and any atom that we consider a "surface" atom - will have a tag of 1. 
We use a combination of Voronoi neighbor algorithms - (adapted from `pymatgen.core.surface.Slab.get_surface_sites`; see - https://pymatgen.org/pymatgen.core.surface.html) and a distance cutoff. - - :param slab_atoms: The slab where you are trying to find surface sites. - :type slab_atoms: ase.Atoms - :param bulk_atoms: The bulk structure that the surface was cut from. - :type bulk_atoms: ase.Atoms - - :returns: **slab_atoms** -- A copy of the slab atoms with the surface atoms tagged as 1. - :rtype: ase.Atoms - - -.. py:function:: tile_atoms(atoms: ase.Atoms, min_ab: float = 8) - - This function will repeat an atoms structure in the direction of the a and b - lattice vectors such that they are at least as wide as the min_ab constant. - - :param atoms: The structure to tile. - :type atoms: ase.Atoms - :param min_ab: The minimum distance in x and y spanned by the tiled structure. - :type min_ab: float - - :returns: **atoms_tiled** -- The tiled structure. - :rtype: ase.Atoms - - -.. py:function:: find_surface_atoms_by_height(surface_atoms) - - As discussed in the docstring for `find_surface_atoms_with_voronoi`, - sometimes we might accidentally tag a surface atom as a bulk atom if there - are multiple coordination environments for that atom type within the bulk. - One heuristic that we use to address this is to simply figure out if an - atom is close to the surface. This function will figure that out. - - Specifically: We consider an atom a surface atom if it is within 2 - Angstroms of the heighest atom in the z-direction (or more accurately, the - direction of the 3rd unit cell vector). - - :param surface_atoms: - :type surface_atoms: ase.Atoms - - :returns: **tags** -- A list that contains the indices of the surface atoms. - :rtype: list - - -.. py:function:: find_surface_atoms_with_voronoi_given_height(bulk_atoms, slab_atoms, height_tags) - - Labels atoms as surface or bulk atoms according to their coordination - relative to their bulk structure. If an atom's coordination is less than it - normally is in a bulk, then we consider it a surface atom. We calculate the - coordination using pymatgen's Voronoi algorithms. - - Note that if a single element has different sites within a bulk and these - sites have different coordinations, then we consider slab atoms - "under-coordinated" only if they are less coordinated than the most under - undercoordinated bulk atom. For example: Say we have a bulk with two Cu - sites. One site has a coordination of 12 and another a coordination of 9. - If a slab atom has a coordination of 10, we will consider it a bulk atom. - - :param bulk_atoms: The bulk structure that the surface was cut from. - :type bulk_atoms: ase.Atoms - :param slab_atoms: The slab structure. - :type slab_atoms: ase.Atoms - :param height_tags: The tags determined by the `find_surface_atoms_by_height` algo. - :type height_tags: list - - :returns: **tags** -- A list of 0s and 1s whose indices align with the atoms in - `slab_atoms`. 0s indicate a bulk atom and 1 indicates a surface atom. - :rtype: list - - -.. py:function:: calculate_center_of_mass(struct) - - Calculates the center of mass of the slab. - - -.. py:function:: calculate_coordination_of_bulk_atoms(bulk_atoms) - - Finds all unique atoms in a bulk structure and then determines their - coordination number. Then parses these coordination numbers into a - dictionary whose keys are the elements of the atoms and whose values are - their possible coordination numbers. 
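The tiling, tagging, and constraint helpers documented above compose naturally; a hedged sketch, with return types taken from the docstrings (each helper returns an `ase.Atoms` copy):

.. code-block:: python

    from ase.build import bulk as ase_bulk, fcc111

    from fairchem.data.oc.core.slab import (
        set_fixed_atom_constraints,
        tag_surface_atoms,
        tile_atoms,
    )

    bulk_atoms = ase_bulk("Cu", "fcc", a=3.61)
    slab_atoms = fcc111("Cu", size=(2, 2, 4), vacuum=10.0)

    tiled = tile_atoms(slab_atoms, min_ab=8)                             # span >= 8 A along a and b
    tagged = tag_surface_atoms(slab_atoms=tiled, bulk_atoms=bulk_atoms)  # 0 = bulk, 1 = surface
    fixed = set_fixed_atom_constraints(tagged)                           # constrain sub-surface atoms
    print(fixed.get_tags(), fixed.constraints)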
- For example: `bulk_cns = {'Pt': {3., 12.}, 'Pd': {12.}}` - - :param bulk_atoms: The bulk structure. - :type bulk_atoms: ase.Atoms - - :returns: **bulk_cn_dict** -- A dictionary whose keys are the elements of the atoms and whose values - are their possible coordination numbers. - :rtype: dict - - -.. py:function:: compute_slabs(bulk_atoms: ase.Atoms = None, max_miller: int = 2, specific_millers: list = None) - - Enumerates all the symmetrically distinct slabs of a bulk structure. - It will not enumerate slabs with Miller indices above the - `max_miller` argument. Note that we also look at the bottoms of slabs - if they are distinct from the top. If they are distinct, we flip the - surface so the bottom is pointing upwards. - - :param bulk_atoms: The bulk structure. - :type bulk_atoms: ase.Atoms - :param max_miller: The maximum Miller index of the slabs to enumerate. Increasing this - argument will increase the number of slabs, and the slabs will generally - become larger. - :type max_miller: int - :param specific_millers: A list of Miller indices that you want to enumerate. If this argument - is not `None`, then the `max_miller` argument is ignored. - :type specific_millers: list - - :returns: **all_slabs_info** -- A list of 5-tuples containing pymatgen structure objects for enumerated - slabs, the Miller indices, floats for the shifts, booleans for top, and - the oriented bulk structure. - :rtype: list - - -.. py:function:: flip_struct(struct: pymatgen.core.structure.Structure) - - Flips an atoms object upside down. Normally used to flip slabs. - - :param struct: pymatgen structure object of the surface you want to flip - :type struct: Structure - - :returns: **flipped_struct** -- pymatgen structure object of the flipped surface. - :rtype: Structure - - -.. py:function:: is_structure_invertible(struct: pymatgen.core.structure.Structure) - - This function figures out whether or not an `Structure` - object has symmetricity. In this function, the affine matrix is a rotation - matrix that is multiplied with the XYZ positions of the crystal. If the z,z - component of that is negative, it means symmetry operation exist, it could - be a mirror operation, or one that involves multiple rotations/etc. - Regardless, it means that the top becomes the bottom and vice-versa, and the - structure is the symmetric. i.e. structure_XYZ = structure_XYZ*M. - - In short: If this function returns `False`, then the input structure can - be flipped in the z-direction to create a new structure. - - :param struct: pymatgen structure object of the slab. - :type struct: Structure - - :returns: * A boolean indicating whether or not your `ase.Atoms` object is - * *symmetric in z-direction (i.e. symmetric with respect to x-y plane).* - - -.. py:function:: standardize_bulk(atoms: ase.Atoms) - - There are many ways to define a bulk unit cell. If you change the unit - cell itself but also change the locations of the atoms within the unit - cell, you can effectively get the same bulk structure. To address this, - there is a standardization method used to reduce the degrees of freedom - such that each unit cell only has one "true" configuration. This - function will align a unit cell you give it to fit within this - standardization. - - :param atoms: `ase.Atoms` object of the bulk you want to standardize. - :type atoms: ase.Atoms - - :returns: **standardized_struct** -- pymatgen structure object of the standardized bulk. 
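The slab-enumeration helpers documented in this module can be sketched as follows; the order of the 5-tuple unpacking is assumed from the `compute_slabs` return description:

.. code-block:: python

    from ase.build import bulk as ase_bulk

    from fairchem.data.oc.core.slab import (
        compute_slabs,
        flip_struct,
        is_structure_invertible,
        standardize_bulk,
    )

    atoms = ase_bulk("Cu", "fcc", a=3.61)
    standardized = standardize_bulk(atoms)   # pymatgen Structure of the standardized bulk

    # Enumerate symmetrically distinct slabs up to Miller index 2.
    slabs_info = compute_slabs(bulk_atoms=atoms, max_miller=2)
    struct, millers, shift, top, oriented_bulk = slabs_info[0]

    # A non-invertible structure has a distinct bottom surface worth keeping.
    if not is_structure_invertible(struct):
        flipped = flip_struct(struct)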
- :rtype: Structure - - diff --git a/_sources/autoapi/fairchem/data/oc/databases/index.rst b/_sources/autoapi/fairchem/data/oc/databases/index.rst deleted file mode 100644 index 75b18327d..000000000 --- a/_sources/autoapi/fairchem/data/oc/databases/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -:py:mod:`fairchem.data.oc.databases` -==================================== - -.. py:module:: fairchem.data.oc.databases - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - pkls/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - update/index.rst - - diff --git a/_sources/autoapi/fairchem/data/oc/databases/pkls/index.rst b/_sources/autoapi/fairchem/data/oc/databases/pkls/index.rst deleted file mode 100644 index 21e06cdf2..000000000 --- a/_sources/autoapi/fairchem/data/oc/databases/pkls/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -:py:mod:`fairchem.data.oc.databases.pkls` -========================================= - -.. py:module:: fairchem.data.oc.databases.pkls - - -Package Contents ----------------- - -.. py:data:: BULK_PKL_PATH - - - -.. py:data:: ADSORBATES_PKL_PATH - - - diff --git a/_sources/autoapi/fairchem/data/oc/databases/update/index.rst b/_sources/autoapi/fairchem/data/oc/databases/update/index.rst deleted file mode 100644 index bf2be9dc6..000000000 --- a/_sources/autoapi/fairchem/data/oc/databases/update/index.rst +++ /dev/null @@ -1,40 +0,0 @@ -:py:mod:`fairchem.data.oc.databases.update` -=========================================== - -.. py:module:: fairchem.data.oc.databases.update - -.. autoapi-nested-parse:: - - Script for updating ase pkl and db files from v3.19 to v3.21. - Run it with ase v3.19. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.databases.update.pbc_patch - fairchem.data.oc.databases.update.set_pbc_patch - fairchem.data.oc.databases.update.update_pkls - fairchem.data.oc.databases.update.update_dbs - - - -.. py:function:: pbc_patch(self) - - -.. py:function:: set_pbc_patch(self, pbc) - - -.. py:function:: update_pkls() - - -.. py:function:: update_dbs() - - diff --git a/_sources/autoapi/fairchem/data/oc/experimental/get_energies/index.rst b/_sources/autoapi/fairchem/data/oc/experimental/get_energies/index.rst deleted file mode 100644 index 70a85de2a..000000000 --- a/_sources/autoapi/fairchem/data/oc/experimental/get_energies/index.rst +++ /dev/null @@ -1,39 +0,0 @@ -:py:mod:`fairchem.data.oc.experimental.get_energies` -==================================================== - -.. py:module:: fairchem.data.oc.experimental.get_energies - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.experimental.get_energies.extract_file - fairchem.data.oc.experimental.get_energies.process_func - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.experimental.get_energies.input_folder - - -.. py:function:: extract_file(zipname, file_to_unzip, extract_to) - - -.. py:function:: process_func(indices, dirlist, ans) - - -.. py:data:: input_folder - :value: 'temp_download/' - - - diff --git a/_sources/autoapi/fairchem/data/oc/experimental/merge_traj/index.rst b/_sources/autoapi/fairchem/data/oc/experimental/merge_traj/index.rst deleted file mode 100644 index 6d216f4e5..000000000 --- a/_sources/autoapi/fairchem/data/oc/experimental/merge_traj/index.rst +++ /dev/null @@ -1,29 +0,0 @@ -:py:mod:`fairchem.data.oc.experimental.merge_traj` -================================================== - -.. 
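The `BULK_PKL_PATH` and `ADSORBATES_PKL_PATH` constants documented above are the default database paths used by `Bulk` and `Adsorbate`. Assuming the files are plain pickles (as the names suggest), a quick way to inspect them:

.. code-block:: python

    import pickle

    from fairchem.data.oc.databases.pkls import ADSORBATES_PKL_PATH, BULK_PKL_PATH

    with open(BULK_PKL_PATH, "rb") as f:
        bulk_db = pickle.load(f)        # List[Dict[str, Any]], per the Bulk annotation
    with open(ADSORBATES_PKL_PATH, "rb") as f:
        adsorbate_db = pickle.load(f)   # Dict[int, Tuple[Any, ...]], per the Adsorbate annotation

    print(len(bulk_db), len(adsorbate_db))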
py:module:: fairchem.data.oc.experimental.merge_traj - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.experimental.merge_traj.extract_file - fairchem.data.oc.experimental.merge_traj.main - - - -.. py:function:: extract_file(zipname, file_to_unzip, extract_to) - - -.. py:function:: main() - - Given a directory containing adsorbate subdirectories, loops through all - runs and merges intermediate checkpoints into a single, full trajectory. - - diff --git a/_sources/autoapi/fairchem/data/oc/experimental/perturb_systems/index.rst b/_sources/autoapi/fairchem/data/oc/experimental/perturb_systems/index.rst deleted file mode 100644 index 3888fca0f..000000000 --- a/_sources/autoapi/fairchem/data/oc/experimental/perturb_systems/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -:py:mod:`fairchem.data.oc.experimental.perturb_systems` -======================================================= - -.. py:module:: fairchem.data.oc.experimental.perturb_systems - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.experimental.perturb_systems.main - - - -.. py:function:: main() - - Rattles every image along a relaxation pathway at 5 different variances. - Rattled images are then put in their own directory along with the input - files necessary to run VASP calculations. - - diff --git a/_sources/autoapi/fairchem/data/oc/experimental/rattle_test/index.rst b/_sources/autoapi/fairchem/data/oc/experimental/rattle_test/index.rst deleted file mode 100644 index 9449c836e..000000000 --- a/_sources/autoapi/fairchem/data/oc/experimental/rattle_test/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -:py:mod:`fairchem.data.oc.experimental.rattle_test` -=================================================== - -.. py:module:: fairchem.data.oc.experimental.rattle_test - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.experimental.rattle_test.main - - - -.. py:function:: main() - - Checks whether ASE's rattle modifies fixed atoms. - ' - - diff --git a/_sources/autoapi/fairchem/data/oc/experimental/utils/index.rst b/_sources/autoapi/fairchem/data/oc/experimental/utils/index.rst deleted file mode 100644 index 560db4dac..000000000 --- a/_sources/autoapi/fairchem/data/oc/experimental/utils/index.rst +++ /dev/null @@ -1,53 +0,0 @@ -:py:mod:`fairchem.data.oc.experimental.utils` -============================================= - -.. py:module:: fairchem.data.oc.experimental.utils - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.experimental.utils.v0_check - fairchem.data.oc.experimental.utils.restart_bug_check - fairchem.data.oc.experimental.utils.plot_traj - - - -.. py:function:: v0_check(full_traj, initial) - - Checks whether the initial structure as gathered from the POSCAR input file - is in agreement with the initial image of the full trajectory. If not, the - trajectory comes fro the V0 dataset which failed to save intermediate - checkpoints. - - Args - full_traj (list of Atoms objects): Calculated full trajectory. - initial (Atoms object): Starting image provided by POSCAR.. - - -.. py:function:: restart_bug_check(full_traj) - - Observed that some of the trajectories had a strange identically cyclical - behavior - suggesting that a checkpoint was restarted from an earlier - checkpoint rather than the latest. Checks whether the trajectory provided - falls within that bug. 
- - Args - full_traj (list of Atoms objects): Calculated full trajectory. - - -.. py:function:: plot_traj(traj, fname) - - Plots the energy profile of a given trajectory. - - Args - traj (list of Atoms objects): Full trajectory to be plotted - fname (str): Filename to be used as the title and to save the figure as. - - diff --git a/_sources/autoapi/fairchem/data/oc/index.rst b/_sources/autoapi/fairchem/data/oc/index.rst deleted file mode 100644 index 6598b5c52..000000000 --- a/_sources/autoapi/fairchem/data/oc/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -:py:mod:`fairchem.data.oc` -========================== - -.. py:module:: fairchem.data.oc - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - core/index.rst - databases/index.rst - utils/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - structure_generator/index.rst - - diff --git a/_sources/autoapi/fairchem/data/oc/scripts/precompute_sample_structures/index.rst b/_sources/autoapi/fairchem/data/oc/scripts/precompute_sample_structures/index.rst deleted file mode 100644 index 4a7fc3cdb..000000000 --- a/_sources/autoapi/fairchem/data/oc/scripts/precompute_sample_structures/index.rst +++ /dev/null @@ -1,126 +0,0 @@ -:py:mod:`fairchem.data.oc.scripts.precompute_sample_structures` -=============================================================== - -.. py:module:: fairchem.data.oc.scripts.precompute_sample_structures - -.. autoapi-nested-parse:: - - This submodule contains the scripts that we used to sample the adsorption - structures. - - Note that some of these scripts were taken from - [GASpy](https://github.com/ulissigroup/GASpy) with permission of the author. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.scripts.precompute_sample_structures.enumerate_surfaces_for_saving - fairchem.data.oc.scripts.precompute_sample_structures.standardize_bulk - fairchem.data.oc.scripts.precompute_sample_structures.is_structure_invertible - fairchem.data.oc.scripts.precompute_sample_structures.flip_struct - fairchem.data.oc.scripts.precompute_sample_structures.precompute_enumerate_surface - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.scripts.precompute_sample_structures.__authors__ - fairchem.data.oc.scripts.precompute_sample_structures.__email__ - fairchem.data.oc.scripts.precompute_sample_structures.s - - -.. py:data:: __authors__ - :value: ['Kevin Tran', 'Aini Palizhati', 'Siddharth Goyal', 'Zachary Ulissi'] - - - -.. py:data:: __email__ - :value: ['ktran@andrew.cmu.edu'] - - - -.. py:function:: enumerate_surfaces_for_saving(bulk_atoms, max_miller=MAX_MILLER) - - Enumerate all the symmetrically distinct surfaces of a bulk structure. It - will not enumerate surfaces with Miller indices above the `max_miller` - argument. Note that we also look at the bottoms of surfaces if they are - distinct from the top. If they are distinct, we flip the surface so the bottom - is pointing upwards. - - :param bulk_atoms: `ase.Atoms` object of the bulk you want to enumerate surfaces from. - :param max_miller: An integer indicating the maximum Miller index of the surfaces you are - willing to enumerate. Increasing this argument will increase the number of - surfaces, but the surfaces will generally become larger. - - :returns: all_slabs_info, a list of 4-tuples containing the `pymatgen.Structure` - objects for the surfaces we have enumerated, the Miller indices, floats for - the shifts, and Booleans for "top". - - ..
py:function:: standardize_bulk(atoms) - - There are many ways to define a bulk unit cell. If you change the unit cell - itself but also change the locations of the atoms within the unit cell, you - can get effectively the same bulk structure. To address this, there is a - standardization method used to reduce the degrees of freedom such that each - unit cell only has one "true" configuration. This function will align a - unit cell you give it to fit within this standardization. - - Arg: - atoms `ase.Atoms` object of the bulk you want to standardize - :returns: standardized_struct `pymatgen.Structure` of the standardized bulk - - -.. py:function:: is_structure_invertible(structure) - - This function figures out whether or not a `pymatgen.Structure` object is - symmetric in the z-direction. In this function, the affine matrix is a rotation matrix that - is multiplied with the XYZ positions of the crystal. If the z,z component - of that matrix is negative, it means a symmetry operation exists; it could be a - mirror operation, or one that involves multiple rotations, etc. Regardless, - it means that the top becomes the bottom and vice versa, and the structure - is symmetric, i.e. structure_XYZ = structure_XYZ*M. - - In short: If this function returns `False`, then the input structure can - be flipped in the z-direction to create a new structure. - - Arg: - structure A `pymatgen.Structure` object. - Returns - A boolean indicating whether or not the input structure is - symmetric in the z-direction (i.e. symmetric with respect to the x-y plane). - - -.. py:function:: flip_struct(struct) - - Flips a structure upside down. Normally used to flip surfaces. - - Arg: - struct `pymatgen.Structure` object - :returns: - - flipped_struct The same structure that was fed as an - argument, but flipped upside down. - - -.. py:function:: precompute_enumerate_surface(bulk_database, bulk_index, opfile) - - -.. py:data:: s - - - diff --git a/_sources/autoapi/fairchem/data/oc/structure_generator/index.rst b/_sources/autoapi/fairchem/data/oc/structure_generator/index.rst deleted file mode 100644 index 8ef68cf0a..000000000 --- a/_sources/autoapi/fairchem/data/oc/structure_generator/index.rst +++ /dev/null @@ -1,105 +0,0 @@ -:py:mod:`fairchem.data.oc.structure_generator` -============================================== - -.. py:module:: fairchem.data.oc.structure_generator - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.structure_generator.StructureGenerator - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.structure_generator.write_surface - fairchem.data.oc.structure_generator.parse_args - fairchem.data.oc.structure_generator.precompute_slabs - fairchem.data.oc.structure_generator.run_placements - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.structure_generator.args - - -.. py:class:: StructureGenerator(args, bulk_index, surface_index, adsorbate_index) - - - A class that creates adsorbate/bulk/slab objects given specified indices, - and writes vasp input files and metadata for multiple placements of the adsorbate - on the slab. You can choose random, heuristic, or both types of placements. - - The output directory structure will have the following nested structure, - where "files" represents the vasp input files and the metadata.pkl: - outputdir/ - bulk0/ - surface0/ - surface/files - ads0/ - heur0/files - heur1/files - rand0/files - ... - ads1/ - ... - surface1/ - ... - bulk1/ - ...
- - Precomputed surfaces will be calculated and saved out if they don't - already exist in the provided directory. - - :param args: Contains all command line args - :type args: argparse.Namespace - :param bulk_index: Index of the bulk within the bulk db - :type bulk_index: int - :param surface_index: Index of the surface in the list of all possible surfaces - :type surface_index: int - :param adsorbate_index: Index of the adsorbate within the adsorbate db - :type adsorbate_index: int - - .. py:method:: run() - - Create adsorbate/bulk/surface objects, generate adslab placements, - and write to files. - - - .. py:method:: _write_adslabs(adslab_obj, mode_str) - - Write one set of adslabs (called separately for random and heuristic placements). - - - -.. py:function:: write_surface(args, slab, bulk_index, surface_index) - - Writes vasp inputs and metadata for a specified slab. - - -.. py:function:: parse_args() - - -.. py:function:: precompute_slabs(bulk_ind) - - -.. py:function:: run_placements(inputs) - - -.. py:data:: args - - - diff --git a/_sources/autoapi/fairchem/data/oc/tests/old_tests/check_energy_and_forces/index.rst b/_sources/autoapi/fairchem/data/oc/tests/old_tests/check_energy_and_forces/index.rst deleted file mode 100644 index 24b6ee97f..000000000 --- a/_sources/autoapi/fairchem/data/oc/tests/old_tests/check_energy_and_forces/index.rst +++ /dev/null @@ -1,70 +0,0 @@ -:py:mod:`fairchem.data.oc.tests.old_tests.check_energy_and_forces` -================================================================== - -.. py:module:: fairchem.data.oc.tests.old_tests.check_energy_and_forces - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.old_tests.check_energy_and_forces.check_relaxed_forces - fairchem.data.oc.tests.old_tests.check_energy_and_forces.check_adsorption_energy - fairchem.data.oc.tests.old_tests.check_energy_and_forces.check_DFT_energy - fairchem.data.oc.tests.old_tests.check_energy_and_forces.check_positions_across_frames_are_different - fairchem.data.oc.tests.old_tests.check_energy_and_forces.read_pkl - fairchem.data.oc.tests.old_tests.check_energy_and_forces.run_checks - fairchem.data.oc.tests.old_tests.check_energy_and_forces.create_parser - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.old_tests.check_energy_and_forces.parser - - -.. py:function:: check_relaxed_forces(sid, path, thres) - - Check that all forces in the final frame of the adslab are less than a threshold. - - -.. py:function:: check_adsorption_energy(sid, path, ref_energy, adsorption_energy) - - -.. py:function:: check_DFT_energy(sid, path, e_tol=0.05) - - Given a relaxation trajectory, check that: 1) the final energy is less than the initial - energy, raising an error if not; 2) the energy decreases throughout the trajectory (small spikes are okay); - and 3) if 2) fails, whether it is just a matter of the tolerance being too strict, by - considering only the first quarter of the trajectory and sampling every 10th frame - to check for an _almost_ monotonic decrease in energies. - If any frame(i+1) energy is higher than frame(i) energy, flag it and plot the trajectory. - - -.. py:function:: check_positions_across_frames_are_different(sid, path) - - Given a relaxation trajectory, make sure positions for two consecutive - frames are not identical. - - -.. py:function:: read_pkl(fname) - - -.. py:function:: run_checks(args) - - -.. py:function:: create_parser() - - -..
py:data:: parser - - - diff --git a/_sources/autoapi/fairchem/data/oc/tests/old_tests/check_inputs/index.rst b/_sources/autoapi/fairchem/data/oc/tests/old_tests/check_inputs/index.rst deleted file mode 100644 index 9df54e4f0..000000000 --- a/_sources/autoapi/fairchem/data/oc/tests/old_tests/check_inputs/index.rst +++ /dev/null @@ -1,96 +0,0 @@ -:py:mod:`fairchem.data.oc.tests.old_tests.check_inputs` -======================================================= - -.. py:module:: fairchem.data.oc.tests.old_tests.check_inputs - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.old_tests.check_inputs.obtain_metadata - fairchem.data.oc.tests.old_tests.check_inputs.create_df - fairchem.data.oc.tests.old_tests.check_inputs.adslabs_are_unique - fairchem.data.oc.tests.old_tests.check_inputs.check_commonelems - fairchem.data.oc.tests.old_tests.check_inputs.is_adsorbate_placed_correct - fairchem.data.oc.tests.old_tests.check_inputs._get_connectivity - - - -.. py:function:: obtain_metadata(input_dir, split) - - Get the metadata given an input directory and the split of the data. - :param input_dir: - :type input_dir: str - :param split: Which split the data belongs to, e.g. 'train', 'val_id', 'test_id', - 'val_ood_cat/ads/both', or 'test_ood_cat/ads/both'. - :type split: str - - :returns: metadata (tuple) adslab properties. - Ex: ('mp-126', (1,1,1), 0.025, True, '*OH', (0,0,0), 'val_ood_ads') - - -.. py:function:: create_df(metadata_lst, df_name=None) - - Create a df from metadata to be used in the check_dataset.py file. - :param metadata_lst: A list of adslab properties in tuple form. Each tuple should - contain (mpid, miller index, shift, top, adsorbate SMILES string, - adsorption cartesian coordinate tuple, and which split the data belongs to). - Ex: ('mp-126', (1,1,1), 0.025, True, '*OH', (0,0,0), 'val_ood_ads') - - :returns: df A pandas DataFrame - - -.. py:function:: adslabs_are_unique(df, unique_by=['mpid', 'miller', 'shift', 'top', 'adsorbate', 'adsorption_site']) - - Test if there are duplicate adslabs given a df. If the input is another - format, convert it to a df first. - :param df: A pd.DataFrame containing metadata of the adslabs being checked. - :param unique_by: df column names that are used to detect duplicates. The default - list is the set of fingerprints that represent a unique adslab. - - -.. py:function:: check_commonelems(df, split1, split2, check='adsorbate') - - Given a df containing all the metadata of the calculations, check to see if there are - any bulk or adsorbate duplicates between train and val/test_ood. The dataframe should - have a "split_tag" column indicating which split (i.e. train, val_ood_ads, etc.) the data belongs to. - :param df: A pd.DataFrame containing metadata of the adslabs being checked. - :param split1: One of the splits from 'train', 'val_id', 'test_id', - 'val_ood_cat/ads/both', or 'test_ood_cat/ads/both'. - :param split2: One of the splits from 'train', 'val_id', 'test_id', - 'val_ood_cat/ads/both', or 'test_ood_cat/ads/both'. - - -.. py:function:: is_adsorbate_placed_correct(adslab_input, atoms_tag) - - Make sure all adsorbate atoms are connected after placement. - False means there is at least one isolated adsorbate atom.
- It should be used after input generation but before DFT to avoid - unnecessary computations. - :param adslab_input: `ase.Atoms` of the structure in its initial state - :param atoms_tag: - :type atoms_tag: list - - :returns: boolean. If there are any standalone adsorbate atoms after placement, - returns False. - - -.. py:function:: _get_connectivity(atoms) - - Generate the connectivity of an atoms obj. - :param atoms: An `ase.Atoms` object - - :returns: matrix The connectivity matrix of the atoms object. - - diff --git a/_sources/autoapi/fairchem/data/oc/tests/old_tests/compare_inputs_and_trajectory/index.rst b/_sources/autoapi/fairchem/data/oc/tests/old_tests/compare_inputs_and_trajectory/index.rst deleted file mode 100644 index 5bfcb8cb2..000000000 --- a/_sources/autoapi/fairchem/data/oc/tests/old_tests/compare_inputs_and_trajectory/index.rst +++ /dev/null @@ -1,53 +0,0 @@ -:py:mod:`fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory` -======================================================================== - -.. py:module:: fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory.get_starting_structure_from_input_dir - fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory.min_diff - fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory.compare - fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory.read_pkl - fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory.create_parser - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory.parser - - -.. py:function:: get_starting_structure_from_input_dir(input_dir) - - -.. py:function:: min_diff(atoms_init, atoms_final) - - Calculate atom-wise distances of two atoms objects, - taking into account periodic boundary conditions. - - -.. py:function:: compare(args) - - -.. py:function:: read_pkl(fname) - - -.. py:function:: create_parser() - - -.. py:data:: parser - - - diff --git a/_sources/autoapi/fairchem/data/oc/tests/old_tests/verify_correctness/index.rst b/_sources/autoapi/fairchem/data/oc/tests/old_tests/verify_correctness/index.rst deleted file mode 100644 index 93e8dbd91..000000000 --- a/_sources/autoapi/fairchem/data/oc/tests/old_tests/verify_correctness/index.rst +++ /dev/null @@ -1,42 +0,0 @@ -:py:mod:`fairchem.data.oc.tests.old_tests.verify_correctness` -============================================================= - -.. py:module:: fairchem.data.oc.tests.old_tests.verify_correctness - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.old_tests.verify_correctness.compare_runs - fairchem.data.oc.tests.old_tests.verify_correctness.create_parser - fairchem.data.oc.tests.old_tests.verify_correctness.main - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.old_tests.verify_correctness.parser - - -.. py:function:: compare_runs(path1, path2, reference_type, tol) - - -.. py:function:: create_parser() - - -.. py:function:: main(args) - - -..
py:data:: parser - - - diff --git a/_sources/autoapi/fairchem/data/oc/tests/test_adsorbate/index.rst b/_sources/autoapi/fairchem/data/oc/tests/test_adsorbate/index.rst deleted file mode 100644 index c16df1bd0..000000000 --- a/_sources/autoapi/fairchem/data/oc/tests/test_adsorbate/index.rst +++ /dev/null @@ -1,64 +0,0 @@ -:py:mod:`fairchem.data.oc.tests.test_adsorbate` -=============================================== - -.. py:module:: fairchem.data.oc.tests.test_adsorbate - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.test_adsorbate.TestAdsorbate - - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.test_adsorbate._test_db - fairchem.data.oc.tests.test_adsorbate._test_db_old - - -.. py:data:: _test_db - - - -.. py:data:: _test_db_old - - - -.. py:class:: TestAdsorbate - - - .. py:method:: test_adsorbate_init_from_id() - - - .. py:method:: test_adsorbate_init_from_smiles() - - - .. py:method:: test_adsorbate_init_random() - - - .. py:method:: test_adsorbate_init_from_id_with_db() - - - .. py:method:: test_adsorbate_init_from_smiles_with_db() - - - .. py:method:: test_adsorbate_init_random_with_db() - - - .. py:method:: test_adsorbate_init_reaction_string() - - - .. py:method:: test_adsorbate_init_reaction_string_with_old_db() - - - diff --git a/_sources/autoapi/fairchem/data/oc/tests/test_adsorbate_slab_config/index.rst b/_sources/autoapi/fairchem/data/oc/tests/test_adsorbate_slab_config/index.rst deleted file mode 100644 index 8781a250d..000000000 --- a/_sources/autoapi/fairchem/data/oc/tests/test_adsorbate_slab_config/index.rst +++ /dev/null @@ -1,51 +0,0 @@ -:py:mod:`fairchem.data.oc.tests.test_adsorbate_slab_config` -=========================================================== - -.. py:module:: fairchem.data.oc.tests.test_adsorbate_slab_config - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.test_adsorbate_slab_config.TestAdslab - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.test_adsorbate_slab_config.load_data - - - -.. py:function:: load_data(request) - - -.. py:class:: TestAdslab - - - .. py:method:: test_adslab_init() - - - .. py:method:: test_num_augmentations_per_site() - - - .. py:method:: test_placement_overlap() - - Test that the adsorbate does not overlap with the slab. - - - .. py:method:: test_is_adsorbate_com_on_normal() - - - .. py:method:: test_is_adsorbate_binding_atom_on_normal() - - - diff --git a/_sources/autoapi/fairchem/data/oc/tests/test_bulk/index.rst b/_sources/autoapi/fairchem/data/oc/tests/test_bulk/index.rst deleted file mode 100644 index b2df88670..000000000 --- a/_sources/autoapi/fairchem/data/oc/tests/test_bulk/index.rst +++ /dev/null @@ -1,76 +0,0 @@ -:py:mod:`fairchem.data.oc.tests.test_bulk` -========================================== - -.. py:module:: fairchem.data.oc.tests.test_bulk - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.test_bulk.TestBulk - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.test_bulk.load_bulk - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.test_bulk._test_db - - -.. py:function:: load_bulk(request) - - -.. py:data:: _test_db - - - -.. py:class:: TestBulk - - - .. py:method:: test_bulk_init_from_id() - - - .. py:method:: test_bulk_init_from_src_id() - - - .. py:method:: test_bulk_init_random() - - - .. 
py:method:: test_bulk_init_from_id_with_db() - - - .. py:method:: test_bulk_init_from_src_id_with_db() - - - .. py:method:: test_bulk_init_random_with_db() - - - .. py:method:: test_unique_slab_enumeration() - - - .. py:method:: test_precomputed_slab() - - - .. py:method:: test_slab_miller_enumeration() - - - .. py:method:: get_max_miller(slabs) - - - diff --git a/_sources/autoapi/fairchem/data/oc/tests/test_inputs/index.rst b/_sources/autoapi/fairchem/data/oc/tests/test_inputs/index.rst deleted file mode 100644 index 5d2b300a9..000000000 --- a/_sources/autoapi/fairchem/data/oc/tests/test_inputs/index.rst +++ /dev/null @@ -1,40 +0,0 @@ -:py:mod:`fairchem.data.oc.tests.test_inputs` -============================================ - -.. py:module:: fairchem.data.oc.tests.test_inputs - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.test_inputs.TestVasp - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.test_inputs.load_data - - - -.. py:function:: load_data(request) - - -.. py:class:: TestVasp - - - .. py:method:: test_cleanup() - - - .. py:method:: test_unique_kpts() - - - diff --git a/_sources/autoapi/fairchem/data/oc/tests/test_multi_adsorbate_slab_config/index.rst b/_sources/autoapi/fairchem/data/oc/tests/test_multi_adsorbate_slab_config/index.rst deleted file mode 100644 index 2007d5f9a..000000000 --- a/_sources/autoapi/fairchem/data/oc/tests/test_multi_adsorbate_slab_config/index.rst +++ /dev/null @@ -1,47 +0,0 @@ -:py:mod:`fairchem.data.oc.tests.test_multi_adsorbate_slab_config` -================================================================= - -.. py:module:: fairchem.data.oc.tests.test_multi_adsorbate_slab_config - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.test_multi_adsorbate_slab_config.TestMultiAdslab - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.test_multi_adsorbate_slab_config.load_data - - - -.. py:function:: load_data(request) - - -.. py:class:: TestMultiAdslab - - - .. py:method:: test_num_configurations() - - - .. py:method:: test_adsorbate_indices() - - Test that the adsorbate indices correspond to the unique adsorbates. - - - .. py:method:: test_placement_overlap() - - Test that the adsorbate sites do not overlap with each other. - - - diff --git a/_sources/autoapi/fairchem/data/oc/tests/test_slab/index.rst b/_sources/autoapi/fairchem/data/oc/tests/test_slab/index.rst deleted file mode 100644 index 4ccbbc70e..000000000 --- a/_sources/autoapi/fairchem/data/oc/tests/test_slab/index.rst +++ /dev/null @@ -1,32 +0,0 @@ -:py:mod:`fairchem.data.oc.tests.test_slab` -========================================== - -.. py:module:: fairchem.data.oc.tests.test_slab - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.tests.test_slab.TestSlab - - - - -.. py:class:: TestSlab - - - .. py:method:: test_slab_init_from_id() - - - .. py:method:: test_slab_init_from_specific_millers() - - - .. py:method:: test_slab_init_random() - - - diff --git a/_sources/autoapi/fairchem/data/oc/utils/flag_anomaly/index.rst b/_sources/autoapi/fairchem/data/oc/utils/flag_anomaly/index.rst deleted file mode 100644 index 3bc74cd9a..000000000 --- a/_sources/autoapi/fairchem/data/oc/utils/flag_anomaly/index.rst +++ /dev/null @@ -1,72 +0,0 @@ -:py:mod:`fairchem.data.oc.utils.flag_anomaly` -============================================= - -.. 
py:module:: fairchem.data.oc.utils.flag_anomaly - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.utils.flag_anomaly.DetectTrajAnomaly - - - - -.. py:class:: DetectTrajAnomaly(init_atoms, final_atoms, atoms_tag, final_slab_atoms=None, surface_change_cutoff_multiplier=1.5, desorption_cutoff_multiplier=1.5) - - - .. py:method:: is_adsorbate_dissociated() - - Tests if the initial adsorbate connectivity is maintained. - - :returns: True if the connectivity was not maintained, otherwise False - :rtype: (bool) - - - .. py:method:: has_surface_changed() - - Tests bond breaking / forming events within a tolerance on the surface so - that systems with significant adsorbate-induced surface changes may be discarded - since the reference to the relaxed slab may no longer be valid. - - :returns: True if the surface is reconstructed, otherwise False - :rtype: (bool) - - - .. py:method:: is_adsorbate_desorbed() - - If the adsorbate binding atoms have no connection with slab atoms, - consider it desorbed. - - :returns: True if there is desorption, otherwise False - :rtype: (bool) - - - .. py:method:: _get_connectivity(atoms, cutoff_multiplier=1.0) - - Generate the connectivity of an atoms obj. - - :param atoms: object which will have its connectivity considered - :type atoms: ase.Atoms - :param cutoff_multiplier: cushion for small atom movements when assessing - atom connectivity - :type cutoff_multiplier: float, optional - - :returns: The connectivity matrix of the atoms object. - :rtype: (np.ndarray) - - - .. py:method:: is_adsorbate_intercalated() - - Ensure the adsorbate isn't interacting with an atom that is not allowed to relax. - - :returns: True if any adsorbate atom neighbors a frozen atom, otherwise False - :rtype: (bool) - - - diff --git a/_sources/autoapi/fairchem/data/oc/utils/index.rst b/_sources/autoapi/fairchem/data/oc/utils/index.rst deleted file mode 100644 index 75eae8bc7..000000000 --- a/_sources/autoapi/fairchem/data/oc/utils/index.rst +++ /dev/null @@ -1,82 +0,0 @@ -:py:mod:`fairchem.data.oc.utils` -================================ - -.. py:module:: fairchem.data.oc.utils - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - flag_anomaly/index.rst - vasp/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.utils.DetectTrajAnomaly - - - - -.. py:class:: DetectTrajAnomaly(init_atoms, final_atoms, atoms_tag, final_slab_atoms=None, surface_change_cutoff_multiplier=1.5, desorption_cutoff_multiplier=1.5) - - - .. py:method:: is_adsorbate_dissociated() - - Tests if the initial adsorbate connectivity is maintained. - - :returns: True if the connectivity was not maintained, otherwise False - :rtype: (bool) - - - .. py:method:: has_surface_changed() - - Tests bond breaking / forming events within a tolerance on the surface so - that systems with significant adsorbate-induced surface changes may be discarded - since the reference to the relaxed slab may no longer be valid. - - :returns: True if the surface is reconstructed, otherwise False - :rtype: (bool) - - - .. py:method:: is_adsorbate_desorbed() - - If the adsorbate binding atoms have no connection with slab atoms, - consider it desorbed. - - :returns: True if there is desorption, otherwise False - :rtype: (bool) - - - .. py:method:: _get_connectivity(atoms, cutoff_multiplier=1.0) - - Generate the connectivity of an atoms obj.
- - :param atoms: object which will have its connectivity considered - :type atoms: ase.Atoms - :param cutoff_multiplier: cushion for small atom movements when assessing - atom connectivity - :type cutoff_multiplier: float, optional - - :returns: The connectivity matrix of the atoms object. - :rtype: (np.ndarray) - - - .. py:method:: is_adsorbate_intercalated() - - Ensure the adsorbate isn't interacting with an atom that is not allowed to relax. - - :returns: True if any adsorbate atom neighbors a frozen atom, otherwise False - :rtype: (bool) - - - diff --git a/_sources/autoapi/fairchem/data/oc/utils/vasp/index.rst b/_sources/autoapi/fairchem/data/oc/utils/vasp/index.rst deleted file mode 100644 index 2592d8a7d..000000000 --- a/_sources/autoapi/fairchem/data/oc/utils/vasp/index.rst +++ /dev/null @@ -1,93 +0,0 @@ -:py:mod:`fairchem.data.oc.utils.vasp` -===================================== - -.. py:module:: fairchem.data.oc.utils.vasp - -.. autoapi-nested-parse:: - - This submodule contains the scripts that we used to run VASP. - - Note that some of these scripts were taken and modified from - [GASpy](https://github.com/ulissigroup/GASpy) with permission of the authors. - - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.utils.vasp._clean_up_inputs - fairchem.data.oc.utils.vasp.calculate_surface_k_points - fairchem.data.oc.utils.vasp.write_vasp_input_files - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.oc.utils.vasp.__author__ - fairchem.data.oc.utils.vasp.__email__ - fairchem.data.oc.utils.vasp.VASP_FLAGS - fairchem.data.oc.utils.vasp.BULK_VASP_FLAGS - - -.. py:data:: __author__ - :value: 'Kevin Tran' - - - -.. py:data:: __email__ - :value: 'ktran@andrew.cmu.edu' - - - -.. py:data:: VASP_FLAGS - - - -.. py:data:: BULK_VASP_FLAGS - - - -.. py:function:: _clean_up_inputs(atoms, vasp_flags) - - Parses the inputs and makes sure some things are straightened out. - - Arg: - atoms `ase.Atoms` object of the structure we want to relax - vasp_flags A dictionary of settings we want to pass to the `Vasp` - calculator - :returns: - - atoms `ase.Atoms` object of the structure we want to relax, but - with the unit vectors fixed (if needed) - vasp_flags A modified version of the 'vasp_flags' argument - - -.. py:function:: calculate_surface_k_points(atoms) - - For surface calculations, it's a good practice to calculate the k-point - mesh given the unit cell size. We do that on-the-spot here. - - Arg: - atoms `ase.Atoms` object of the structure we want to relax - :returns: k_pts A 3-tuple of integers indicating the k-point mesh to use - - -.. py:function:: write_vasp_input_files(atoms, outdir='.', vasp_flags=None) - - Effectively goes through the same motions as the `run_vasp` function, - except it only writes the input files instead of running. - - :param atoms: `ase.Atoms` object that we want to relax. - :param outdir: A string indicating where you want to save the input files. Defaults to '.' - :param vasp_flags: A dictionary of settings we want to pass to the `Vasp` calculator.
Defaults to a standard set of values if `None` - - diff --git a/_sources/autoapi/fairchem/data/odac/force_field/FF_analysis/index.rst b/_sources/autoapi/fairchem/data/odac/force_field/FF_analysis/index.rst deleted file mode 100644 index 13e36c30b..000000000 --- a/_sources/autoapi/fairchem/data/odac/force_field/FF_analysis/index.rst +++ /dev/null @@ -1,67 +0,0 @@ -:py:mod:`fairchem.data.odac.force_field.FF_analysis` -==================================================== - -.. py:module:: fairchem.data.odac.force_field.FF_analysis - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.odac.force_field.FF_analysis.get_data - fairchem.data.odac.force_field.FF_analysis.binned_average - fairchem.data.odac.force_field.FF_analysis.bin_plot - fairchem.data.odac.force_field.FF_analysis.get_Fig4a - fairchem.data.odac.force_field.FF_analysis.get_Fig4b - fairchem.data.odac.force_field.FF_analysis.get_Fig4c - fairchem.data.odac.force_field.FF_analysis.get_Fig4d - fairchem.data.odac.force_field.FF_analysis.phys_err - fairchem.data.odac.force_field.FF_analysis.chem_err - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.odac.force_field.FF_analysis.infile - - -.. py:function:: get_data(infile, limit=2) - - -.. py:function:: binned_average(DFT_ads, pred_err, bins) - - -.. py:function:: bin_plot(ax, bins, heights, **kwargs) - - -.. py:function:: get_Fig4a(raw_error_CO2, raw_error_H2O, b=20, outfile='Fig5a.png') - - -.. py:function:: get_Fig4b(int_DFT_CO2, err_CO2, int_DFT_H2O, err_H2O, outfile='Fig5b.png') - - -.. py:function:: get_Fig4c(DFT_CO2, err_CO2, outfile='Fig5c.png') - - -.. py:function:: get_Fig4d(DFT_H2O, err_H2O, outfile='Fig5d.png') - - -.. py:function:: phys_err(DFT, FF) - - -.. py:function:: chem_err(DFT, FF) - - -.. py:data:: infile - :value: '/storage/home/hcoda1/8/lbrabson3/p-amedford6-0/s2ef/final/data_w_oms.json' - - - diff --git a/_sources/autoapi/fairchem/data/odac/promising_mof/promising_mof_energies/energy/index.rst b/_sources/autoapi/fairchem/data/odac/promising_mof/promising_mof_energies/energy/index.rst deleted file mode 100644 index 1b9e95fc5..000000000 --- a/_sources/autoapi/fairchem/data/odac/promising_mof/promising_mof_energies/energy/index.rst +++ /dev/null @@ -1,208 +0,0 @@ -:py:mod:`fairchem.data.odac.promising_mof.promising_mof_energies.energy` -======================================================================== - -.. py:module:: fairchem.data.odac.promising_mof.promising_mof_energies.energy - - -Module Contents ---------------- - -.. py:data:: raw_ads_energy_data - - - -.. py:data:: complete_data - - - -.. py:data:: temp_split_string - - - -.. py:data:: complete_data - - - -.. py:data:: complete_data_merged_pristine - - - -.. py:data:: complete_data_merged_pristine - - - -.. py:data:: complete_data_merged_defective - - - -.. py:data:: complete_data_merged_defective - - - -.. py:data:: complete_data_merged_pristine_co2 - - - -.. py:data:: complete_data_merged_pristine_h2o - - - -.. py:data:: complete_data_merged_pristine_co_ads - - - -.. py:data:: complete_data_merged_pristine_co_ads_2 - - - -.. py:data:: complete_data_merged_defective_co2 - - - -.. py:data:: complete_data_merged_defective_h2o - - - -.. py:data:: complete_data_merged_defective_co_ads - - - -.. py:data:: complete_data_merged_defective_co_ads_2 - - - -.. py:data:: lowest_energy_data_co2 - - - -.. py:data:: current_entry - - - -.. py:data:: lowest_energy_data_h2o - - - -.. py:data:: current_entry - - - -..
py:data:: lowest_energy_data_co_ads - - - -.. py:data:: current_entry - - - -.. py:data:: lowest_energy_data_co_ads_2 - - - -.. py:data:: current_entry - - - -.. py:data:: adsorption_data - - - -.. py:data:: count - :value: 0 - - - -.. py:data:: lowest_energy_data_co2_defective - - - -.. py:data:: current_entry - - - -.. py:data:: lowest_energy_data_h2o_defective - - - -.. py:data:: current_entry - - - -.. py:data:: lowest_energy_data_co_ads_defective - - - -.. py:data:: current_entry - - - -.. py:data:: lowest_energy_data_co_ads_2_defective - - - -.. py:data:: current_entry - - - -.. py:data:: adsorption_data_defective - - - -.. py:data:: unique_combinations_count - - - -.. py:data:: def_counts_df - - - -.. py:data:: mof_name - - - -.. py:data:: missing_DDEC - - - -.. py:data:: missing_DDEC_pristine - - - -.. py:data:: missing_DDEC_defective - - - -.. py:data:: index_drop_ddec_pristine - :value: [] - - - -.. py:data:: adsorption_data - - - -.. py:data:: index_drop_ddec_defective - :value: [] - - - -.. py:data:: adsorption_data_defective - - - -.. py:data:: adsorption_data - - - -.. py:data:: adsorption_data_defective - - - -.. py:data:: promising_pristine - - - -.. py:data:: promising_defective - - - diff --git a/_sources/autoapi/fairchem/data/om/biomolecules/geom/sample_geom_drugs/index.rst b/_sources/autoapi/fairchem/data/om/biomolecules/geom/sample_geom_drugs/index.rst deleted file mode 100644 index d296d291a..000000000 --- a/_sources/autoapi/fairchem/data/om/biomolecules/geom/sample_geom_drugs/index.rst +++ /dev/null @@ -1,30 +0,0 @@ -:py:mod:`fairchem.data.om.biomolecules.geom.sample_geom_drugs` -============================================================== - -.. py:module:: fairchem.data.om.biomolecules.geom.sample_geom_drugs - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.om.biomolecules.geom.sample_geom_drugs.write_pickle - fairchem.data.om.biomolecules.geom.sample_geom_drugs.parse_args - fairchem.data.om.biomolecules.geom.sample_geom_drugs.main - - - -.. py:function:: write_pickle(data, path) - - -.. py:function:: parse_args() - - -.. py:function:: main() - - diff --git a/_sources/autoapi/fairchem/data/om/biomolecules/geom/write_geom_drugs_structures/index.rst b/_sources/autoapi/fairchem/data/om/biomolecules/geom/write_geom_drugs_structures/index.rst deleted file mode 100644 index 8e8f6a49d..000000000 --- a/_sources/autoapi/fairchem/data/om/biomolecules/geom/write_geom_drugs_structures/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -:py:mod:`fairchem.data.om.biomolecules.geom.write_geom_drugs_structures` -======================================================================== - -.. py:module:: fairchem.data.om.biomolecules.geom.write_geom_drugs_structures - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.om.biomolecules.geom.write_geom_drugs_structures.parse_args - fairchem.data.om.biomolecules.geom.write_geom_drugs_structures.main - - - -.. py:function:: parse_args() - - -.. py:function:: main() - - diff --git a/_sources/autoapi/fairchem/data/om/omdata/orca/calc/index.rst b/_sources/autoapi/fairchem/data/om/omdata/orca/calc/index.rst deleted file mode 100644 index 96efaf1bc..000000000 --- a/_sources/autoapi/fairchem/data/om/omdata/orca/calc/index.rst +++ /dev/null @@ -1,66 +0,0 @@ -:py:mod:`fairchem.data.om.omdata.orca.calc` -=========================================== - -.. 
py:module:: fairchem.data.om.omdata.orca.calc - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.om.omdata.orca.calc.write_orca_inputs - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.om.omdata.orca.calc.ORCA_FUNCTIONAL - fairchem.data.om.omdata.orca.calc.ORCA_BASIS - fairchem.data.om.omdata.orca.calc.ORCA_SIMPLE_INPUT - fairchem.data.om.omdata.orca.calc.ORCA_BLOCKS - fairchem.data.om.omdata.orca.calc.ORCA_ASE_SIMPLE_INPUT - fairchem.data.om.omdata.orca.calc.OPT_PARAMETERS - - -.. py:data:: ORCA_FUNCTIONAL - :value: 'wB97M-V' - - - -.. py:data:: ORCA_BASIS - :value: 'def2-TZVPD' - - - -.. py:data:: ORCA_SIMPLE_INPUT - :value: ['EnGrad', 'RIJCOSX', 'def2/J', 'NoUseSym', 'DIIS', 'NOSOSCF', 'NormalConv', 'DEFGRID3', 'ALLPOP', 'NBO'] - - - -.. py:data:: ORCA_BLOCKS - :value: ['%scf Convergence Tight maxiter 300 end', '%elprop Dipole true Quadrupole true end', '%nbo... - - - -.. py:data:: ORCA_ASE_SIMPLE_INPUT - - - -.. py:data:: OPT_PARAMETERS - - - -.. py:function:: write_orca_inputs(atoms, output_directory, charge=0, mult=1, orcasimpleinput=ORCA_ASE_SIMPLE_INPUT, orcablocks=' '.join(ORCA_BLOCKS)) - - One-off method to be used if you wanted to write inputs for an arbitrary - system. Primarily used for debugging. - - diff --git a/_sources/autoapi/fairchem/data/om/omdata/orca/index.rst b/_sources/autoapi/fairchem/data/om/omdata/orca/index.rst deleted file mode 100644 index 5675bd633..000000000 --- a/_sources/autoapi/fairchem/data/om/omdata/orca/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -:py:mod:`fairchem.data.om.omdata.orca` -====================================== - -.. py:module:: fairchem.data.om.omdata.orca - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - calc/index.rst - recipes/index.rst - - diff --git a/_sources/autoapi/fairchem/data/om/omdata/orca/recipes/index.rst b/_sources/autoapi/fairchem/data/om/omdata/orca/recipes/index.rst deleted file mode 100644 index b7cfc29d0..000000000 --- a/_sources/autoapi/fairchem/data/om/omdata/orca/recipes/index.rst +++ /dev/null @@ -1,76 +0,0 @@ -:py:mod:`fairchem.data.om.omdata.orca.recipes` -============================================== - -.. py:module:: fairchem.data.om.omdata.orca.recipes - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.data.om.omdata.orca.recipes.single_point_calculation - fairchem.data.om.omdata.orca.recipes.ase_relaxation - - - -.. py:function:: single_point_calculation(atoms, charge, spin_multiplicity, xc=ORCA_FUNCTIONAL, basis=ORCA_BASIS, orcasimpleinput=None, orcablocks=None, nprocs=12, outputdir=os.getcwd(), **calc_kwargs) - - Wrapper around QUACC's static job to standardize single-point calculations. - See github.com/Quantum-Accelerators/quacc/blob/main/src/quacc/recipes/orca/core.py#L22 - for more details. 
- - :param atoms: Atoms object - :type atoms: Atoms - :param charge: Charge of system - :type charge: int - :param spin_multiplicity: Multiplicity of the system - :type spin_multiplicity: int - :param xc: Exchange-correlation functional - :type xc: str - :param basis: Basis set - :type basis: str - :param orcasimpleinput: List of `orcasimpleinput` settings for the calculator - :type orcasimpleinput: list - :param orcablocks: List of `orcablocks` swaps for the calculator - :type orcablocks: list - :param nprocs: Number of processes to parallelize across - :type nprocs: int - :param outputdir: Directory to move results to upon completion - :type outputdir: str - :param calc_kwargs: Additional kwargs for the custom Orca calculator - - -.. py:function:: ase_relaxation(atoms, charge, spin_multiplicity, xc=ORCA_FUNCTIONAL, basis=ORCA_BASIS, orcasimpleinput=None, orcablocks=None, nprocs=12, opt_params=None, outputdir=os.getcwd(), **calc_kwargs) - - Wrapper around QUACC's ase_relax_job to standardize geometry optimizations. - See github.com/Quantum-Accelerators/quacc/blob/main/src/quacc/recipes/orca/core.py#L22 - for more details. - - :param atoms: Atoms object - :type atoms: Atoms - :param charge: Charge of system - :type charge: int - :param spin_multiplicity: Multiplicity of the system - :type spin_multiplicity: int - :param xc: Exchange-correlation functional - :type xc: str - :param basis: Basis set - :type basis: str - :param orcasimpleinput: List of `orcasimpleinput` settings for the calculator - :type orcasimpleinput: list - :param orcablocks: List of `orcablocks` swaps for the calculator - :type orcablocks: list - :param nprocs: Number of processes to parallelize across - :type nprocs: int - :param opt_params: Dictionary of optimizer parameters - :type opt_params: dict - :param outputdir: Directory to move results to upon completion - :type outputdir: str - :param calc_kwargs: Additional kwargs for the custom Orca calculator - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/client/client/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/client/client/index.rst deleted file mode 100644 index 98021eccb..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/client/client/index.rst +++ /dev/null @@ -1,248 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.client.client` -============================================ - -.. py:module:: fairchem.demo.ocpapi.client.client - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.client.client.Client - - - - -.. py:exception:: RequestException(method: str, url: str, cause: str) - - - Bases: :py:obj:`Exception` - - Exception raised any time there is an error while making an API call. - - -.. py:exception:: NonRetryableRequestException(method: str, url: str, cause: str) - - - Bases: :py:obj:`RequestException` - - Exception raised when an API call is rejected for a reason that will - not succeed on retry. For example, this might include a malformed request - or action that is not allowed. - - -.. py:exception:: RateLimitExceededException(method: str, url: str, retry_after: Optional[datetime.timedelta] = None) - - - Bases: :py:obj:`RequestException` - - Exception raised when an API call is rejected because a rate limit has - been exceeded. - - .. attribute:: retry_after - - If known, the time to wait before the next attempt to - call the API should be made. - - -.. py:class:: Client(host: str = 'open-catalyst-api.metademolab.com', scheme: str = 'https') - - - Exposes each route in the OCP API as a method.
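For orientation, the following is a minimal, hypothetical usage sketch of the async client documented here; it is not taken from the fairchem repository. It only uses the documented constructor defaults, `get_adsorbates()`, the `adsorbates_supported` field, and `get_slabs()`; the bulk id "mp-30" is a placeholder value, not one drawn from these docs.

.. code-block:: python

    # Assumed, illustrative-only sketch of driving the async OCP API client.
    import asyncio

    from fairchem.demo.ocpapi.client import Client


    async def main() -> None:
        # Defaults to host="open-catalyst-api.metademolab.com", scheme="https".
        client = Client()

        # List the adsorbates the API supports.
        adsorbates = await client.get_adsorbates()
        print(adsorbates.adsorbates_supported[:5])

        # get_slabs accepts either a Bulk object or a bulk id string.
        slabs = await client.get_slabs("mp-30")  # "mp-30" is a placeholder bulk id
        print(slabs)


    asyncio.run(main())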
- - .. py:property:: host - :type: str - - The host being called by this client. - - .. py:method:: get_models() -> fairchem.demo.ocpapi.client.models.Models - :async: - - Fetch the list of models that are supported in the API. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The models that are supported in the API. - - - .. py:method:: get_bulks() -> fairchem.demo.ocpapi.client.models.Bulks - :async: - - Fetch the list of bulk materials that are supported in the API. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The bulks that are supported throughout the API. - - - .. py:method:: get_adsorbates() -> fairchem.demo.ocpapi.client.models.Adsorbates - :async: - - Fetch the list of adsorbates that are supported in the API. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The adsorbates that are supported throughout the API. - - - .. py:method:: get_slabs(bulk: Union[str, fairchem.demo.ocpapi.client.models.Bulk]) -> fairchem.demo.ocpapi.client.models.Slabs - :async: - - Get a unique list of slabs for the input bulk structure. - - :param bulk: If a string, the id of the bulk to use. Otherwise the Bulk - instance to use. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: Slabs for each of the unique surfaces of the material. - - - .. py:method:: get_adsorbate_slab_configs(adsorbate: str, slab: fairchem.demo.ocpapi.client.models.Slab) -> fairchem.demo.ocpapi.client.models.AdsorbateSlabConfigs - :async: - - Get a list of possible binding sites for the input adsorbate on the - input slab. - - :param adsorbate: Description of the adsorbate to place. - :param slab: Information about the slab on which the adsorbate should - be placed. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: Configurations for each adsorbate binding site on the slab. - - - ..
py:method:: submit_adsorbate_slab_relaxations(adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.models.Atoms], bulk: fairchem.demo.ocpapi.client.models.Bulk, slab: fairchem.demo.ocpapi.client.models.Slab, model: str, ephemeral: bool = False) -> fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsSystem - :async: - - Starts relaxations of the input adsorbate configurations on the input - slab using energies and forces returned by the input model. Relaxations - are run asynchronously and results can be fetched using the system id - that is returned from this method. - - :param adsorbate: Description of the adsorbate being simulated. - :param adsorbate_configs: List of adsorbate configurations to relax. This - should only include the adsorbates themselves; the surface is - defined in the "slab" field that is a peer to this one. - :param bulk: Details of the bulk material being simulated. - :param slab: The structure of the slab on which adsorbates are placed. - :param model: The model that will be used to evaluate energies and forces - during relaxations. - :param ephemeral: If False (default), any later attempt to delete the - generated relaxations will be rejected. If True, deleting the - relaxations will be allowed, which is generally useful for - testing when there is no reason for results to be persisted. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: IDs of the relaxations. - - - .. py:method:: get_adsorbate_slab_relaxations_request(system_id: str) -> fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsRequest - :async: - - Fetches the original relaxations request for the input system. - - :param system_id: The ID of the system to fetch. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The original request that was made when submitting relaxations. - - - .. py:method:: get_adsorbate_slab_relaxations_results(system_id: str, config_ids: Optional[List[int]] = None, fields: Optional[List[str]] = None) -> fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsResults - :async: - - Fetches relaxation results for the input system. - - :param system_id: The system id of the relaxations. - :param config_ids: If defined and not empty, a subset of configurations - to fetch. Otherwise all configurations are returned. - :param fields: If defined and not empty, a subset of fields in each - configuration to fetch. Otherwise all fields are returned. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The relaxation results for each configuration in the system. - - - .. 
py:method:: delete_adsorbate_slab_relaxations(system_id: str) -> None - :async: - - Deletes all relaxation results for the input system. - - :param system_id: The ID of the system to delete. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - - .. py:method:: _run_request(path: str, method: str, **kwargs) -> str - :async: - - Helper method that runs the input request on a thread so that - it doesn't block the event loop on the calling thread. - - :param path: The URL path to make the request against. - :param method: The HTTP method to use (GET, POST, etc.). - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The response body from the request as a string. - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/client/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/client/index.rst deleted file mode 100644 index 78e721656..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/client/index.rst +++ /dev/null @@ -1,703 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.client` -===================================== - -.. py:module:: fairchem.demo.ocpapi.client - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - client/index.rst - models/index.rst - ui/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.client.Client - fairchem.demo.ocpapi.client.Adsorbates - fairchem.demo.ocpapi.client.AdsorbateSlabConfigs - fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult - fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsRequest - fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsResults - fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsSystem - fairchem.demo.ocpapi.client.Atoms - fairchem.demo.ocpapi.client.Bulk - fairchem.demo.ocpapi.client.Bulks - fairchem.demo.ocpapi.client.Model - fairchem.demo.ocpapi.client.Models - fairchem.demo.ocpapi.client.Slab - fairchem.demo.ocpapi.client.SlabMetadata - fairchem.demo.ocpapi.client.Slabs - fairchem.demo.ocpapi.client.Status - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.client.get_results_ui_url - - - -.. py:class:: Client(host: str = 'open-catalyst-api.metademolab.com', scheme: str = 'https') - - - Exposes each route in the OCP API as a method. - - .. py:property:: host - :type: str - - The host being called by this client. - - .. py:method:: get_models() -> fairchem.demo.ocpapi.client.models.Models - :async: - - Fetch the list of models that are supported in the API. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The models that are supported in the API. - - - .. 
py:method:: get_bulks() -> fairchem.demo.ocpapi.client.models.Bulks - :async: - - Fetch the list of bulk materials that are supported in the API. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The bulks that are supported throughout the API. - - - .. py:method:: get_adsorbates() -> fairchem.demo.ocpapi.client.models.Adsorbates - :async: - - Fetch the list of adsorbates that are supported in the API. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The adsorbates that are supported throughout the API. - - - .. py:method:: get_slabs(bulk: Union[str, fairchem.demo.ocpapi.client.models.Bulk]) -> fairchem.demo.ocpapi.client.models.Slabs - :async: - - Get a unique list of slabs for the input bulk structure. - - :param bulk: If a string, the id of the bulk to use. Otherwise the Bulk - instance to use. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: Slabs for each of the unique surfaces of the material. - - - .. py:method:: get_adsorbate_slab_configs(adsorbate: str, slab: fairchem.demo.ocpapi.client.models.Slab) -> fairchem.demo.ocpapi.client.models.AdsorbateSlabConfigs - :async: - - Get a list of possible binding sites for the input adsorbate on the - input slab. - - :param adsorbate: Description of the adsorbate to place. - :param slab: Information about the slab on which the adsorbate should - be placed. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: Configurations for each adsorbate binding site on the slab. - - - .. py:method:: submit_adsorbate_slab_relaxations(adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.models.Atoms], bulk: fairchem.demo.ocpapi.client.models.Bulk, slab: fairchem.demo.ocpapi.client.models.Slab, model: str, ephemeral: bool = False) -> fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsSystem - :async: - - Starts relaxations of the input adsorbate configurations on the input - slab using energies and forces returned by the input model. Relaxations - are run asynchronously and results can be fetched using the system id - that is returned from this method. - - :param adsorbate: Description of the adsorbate being simulated. - :param adsorbate_configs: List of adsorbate configurations to relax.
This - should only include the adsorbates themselves; the surface is - defined in the "slab" field that is a peer to this one. - :param bulk: Details of the bulk material being simulated. - :param slab: The structure of the slab on which adsorbates are placed. - :param model: The model that will be used to evaluate energies and forces - during relaxations. - :param ephemeral: If False (default), any later attempt to delete the - generated relaxations will be rejected. If True, deleting the - relaxations will be allowed, which is generally useful for - testing when there is no reason for results to be persisted. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: IDs of the relaxations. - - - .. py:method:: get_adsorbate_slab_relaxations_request(system_id: str) -> fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsRequest - :async: - - Fetches the original relaxations request for the input system. - - :param system_id: The ID of the system to fetch. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The original request that was made when submitting relaxations. - - - .. py:method:: get_adsorbate_slab_relaxations_results(system_id: str, config_ids: Optional[List[int]] = None, fields: Optional[List[str]] = None) -> fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsResults - :async: - - Fetches relaxation results for the input system. - - :param system_id: The system id of the relaxations. - :param config_ids: If defined and not empty, a subset of configurations - to fetch. Otherwise all configurations are returned. - :param fields: If defined and not empty, a subset of fields in each - configuration to fetch. Otherwise all fields are returned. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The relaxation results for each configuration in the system. - - - .. py:method:: delete_adsorbate_slab_relaxations(system_id: str) -> None - :async: - - Deletes all relaxation results for the input system. - - :param system_id: The ID of the system to delete. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - - .. py:method:: _run_request(path: str, method: str, **kwargs) -> str - :async: - - Helper method that runs the input request on a thread so that - it doesn't block the event loop on the calling thread. 
- - :param path: The URL path to make the request against. - :param method: The HTTP method to use (GET, POST, etc.). - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The response body from the request as a string. - - - -.. py:exception:: NonRetryableRequestException(method: str, url: str, cause: str) - - - Bases: :py:obj:`RequestException` - - Exception raised when an API call is rejected for a reason that will - not succeed on retry. For example, this might include a malformed request - or action that is not allowed. - - -.. py:exception:: RateLimitExceededException(method: str, url: str, retry_after: Optional[datetime.timedelta] = None) - - - Bases: :py:obj:`RequestException` - - Exception raised when an API call is rejected because a rate limit has - been exceeded. - - .. attribute:: retry_after - - If known, the time to wait before the next attempt to - call the API should be made. - - -.. py:exception:: RequestException(method: str, url: str, cause: str) - - - Bases: :py:obj:`Exception` - - Exception raised any time there is an error while making an API call. - - -.. py:class:: Adsorbates - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to fetch adsorbates supported in the - API. - - .. py:attribute:: adsorbates_supported - :type: List[str] - - List of adsorbates that can be used in the API. - - -.. py:class:: AdsorbateSlabConfigs - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to fetch placements of a single - adsorbate on a slab. - - .. py:attribute:: adsorbate_configs - :type: List[Atoms] - - List of structures, each representing one possible adsorbate placement. - - .. py:attribute:: slab - :type: Slab - - The structure of the slab on which the adsorbate is placed. - - -.. py:class:: AdsorbateSlabRelaxationResult - - - Bases: :py:obj:`_DataModel` - - Stores information about a single adsorbate slab configuration, including - outputs for the model used in relaxations. - - The API to fetch relaxation results supports requesting a subset of fields - in order to limit the size of response payloads. Optional attributes will - be defined only if they are included in the response. - - .. py:attribute:: config_id - :type: int - - ID of the configuration within the system. - - .. py:attribute:: status - :type: Status - - The status of the request for information about this configuration. - - .. py:attribute:: system_id - :type: Optional[str] - - The ID of the system in which the configuration was originally submitted. - - .. py:attribute:: cell - :type: Optional[Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]]] - - 3x3 matrix with unit cell vectors. - - .. py:attribute:: pbc - :type: Optional[Tuple[bool, bool, bool]] - - Whether the structure is periodic along the a, b, and c lattice vectors, - respectively. - - .. py:attribute:: numbers - :type: Optional[List[int]] - - The atomic number of each atom in the unit cell. - - .. py:attribute:: positions - :type: Optional[List[Tuple[float, float, float]]] - - The coordinates of each atom in the unit cell, relative to the cartesian - frame. - - .. 
py:attribute:: tags - :type: Optional[List[int]] - - Labels for each atom in the unit cell where 0 represents a subsurface atom - (fixed during optimization), 1 represents a surface atom, and 2 represents - an adsorbate atom. - - .. py:attribute:: energy - :type: Optional[float] - - The energy of the configuration. - - .. py:attribute:: energy_trajectory - :type: Optional[List[float]] - - The energy of the configuration at each point along the relaxation - trajectory. - - .. py:attribute:: forces - :type: Optional[List[Tuple[float, float, float]]] - - The forces on each atom in the relaxed structure. - - .. py:method:: to_ase_atoms() -> ase.Atoms - - Creates an ase.Atoms object with the positions, element numbers, - etc. populated from values on this object. - - The predicted energy and forces will also be copied to the new - ase.Atoms object as a SinglePointCalculator (a calculator that - stores the results of an already-run simulation). - - :returns: ase.Atoms object with values from this object. - - - -.. py:class:: AdsorbateSlabRelaxationsRequest - - - Bases: :py:obj:`_DataModel` - - Stores the request to submit a new batch of adsorbate slab relaxations. - - .. py:attribute:: adsorbate - :type: str - - Description of the adsorbate. - - .. py:attribute:: adsorbate_configs - :type: List[Atoms] - - List of adsorbate placements being relaxed. - - .. py:attribute:: bulk - :type: Bulk - - Information about the original bulk structure used to create the slab. - - .. py:attribute:: slab - :type: Slab - - The structure of the slab on which adsorbates are placed. - - .. py:attribute:: model - :type: str - - The type of the ML model being used during relaxations. - - .. py:attribute:: ephemeral - :type: Optional[bool] - - Whether the relaxations can be deleted (assume they cannot be deleted if - None). - - .. py:attribute:: adsorbate_reaction - :type: Optional[str] - - If possible, an html-formatted string describing the reaction will be added - to this field. - - -.. py:class:: AdsorbateSlabRelaxationsResults - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request for results of adsorbate slab - relaxations. - - .. py:attribute:: configs - :type: List[AdsorbateSlabRelaxationResult] - - List of configurations in the system, each representing one placement of - an adsorbate on a slab surface. - - .. py:attribute:: omitted_config_ids - :type: List[int] - - List of IDs of configurations that were requested but omitted by the - server. Results for these IDs can be requested again. - - -.. py:class:: AdsorbateSlabRelaxationsSystem - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to submit a new batch of adsorbate - slab relaxations. - - .. py:attribute:: system_id - :type: str - - Unique ID for this set of relaxations which can be used to fetch results - later. - - .. py:attribute:: config_ids - :type: List[int] - - The list of IDs assigned to each of the input adsorbate placements, in the - same order in which they were submitted. - - -.. py:class:: Atoms - - - Bases: :py:obj:`_DataModel` - - Subset of the fields from an ASE Atoms object that are used within this - API. - - .. py:attribute:: cell - :type: Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]] - - 3x3 matrix with unit cell vectors. - - .. py:attribute:: pbc - :type: Tuple[bool, bool, bool] - - Whether the structure is periodic along the a, b, and c lattice vectors, - respectively. - - .. 
py:attribute:: numbers - :type: List[int] - - The atomic number of each atom in the unit cell. - - .. py:attribute:: positions - :type: List[Tuple[float, float, float]] - - The coordinates of each atom in the unit cell, relative to the cartesian - frame. - - .. py:attribute:: tags - :type: List[int] - - Labels for each atom in the unit cell where 0 represents a subsurface atom - (fixed during optimization), 1 represents a surface atom, and 2 represents - an adsorbate atom. - - .. py:method:: to_ase_atoms() -> ase.Atoms - - Creates an ase.Atoms object with the positions, element numbers, - etc. populated from values on this object. - - :returns: ase.Atoms object with values from this object. - - - -.. py:class:: Bulk - - - Bases: :py:obj:`_DataModel` - - Stores information about a single bulk material. - - .. py:attribute:: src_id - :type: str - - The ID of the material. - - .. py:attribute:: formula - :type: str - - The chemical formula of the material. - - .. py:attribute:: elements - :type: List[str] - - The list of elements in the material. - - -.. py:class:: Bulks - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to fetch bulks supported in the API. - - .. py:attribute:: bulks_supported - :type: List[Bulk] - - List of bulks that can be used in the API. - - -.. py:class:: Model - - - Bases: :py:obj:`_DataModel` - - Stores information about a single model supported in the API. - - .. py:attribute:: id - :type: str - - The ID of the model. - - -.. py:class:: Models - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request for models supported in the API. - - .. py:attribute:: models - :type: List[Model] - - The list of models that are supported. - - -.. py:class:: Slab - - - Bases: :py:obj:`_DataModel` - - Stores all information about a slab that is returned from the API. - - .. py:attribute:: atoms - :type: Atoms - - The structure of the slab. - - .. py:attribute:: metadata - :type: SlabMetadata - - Extra information about the slab. - - -.. py:class:: SlabMetadata - - - Bases: :py:obj:`_DataModel` - - Stores metadata about a slab that is returned from the API. - - .. py:attribute:: bulk_src_id - :type: str - - The ID of the bulk material from which the slab was derived. - - .. py:attribute:: millers - :type: Tuple[int, int, int] - - The Miller indices of the slab relative to bulk structure. - - .. py:attribute:: shift - :type: float - - The position along the vector defined by the Miller indices at which a - cut was taken to generate the slab surface. - - .. py:attribute:: top - :type: bool - - If False, the top and bottom surfaces for this millers/shift pair are - distinct and this slab represents the bottom surface. - - -.. py:class:: Slabs - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to fetch slabs for a bulk structure. - - .. py:attribute:: slabs - :type: List[Slab] - - The list of slabs that were generated from the input bulk structure. - - -.. py:class:: Status(*args, **kwds) - - - Bases: :py:obj:`enum.Enum` - - Relaxation status of a single adsorbate placement on a slab. - - .. py:attribute:: NOT_AVAILABLE - :value: 'not_available' - - The configuration exists but the result is not yet available. It is - possible that checking again in the future could yield a result. - - .. py:attribute:: FAILED_RELAXATION - :value: 'failed_relaxation' - - The relaxation failed for this configuration. - - .. 
py:attribute:: SUCCESS - :value: 'success' - - The relaxation was successful and the requested information about the - configuration was returned. - - .. py:attribute:: DOES_NOT_EXIST - :value: 'does_not_exist' - - The requested configuration does not exist. - - .. py:method:: __str__() -> str - - Return str(self). - - - -.. py:function:: get_results_ui_url(api_host: str, system_id: str) -> Optional[str] - - Generates the URL at which results for the input system can be - visualized. - - :param api_host: The API host on which the system was run. - :param system_id: ID of the system being visualized. - - :returns: The URL at which the input system can be visualized. None if the - API host is not recognized. - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/client/models/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/client/models/index.rst deleted file mode 100644 index 1ed57ef37..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/client/models/index.rst +++ /dev/null @@ -1,455 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.client.models` -============================================ - -.. py:module:: fairchem.demo.ocpapi.client.models - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.client.models._DataModel - fairchem.demo.ocpapi.client.models.Model - fairchem.demo.ocpapi.client.models.Models - fairchem.demo.ocpapi.client.models.Bulk - fairchem.demo.ocpapi.client.models.Bulks - fairchem.demo.ocpapi.client.models.Adsorbates - fairchem.demo.ocpapi.client.models.Atoms - fairchem.demo.ocpapi.client.models.SlabMetadata - fairchem.demo.ocpapi.client.models.Slab - fairchem.demo.ocpapi.client.models.Slabs - fairchem.demo.ocpapi.client.models.AdsorbateSlabConfigs - fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsSystem - fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsRequest - fairchem.demo.ocpapi.client.models.Status - fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationResult - fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsResults - - - - -.. py:class:: _DataModel - - - Base class for all data models. - - .. py:attribute:: other_fields - :type: dataclasses_json.CatchAll - - Fields that may have been added to the API that all not yet supported - explicitly in this class. - - -.. py:class:: Model - - - Bases: :py:obj:`_DataModel` - - Stores information about a single model supported in the API. - - .. py:attribute:: id - :type: str - - The ID of the model. - - -.. py:class:: Models - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request for models supported in the API. - - .. py:attribute:: models - :type: List[Model] - - The list of models that are supported. - - -.. py:class:: Bulk - - - Bases: :py:obj:`_DataModel` - - Stores information about a single bulk material. - - .. py:attribute:: src_id - :type: str - - The ID of the material. - - .. py:attribute:: formula - :type: str - - The chemical formula of the material. - - .. py:attribute:: elements - :type: List[str] - - The list of elements in the material. - - -.. py:class:: Bulks - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to fetch bulks supported in the API. - - .. py:attribute:: bulks_supported - :type: List[Bulk] - - List of bulks that can be used in the API. - - -.. py:class:: Adsorbates - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to fetch adsorbates supported in the - API. - - .. 
py:attribute:: adsorbates_supported - :type: List[str] - - List of adsorbates that can be used in the API. - - -.. py:class:: Atoms - - - Bases: :py:obj:`_DataModel` - - Subset of the fields from an ASE Atoms object that are used within this - API. - - .. py:attribute:: cell - :type: Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]] - - 3x3 matrix with unit cell vectors. - - .. py:attribute:: pbc - :type: Tuple[bool, bool, bool] - - Whether the structure is periodic along the a, b, and c lattice vectors, - respectively. - - .. py:attribute:: numbers - :type: List[int] - - The atomic number of each atom in the unit cell. - - .. py:attribute:: positions - :type: List[Tuple[float, float, float]] - - The coordinates of each atom in the unit cell, relative to the cartesian - frame. - - .. py:attribute:: tags - :type: List[int] - - Labels for each atom in the unit cell where 0 represents a subsurface atom - (fixed during optimization), 1 represents a surface atom, and 2 represents - an adsorbate atom. - - .. py:method:: to_ase_atoms() -> ase.Atoms - - Creates an ase.Atoms object with the positions, element numbers, - etc. populated from values on this object. - - :returns: ase.Atoms object with values from this object. - - - -.. py:class:: SlabMetadata - - - Bases: :py:obj:`_DataModel` - - Stores metadata about a slab that is returned from the API. - - .. py:attribute:: bulk_src_id - :type: str - - The ID of the bulk material from which the slab was derived. - - .. py:attribute:: millers - :type: Tuple[int, int, int] - - The Miller indices of the slab relative to bulk structure. - - .. py:attribute:: shift - :type: float - - The position along the vector defined by the Miller indices at which a - cut was taken to generate the slab surface. - - .. py:attribute:: top - :type: bool - - If False, the top and bottom surfaces for this millers/shift pair are - distinct and this slab represents the bottom surface. - - -.. py:class:: Slab - - - Bases: :py:obj:`_DataModel` - - Stores all information about a slab that is returned from the API. - - .. py:attribute:: atoms - :type: Atoms - - The structure of the slab. - - .. py:attribute:: metadata - :type: SlabMetadata - - Extra information about the slab. - - -.. py:class:: Slabs - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to fetch slabs for a bulk structure. - - .. py:attribute:: slabs - :type: List[Slab] - - The list of slabs that were generated from the input bulk structure. - - -.. py:class:: AdsorbateSlabConfigs - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to fetch placements of a single - absorbate on a slab. - - .. py:attribute:: adsorbate_configs - :type: List[Atoms] - - List of structures, each representing one possible adsorbate placement. - - .. py:attribute:: slab - :type: Slab - - The structure of the slab on which the adsorbate is placed. - - -.. py:class:: AdsorbateSlabRelaxationsSystem - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to submit a new batch of adsorbate - slab relaxations. - - .. py:attribute:: system_id - :type: str - - Unique ID for this set of relaxations which can be used to fetch results - later. - - .. py:attribute:: config_ids - :type: List[int] - - The list of IDs assigned to each of the input adsorbate placements, in the - same order in which they were submitted. - - -.. 
py:class:: AdsorbateSlabRelaxationsRequest - - - Bases: :py:obj:`_DataModel` - - Stores the request to submit a new batch of adsorbate slab relaxations. - - .. py:attribute:: adsorbate - :type: str - - Description of the adsorbate. - - .. py:attribute:: adsorbate_configs - :type: List[Atoms] - - List of adsorbate placements being relaxed. - - .. py:attribute:: bulk - :type: Bulk - - Information about the original bulk structure used to create the slab. - - .. py:attribute:: slab - :type: Slab - - The structure of the slab on which adsorbates are placed. - - .. py:attribute:: model - :type: str - - The type of the ML model being used during relaxations. - - .. py:attribute:: ephemeral - :type: Optional[bool] - - Whether the relaxations can be deleted (assume they cannot be deleted if - None). - - .. py:attribute:: adsorbate_reaction - :type: Optional[str] - - If possible, an html-formatted string describing the reaction will be added - to this field. - - -.. py:class:: Status(*args, **kwds) - - - Bases: :py:obj:`enum.Enum` - - Relaxation status of a single adsorbate placement on a slab. - - .. py:attribute:: NOT_AVAILABLE - :value: 'not_available' - - The configuration exists but the result is not yet available. It is - possible that checking again in the future could yield a result. - - .. py:attribute:: FAILED_RELAXATION - :value: 'failed_relaxation' - - The relaxation failed for this configuration. - - .. py:attribute:: SUCCESS - :value: 'success' - - The relaxation was successful and the requested information about the - configuration was returned. - - .. py:attribute:: DOES_NOT_EXIST - :value: 'does_not_exist' - - The requested configuration does not exist. - - .. py:method:: __str__() -> str - - Return str(self). - - - -.. py:class:: AdsorbateSlabRelaxationResult - - - Bases: :py:obj:`_DataModel` - - Stores information about a single adsorbate slab configuration, including - outputs for the model used in relaxations. - - The API to fetch relaxation results supports requesting a subset of fields - in order to limit the size of response payloads. Optional attributes will - be defined only if they are including the response. - - .. py:attribute:: config_id - :type: int - - ID of the configuration within the system. - - .. py:attribute:: status - :type: Status - - The status of the request for information about this configuration. - - .. py:attribute:: system_id - :type: Optional[str] - - The ID of the system in which the configuration was originally submitted. - - .. py:attribute:: cell - :type: Optional[Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]]] - - 3x3 matrix with unit cell vectors. - - .. py:attribute:: pbc - :type: Optional[Tuple[bool, bool, bool]] - - Whether the structure is periodic along the a, b, and c lattice vectors, - respectively. - - .. py:attribute:: numbers - :type: Optional[List[int]] - - The atomic number of each atom in the unit cell. - - .. py:attribute:: positions - :type: Optional[List[Tuple[float, float, float]]] - - The coordinates of each atom in the unit cell, relative to the cartesian - frame. - - .. py:attribute:: tags - :type: Optional[List[int]] - - Labels for each atom in the unit cell where 0 represents a subsurface atom - (fixed during optimization), 1 represents a surface atom, and 2 represents - an adsorbate atom. - - .. py:attribute:: energy - :type: Optional[float] - - The energy of the configuration. - - .. 
py:attribute:: energy_trajectory - :type: Optional[List[float]] - - The energy of the configuration at each point along the relaxation - trajectory. - - .. py:attribute:: forces - :type: Optional[List[Tuple[float, float, float]]] - - The forces on each atom in the relaxed structure. - - .. py:method:: to_ase_atoms() -> ase.Atoms - - Creates an ase.Atoms object with the positions, element numbers, - etc. populated from values on this object. - - The predicted energy and forces will also be copied to the new - ase.Atoms object as a SinglePointCalculator (a calculator that - stores the results of an already-run simulation). - - :returns: ase.Atoms object with values from this object. - - - -.. py:class:: AdsorbateSlabRelaxationsResults - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request for results of adsorbate slab - relaxations. - - .. py:attribute:: configs - :type: List[AdsorbateSlabRelaxationResult] - - List of configurations in the system, each representing one placement of - an adsorbate on a slab surface. - - .. py:attribute:: omitted_config_ids - :type: List[int] - - List of IDs of configurations that were requested but omitted by the - server. Results for these IDs can be requested again. - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/client/ui/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/client/ui/index.rst deleted file mode 100644 index 158b8e3f6..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/client/ui/index.rst +++ /dev/null @@ -1,44 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.client.ui` -======================================== - -.. py:module:: fairchem.demo.ocpapi.client.ui - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.client.ui.get_results_ui_url - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.client.ui._API_TO_UI_HOSTS - - -.. py:data:: _API_TO_UI_HOSTS - :type: Dict[str, str] - - - -.. py:function:: get_results_ui_url(api_host: str, system_id: str) -> Optional[str] - - Generates the URL at which results for the input system can be - visualized. - - :param api_host: The API host on which the system was run. - :param system_id: ID of the system being visualized. - - :returns: The URL at which the input system can be visualized. None if the - API host is not recognized. - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/index.rst deleted file mode 100644 index f4ff815fa..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/index.rst +++ /dev/null @@ -1,1005 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi` -============================== - -.. py:module:: fairchem.demo.ocpapi - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - client/index.rst - tests/index.rst - workflows/index.rst - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - version/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. 
autoapisummary:: - - fairchem.demo.ocpapi.Client - fairchem.demo.ocpapi.Adsorbates - fairchem.demo.ocpapi.AdsorbateSlabConfigs - fairchem.demo.ocpapi.AdsorbateSlabRelaxationResult - fairchem.demo.ocpapi.AdsorbateSlabRelaxationsRequest - fairchem.demo.ocpapi.AdsorbateSlabRelaxationsResults - fairchem.demo.ocpapi.AdsorbateSlabRelaxationsSystem - fairchem.demo.ocpapi.Atoms - fairchem.demo.ocpapi.Bulk - fairchem.demo.ocpapi.Bulks - fairchem.demo.ocpapi.Model - fairchem.demo.ocpapi.Models - fairchem.demo.ocpapi.Slab - fairchem.demo.ocpapi.SlabMetadata - fairchem.demo.ocpapi.Slabs - fairchem.demo.ocpapi.Status - fairchem.demo.ocpapi.AdsorbateBindingSites - fairchem.demo.ocpapi.AdsorbateSlabRelaxations - fairchem.demo.ocpapi.Lifetime - fairchem.demo.ocpapi.keep_all_slabs - fairchem.demo.ocpapi.keep_slabs_with_miller_indices - fairchem.demo.ocpapi.prompt_for_slabs_to_keep - fairchem.demo.ocpapi.RateLimitLogging - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.get_results_ui_url - fairchem.demo.ocpapi.find_adsorbate_binding_sites - fairchem.demo.ocpapi.get_adsorbate_slab_relaxation_results - fairchem.demo.ocpapi.wait_for_adsorbate_slab_relaxations - fairchem.demo.ocpapi.retry_api_calls - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.NO_LIMIT - fairchem.demo.ocpapi.NoLimitType - - -.. py:class:: Client(host: str = 'open-catalyst-api.metademolab.com', scheme: str = 'https') - - - Exposes each route in the OCP API as a method. - - .. py:property:: host - :type: str - - The host being called by this client. - - .. py:method:: get_models() -> fairchem.demo.ocpapi.client.models.Models - :async: - - Fetch the list of models that are supported in the API. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The models that are supported in the API. - - - .. py:method:: get_bulks() -> fairchem.demo.ocpapi.client.models.Bulks - :async: - - Fetch the list of bulk materials that are supported in the API. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The bulks that are supported throughout the API. - - - .. py:method:: get_adsorbates() -> fairchem.demo.ocpapi.client.models.Adsorbates - :async: - - Fetch the list of adsorbates that are supported in the API. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The adsorbates that are supported throughout the API. - - - .. py:method:: get_slabs(bulk: Union[str, fairchem.demo.ocpapi.client.models.Bulk]) -> fairchem.demo.ocpapi.client.models.Slabs - :async: - - Get a unique list of slabs for the input bulk structure. 
- - :param bulk: If a string, the id of the bulk to use. Otherwise the Bulk - instance to use. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: Slabs for each of the unique surfaces of the material. - - - .. py:method:: get_adsorbate_slab_configs(adsorbate: str, slab: fairchem.demo.ocpapi.client.models.Slab) -> fairchem.demo.ocpapi.client.models.AdsorbateSlabConfigs - :async: - - Get a list of possible binding sites for the input adsorbate on the - input slab. - - :param adsorbate: Description of the the adsorbate to place. - :param slab: Information about the slab on which the adsorbate should - be placed. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: Configurations for each adsorbate binding site on the slab. - - - .. py:method:: submit_adsorbate_slab_relaxations(adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.models.Atoms], bulk: fairchem.demo.ocpapi.client.models.Bulk, slab: fairchem.demo.ocpapi.client.models.Slab, model: str, ephemeral: bool = False) -> fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsSystem - :async: - - Starts relaxations of the input adsorbate configurations on the input - slab using energies and forces returned by the input model. Relaxations - are run asynchronously and results can be fetched using the system id - that is returned from this method. - - :param adsorbate: Description of the adsorbate being simulated. - :param adsorbate_configs: List of adsorbate configurations to relax. This - should only include the adsorbates themselves; the surface is - defined in the "slab" field that is a peer to this one. - :param bulk: Details of the bulk material being simulated. - :param slab: The structure of the slab on which adsorbates are placed. - :param model: The model that will be used to evaluate energies and forces - during relaxations. - :param ephemeral: If False (default), any later attempt to delete the - generated relaxations will be rejected. If True, deleting the - relaxations will be allowed, which is generally useful for - testing when there is no reason for results to be persisted. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: IDs of the relaxations. - - - .. py:method:: get_adsorbate_slab_relaxations_request(system_id: str) -> fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsRequest - :async: - - Fetches the original relaxations request for the input system. - - :param system_id: The ID of the system to fetch. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. 
- :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The original request that was made when submitting relaxations. - - - .. py:method:: get_adsorbate_slab_relaxations_results(system_id: str, config_ids: Optional[List[int]] = None, fields: Optional[List[str]] = None) -> fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsResults - :async: - - Fetches relaxation results for the input system. - - :param system_id: The system id of the relaxations. - :param config_ids: If defined and not empty, a subset of configurations - to fetch. Otherwise all configurations are returned. - :param fields: If defined and not empty, a subset of fields in each - configuration to fetch. Otherwise all fields are returned. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The relaxation results for each configuration in the system. - - - .. py:method:: delete_adsorbate_slab_relaxations(system_id: str) -> None - :async: - - Deletes all relaxation results for the input system. - - :param system_id: The ID of the system to delete. - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - - .. py:method:: _run_request(path: str, method: str, **kwargs) -> str - :async: - - Helper method that runs the input request on a thread so that - it doesn't block the event loop on the calling thread. - - :param path: The URL path to make the request against. - :param method: The HTTP method to use (GET, POST, etc.). - - :raises RateLimitExceededException: If the call was rejected because a - server side rate limit was breached. - :raises NonRetryableRequestException: If the call was rejected and a retry - is not expected to succeed. - :raises RequestException: For all other errors when making the request; it - is possible, though not guaranteed, that a retry could succeed. - - :returns: The response body from the request as a string. - - - -.. py:exception:: NonRetryableRequestException(method: str, url: str, cause: str) - - - Bases: :py:obj:`RequestException` - - Exception raised when an API call is rejected for a reason that will - not succeed on retry. For example, this might include a malformed request - or action that is not allowed. - - -.. py:exception:: RateLimitExceededException(method: str, url: str, retry_after: Optional[datetime.timedelta] = None) - - - Bases: :py:obj:`RequestException` - - Exception raised when an API call is rejected because a rate limit has - been exceeded. - - .. attribute:: retry_after - - If known, the time to wait before the next attempt to - call the API should be made. - - -.. py:exception:: RequestException(method: str, url: str, cause: str) - - - Bases: :py:obj:`Exception` - - Exception raised any time there is an error while making an API call. 
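The routes on ``Client`` and the exception hierarchy documented in this index share one calling pattern: await a route inside a coroutine and branch on the three documented exception types. The sketch below is illustrative only, assuming the import paths match the package layout documented here; the wrapper coroutine ``list_supported_models`` is a hypothetical name, not part of the package.

.. code-block:: python

    import asyncio

    from fairchem.demo.ocpapi.client import (
        Client,
        NonRetryableRequestException,
        RateLimitExceededException,
        RequestException,
    )

    async def list_supported_models() -> None:
        # Hypothetical helper; Client() defaults to the public demo API host.
        client = Client()
        try:
            models = await client.get_models()
            for model in models.models:
                print(model.id)
        except RateLimitExceededException as exc:
            # retry_after, if provided by the server, says how long to wait
            # before the next attempt.
            print(f"Rate limited; retry after {exc.retry_after}")
        except NonRetryableRequestException:
            # Rejected for a reason that a retry is not expected to fix.
            raise
        except RequestException:
            # Any other error while making the request; a retry may succeed.
            raise

    asyncio.run(list_supported_models())

Because every route above documents the same three exception types, the same handling applies unchanged to ``get_bulks()``, ``get_slabs()``, and the relaxation endpoints.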
- - -.. py:class:: Adsorbates - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to fetch adsorbates supported in the - API. - - .. py:attribute:: adsorbates_supported - :type: List[str] - - List of adsorbates that can be used in the API. - - -.. py:class:: AdsorbateSlabConfigs - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to fetch placements of a single - absorbate on a slab. - - .. py:attribute:: adsorbate_configs - :type: List[Atoms] - - List of structures, each representing one possible adsorbate placement. - - .. py:attribute:: slab - :type: Slab - - The structure of the slab on which the adsorbate is placed. - - -.. py:class:: AdsorbateSlabRelaxationResult - - - Bases: :py:obj:`_DataModel` - - Stores information about a single adsorbate slab configuration, including - outputs for the model used in relaxations. - - The API to fetch relaxation results supports requesting a subset of fields - in order to limit the size of response payloads. Optional attributes will - be defined only if they are including the response. - - .. py:attribute:: config_id - :type: int - - ID of the configuration within the system. - - .. py:attribute:: status - :type: Status - - The status of the request for information about this configuration. - - .. py:attribute:: system_id - :type: Optional[str] - - The ID of the system in which the configuration was originally submitted. - - .. py:attribute:: cell - :type: Optional[Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]]] - - 3x3 matrix with unit cell vectors. - - .. py:attribute:: pbc - :type: Optional[Tuple[bool, bool, bool]] - - Whether the structure is periodic along the a, b, and c lattice vectors, - respectively. - - .. py:attribute:: numbers - :type: Optional[List[int]] - - The atomic number of each atom in the unit cell. - - .. py:attribute:: positions - :type: Optional[List[Tuple[float, float, float]]] - - The coordinates of each atom in the unit cell, relative to the cartesian - frame. - - .. py:attribute:: tags - :type: Optional[List[int]] - - Labels for each atom in the unit cell where 0 represents a subsurface atom - (fixed during optimization), 1 represents a surface atom, and 2 represents - an adsorbate atom. - - .. py:attribute:: energy - :type: Optional[float] - - The energy of the configuration. - - .. py:attribute:: energy_trajectory - :type: Optional[List[float]] - - The energy of the configuration at each point along the relaxation - trajectory. - - .. py:attribute:: forces - :type: Optional[List[Tuple[float, float, float]]] - - The forces on each atom in the relaxed structure. - - .. py:method:: to_ase_atoms() -> ase.Atoms - - Creates an ase.Atoms object with the positions, element numbers, - etc. populated from values on this object. - - The predicted energy and forces will also be copied to the new - ase.Atoms object as a SinglePointCalculator (a calculator that - stores the results of an already-run simulation). - - :returns: ase.Atoms object with values from this object. - - - -.. py:class:: AdsorbateSlabRelaxationsRequest - - - Bases: :py:obj:`_DataModel` - - Stores the request to submit a new batch of adsorbate slab relaxations. - - .. py:attribute:: adsorbate - :type: str - - Description of the adsorbate. - - .. py:attribute:: adsorbate_configs - :type: List[Atoms] - - List of adsorbate placements being relaxed. - - .. py:attribute:: bulk - :type: Bulk - - Information about the original bulk structure used to create the slab. - - .. 
py:attribute:: slab - :type: Slab - - The structure of the slab on which adsorbates are placed. - - .. py:attribute:: model - :type: str - - The type of the ML model being used during relaxations. - - .. py:attribute:: ephemeral - :type: Optional[bool] - - Whether the relaxations can be deleted (assume they cannot be deleted if - None). - - .. py:attribute:: adsorbate_reaction - :type: Optional[str] - - If possible, an html-formatted string describing the reaction will be added - to this field. - - -.. py:class:: AdsorbateSlabRelaxationsResults - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request for results of adsorbate slab - relaxations. - - .. py:attribute:: configs - :type: List[AdsorbateSlabRelaxationResult] - - List of configurations in the system, each representing one placement of - an adsorbate on a slab surface. - - .. py:attribute:: omitted_config_ids - :type: List[int] - - List of IDs of configurations that were requested but omitted by the - server. Results for these IDs can be requested again. - - -.. py:class:: AdsorbateSlabRelaxationsSystem - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to submit a new batch of adsorbate - slab relaxations. - - .. py:attribute:: system_id - :type: str - - Unique ID for this set of relaxations which can be used to fetch results - later. - - .. py:attribute:: config_ids - :type: List[int] - - The list of IDs assigned to each of the input adsorbate placements, in the - same order in which they were submitted. - - -.. py:class:: Atoms - - - Bases: :py:obj:`_DataModel` - - Subset of the fields from an ASE Atoms object that are used within this - API. - - .. py:attribute:: cell - :type: Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]] - - 3x3 matrix with unit cell vectors. - - .. py:attribute:: pbc - :type: Tuple[bool, bool, bool] - - Whether the structure is periodic along the a, b, and c lattice vectors, - respectively. - - .. py:attribute:: numbers - :type: List[int] - - The atomic number of each atom in the unit cell. - - .. py:attribute:: positions - :type: List[Tuple[float, float, float]] - - The coordinates of each atom in the unit cell, relative to the cartesian - frame. - - .. py:attribute:: tags - :type: List[int] - - Labels for each atom in the unit cell where 0 represents a subsurface atom - (fixed during optimization), 1 represents a surface atom, and 2 represents - an adsorbate atom. - - .. py:method:: to_ase_atoms() -> ase.Atoms - - Creates an ase.Atoms object with the positions, element numbers, - etc. populated from values on this object. - - :returns: ase.Atoms object with values from this object. - - - -.. py:class:: Bulk - - - Bases: :py:obj:`_DataModel` - - Stores information about a single bulk material. - - .. py:attribute:: src_id - :type: str - - The ID of the material. - - .. py:attribute:: formula - :type: str - - The chemical formula of the material. - - .. py:attribute:: elements - :type: List[str] - - The list of elements in the material. - - -.. py:class:: Bulks - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to fetch bulks supported in the API. - - .. py:attribute:: bulks_supported - :type: List[Bulk] - - List of bulks that can be used in the API. - - -.. py:class:: Model - - - Bases: :py:obj:`_DataModel` - - Stores information about a single model supported in the API. - - .. py:attribute:: id - :type: str - - The ID of the model. - - -.. 
py:class:: Models - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request for models supported in the API. - - .. py:attribute:: models - :type: List[Model] - - The list of models that are supported. - - -.. py:class:: Slab - - - Bases: :py:obj:`_DataModel` - - Stores all information about a slab that is returned from the API. - - .. py:attribute:: atoms - :type: Atoms - - The structure of the slab. - - .. py:attribute:: metadata - :type: SlabMetadata - - Extra information about the slab. - - -.. py:class:: SlabMetadata - - - Bases: :py:obj:`_DataModel` - - Stores metadata about a slab that is returned from the API. - - .. py:attribute:: bulk_src_id - :type: str - - The ID of the bulk material from which the slab was derived. - - .. py:attribute:: millers - :type: Tuple[int, int, int] - - The Miller indices of the slab relative to bulk structure. - - .. py:attribute:: shift - :type: float - - The position along the vector defined by the Miller indices at which a - cut was taken to generate the slab surface. - - .. py:attribute:: top - :type: bool - - If False, the top and bottom surfaces for this millers/shift pair are - distinct and this slab represents the bottom surface. - - -.. py:class:: Slabs - - - Bases: :py:obj:`_DataModel` - - Stores the response from a request to fetch slabs for a bulk structure. - - .. py:attribute:: slabs - :type: List[Slab] - - The list of slabs that were generated from the input bulk structure. - - -.. py:class:: Status(*args, **kwds) - - - Bases: :py:obj:`enum.Enum` - - Relaxation status of a single adsorbate placement on a slab. - - .. py:attribute:: NOT_AVAILABLE - :value: 'not_available' - - The configuration exists but the result is not yet available. It is - possible that checking again in the future could yield a result. - - .. py:attribute:: FAILED_RELAXATION - :value: 'failed_relaxation' - - The relaxation failed for this configuration. - - .. py:attribute:: SUCCESS - :value: 'success' - - The relaxation was successful and the requested information about the - configuration was returned. - - .. py:attribute:: DOES_NOT_EXIST - :value: 'does_not_exist' - - The requested configuration does not exist. - - .. py:method:: __str__() -> str - - Return str(self). - - - -.. py:function:: get_results_ui_url(api_host: str, system_id: str) -> Optional[str] - - Generates the URL at which results for the input system can be - visualized. - - :param api_host: The API host on which the system was run. - :param system_id: ID of the system being visualized. - - :returns: The URL at which the input system can be visualized. None if the - API host is not recognized. - - -.. py:class:: AdsorbateBindingSites - - - Stores the inputs and results of a set of relaxations of adsorbate - placements on the surface of a slab. - - .. py:attribute:: adsorbate - :type: str - - Description of the adsorbate. - - .. py:attribute:: bulk - :type: fairchem.demo.ocpapi.client.Bulk - - The bulk material that was being modeled. - - .. py:attribute:: model - :type: str - - The type of the model that was run. - - .. py:attribute:: slabs - :type: List[AdsorbateSlabRelaxations] - - The list of slabs that were generated from the bulk structure. Each - contains its own list of adsorbate placements. - - -.. py:class:: AdsorbateSlabRelaxations - - - Stores the relaxations of adsorbate placements on the surface of a slab. - - .. py:attribute:: slab - :type: fairchem.demo.ocpapi.client.Slab - - The slab on which the adsorbate was placed. - - .. 
py:attribute:: configs - :type: List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult] - - Details of the relaxation of each adsorbate placement, including the - final position. - - .. py:attribute:: system_id - :type: str - - The ID of the system that stores all of the relaxations. - - .. py:attribute:: api_host - :type: str - - The API host on which the relaxations were run. - - .. py:attribute:: ui_url - :type: Optional[str] - - The URL at which results can be visualized. - - -.. py:class:: Lifetime(*args, **kwds) - - - Bases: :py:obj:`enum.Enum` - - Represents different lifetimes when running relaxations. - - .. py:attribute:: SAVE - - The relaxation will be available on API servers indefinitely. It will not - be possible to delete the relaxation in the future. - - .. py:attribute:: MARK_EPHEMERAL - - The relaxation will be saved on API servers, but can be deleted at any time - in the future. - - .. py:attribute:: DELETE - - The relaxation will be deleted from API servers as soon as the results have - been fetched. - - -.. py:exception:: UnsupportedAdsorbateException(adsorbate: str) - - - Bases: :py:obj:`AdsorbatesException` - - Exception raised when an adsorbate is not supported in the API. - - -.. py:exception:: UnsupportedBulkException(bulk: str) - - - Bases: :py:obj:`AdsorbatesException` - - Exception raised when a bulk material is not supported in the API. - - -.. py:exception:: UnsupportedModelException(model: str, allowed_models: List[str]) - - - Bases: :py:obj:`AdsorbatesException` - - Exception raised when a model is not supported in the API. - - -.. py:function:: find_adsorbate_binding_sites(adsorbate: str, bulk: str, model: str = 'equiformer_v2_31M_s2ef_all_md', adslab_filter: Callable[[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]], Awaitable[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]]] = _DEFAULT_ADSLAB_FILTER, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT, lifetime: Lifetime = Lifetime.SAVE) -> AdsorbateBindingSites - :async: - - Search for adsorbate binding sites on surfaces of a bulk material. - This executes the following steps: - - 1. Ensure that both the adsorbate and bulk are supported in the - OCP API. - 2. Enumerate unique surfaces from the bulk material. - 3. Enumerate likely binding sites for the input adsorbate on each - of the generated surfaces. - 4. Filter the list of generated adsorbate/slab (adslab) configurations - using the input adslab_filter. - 5. Relax each generated surface+adsorbate structure by refining - atomic positions to minimize forces generated by the input model. - - :param adsorbate: Description of the adsorbate to place. - :param bulk: The ID (typically Materials Project MP ID) of the bulk material - on which the adsorbate will be placed. - :param model: The type of the model to use when calculating forces during - relaxations. - :param adslab_filter: A function that modifies the set of adsorbate/slab - configurations that will be relaxed. This can be used to subselect - slabs and/or adsorbate configurations. - :param client: The OCP API client to use. - :param lifetime: Whether relaxations should be saved on the server, be marked - as ephemeral (allowing them to deleted in the future), or deleted - immediately. - - :returns: Details of each adsorbate binding site, including results of relaxing - to locally-optimized positions using the input model. - - :raises UnsupportedModelException: If the requested model is not supported. 
- :raises UnsupportedBulkException: If the requested bulk is not supported. - :raises UnsupportedAdsorbateException: If the requested adsorbate is not - supported. - - -.. py:function:: get_adsorbate_slab_relaxation_results(system_id: str, config_ids: Optional[List[int]] = None, fields: Optional[List[str]] = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult] - :async: - - Wrapper around Client.get_adsorbate_slab_relaxations_results() that - handles retries, including re-fetching individual configurations that - are initially omitted. - - :param client: The client to use when making API calls. - :param system_id: The system ID of the relaxations. - :param config_ids: If defined and not empty, a subset of configurations - to fetch. Otherwise all configurations are returned. - :param fields: If defined and not empty, a subset of fields in each - configuration to fetch. Otherwise all fields are returned. - - :returns: List of relaxation results, one for each adsorbate configuration in - the system. - - -.. py:function:: wait_for_adsorbate_slab_relaxations(system_id: str, check_immediately: bool = False, slow_interval_sec: float = 30, fast_interval_sec: float = 10, pbar: Optional[tqdm.tqdm] = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) -> Dict[int, fairchem.demo.ocpapi.client.Status] - :async: - - Blocks until all relaxations in the input system have finished, whether - successfully or not. - - Relaxations are queued in the API, waiting until machines are ready to - run them. Once started, they can take 1-2 minutes to finish. This method - initially sleeps "slow_interval_sec" seconds between each check for any - relaxations having finished. Once at least one result is ready, subsequent - sleeps are for "fast_interval_sec" seconds. - - :param system_id: The ID of the system for which relaxations are running. - :param check_immediately: If False (default), sleep before the first check - for relaxations having finished. If True, check whether relaxations - have finished immediately on entering this function. - :param slow_interval_sec: The number of seconds to wait between each check - while all are still running. - :param fast_interval_sec: The number of seconds to wait between each check - when at least one relaxation has finished in the system. - :param pbar: A tqdm instance that tracks the number of configurations that - have finished. This will be updated with the number of individual - configurations whose relaxations have finished. - :param client: The client to use when making API calls. - - :returns: Map of config IDs in the system to their terminal status. - - -.. py:class:: keep_all_slabs - - - Adslab filter that returns all slabs. - - .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] - :async: - - - -.. py:class:: keep_slabs_with_miller_indices(miller_indices: Iterable[Tuple[int, int, int]]) - - - Adslab filter that keeps any slabs with the configured miller indices. - Slabs with other miller indices will be ignored. - - .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] - :async: - - - -.. py:class:: prompt_for_slabs_to_keep - - - Adslab filter that presents the user with an interactive prompt to choose - which of the input slabs to keep. - - .. 
py:method:: _sort_key(adslab: fairchem.demo.ocpapi.client.AdsorbateSlabConfigs) -> Tuple[Tuple[int, int, int], float, str] - :staticmethod: - - Generates a sort key from the input adslab. Returns the miller indices, - shift, and top/bottom label so that they will be sorted by those values - in that order. - - - .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] - :async: - - - -.. py:data:: NO_LIMIT - :type: NoLimitType - :value: 0 - - - -.. py:data:: NoLimitType - - - -.. py:class:: RateLimitLogging - - - Controls logging when rate limits are hit. - - .. py:attribute:: logger - :type: logging.Logger - - The logger to use. - - .. py:attribute:: action - :type: str - - A short description of the action being attempted. - - -.. py:function:: retry_api_calls(max_attempts: Union[int, NoLimitType] = 3, rate_limit_logging: Optional[RateLimitLogging] = None, fixed_wait_sec: float = 2, max_jitter_sec: float = 1) -> Any - - Decorator with sensible defaults for retrying calls to the OCP API. - - :param max_attempts: The maximum number of calls to make. If NO_LIMIT, - retries will be made forever. - :param rate_limit_logging: If not None, log statements will be generated - using this configuration when a rate limit is hit. - :param fixed_wait_sec: The fixed number of seconds to wait when retrying an - exception that does *not* include a retry-after value. The default - value is sensible; this is exposed mostly for testing. - :param max_jitter_sec: The maximum number of seconds that will be randomly - added to wait times. The default value is sensible; this is exposed - mostly for testing. - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/index.rst deleted file mode 100644 index 472a55137..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests` -==================================== - -.. py:module:: fairchem.demo.ocpapi.tests - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - integration/index.rst - unit/index.rst - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/client/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/client/index.rst deleted file mode 100644 index 2ec6e7aa7..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/client/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.integration.client` -======================================================= - -.. py:module:: fairchem.demo.ocpapi.tests.integration.client - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - test_client/index.rst - test_ui/index.rst - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/client/test_client/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/client/test_client/index.rst deleted file mode 100644 index 438a29024..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/client/test_client/index.rst +++ /dev/null @@ -1,105 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.integration.client.test_client` -=================================================================== - -.. py:module:: fairchem.demo.ocpapi.tests.integration.client.test_client - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. 
autoapisummary:: - - fairchem.demo.ocpapi.tests.integration.client.test_client.TestClient - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.integration.client.test_client._ensure_system_deleted - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.integration.client.test_client.log - - -.. py:data:: log - - - -.. py:function:: _ensure_system_deleted(client: fairchem.demo.ocpapi.client.Client, system_id: str) -> AsyncGenerator[None, None] - :async: - - Immediately yields control to the caller. When control returns to this - function, try to delete the system with the input id. - - -.. py:class:: TestClient(methodName='runTest') - - - Bases: :py:obj:`unittest.IsolatedAsyncioTestCase` - - Tests that calls to a real server are handled correctly. - - .. py:attribute:: CLIENT - :type: fairchem.demo.ocpapi.client.Client - - - - .. py:attribute:: KNOWN_SYSTEM_ID - :type: str - :value: 'f9eacd8f-748c-41dd-ae43-f263dd36d735' - - - - .. py:method:: test_get_models() -> None - :async: - - - .. py:method:: test_get_bulks() -> None - :async: - - - .. py:method:: test_get_adsorbates() -> None - :async: - - - .. py:method:: test_get_slabs() -> None - :async: - - - .. py:method:: test_get_adsorbate_slab_configs() -> None - :async: - - - .. py:method:: test_submit_adsorbate_slab_relaxations__gemnet_oc() -> None - :async: - - - .. py:method:: test_submit_adsorbate_slab_relaxations__equiformer_v2() -> None - :async: - - - .. py:method:: test_get_adsorbate_slab_relaxations_request() -> None - :async: - - - .. py:method:: test_get_adsorbate_slab_relaxations_results__all_fields_and_configs() -> None - :async: - - - .. py:method:: test_get_adsorbate_slab_relaxations_results__limited_fields_and_configs() -> None - :async: - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/client/test_ui/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/client/test_ui/index.rst deleted file mode 100644 index 197ffc869..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/client/test_ui/index.rst +++ /dev/null @@ -1,42 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.integration.client.test_ui` -=============================================================== - -.. py:module:: fairchem.demo.ocpapi.tests.integration.client.test_ui - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.integration.client.test_ui.TestUI - - - - -.. py:class:: TestUI(methodName='runTest') - - - Bases: :py:obj:`unittest.TestCase` - - Tests that calls to a real server are handled correctly. - - .. py:attribute:: API_HOST - :type: str - :value: 'open-catalyst-api.metademolab.com' - - - - .. py:attribute:: KNOWN_SYSTEM_ID - :type: str - :value: 'f9eacd8f-748c-41dd-ae43-f263dd36d735' - - - - .. py:method:: test_get_results_ui_url() -> None - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/index.rst deleted file mode 100644 index 3f4c1e200..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.integration` -================================================ - -.. py:module:: fairchem.demo.ocpapi.tests.integration - - -Subpackages ------------ -.. 
toctree:: - :titlesonly: - :maxdepth: 3 - - client/index.rst - workflows/index.rst - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/workflows/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/workflows/index.rst deleted file mode 100644 index 6748a2e59..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/workflows/index.rst +++ /dev/null @@ -1,15 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.integration.workflows` -========================================================== - -.. py:module:: fairchem.demo.ocpapi.tests.integration.workflows - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - test_adsorbates/index.rst - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/workflows/test_adsorbates/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/workflows/test_adsorbates/index.rst deleted file mode 100644 index ab0e3cc86..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/integration/workflows/test_adsorbates/index.rst +++ /dev/null @@ -1,50 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.integration.workflows.test_adsorbates` -========================================================================== - -.. py:module:: fairchem.demo.ocpapi.tests.integration.workflows.test_adsorbates - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.integration.workflows.test_adsorbates.TestAdsorbates - - - - -.. py:class:: TestAdsorbates(methodName='runTest') - - - Bases: :py:obj:`unittest.IsolatedAsyncioTestCase` - - Tests that workflow methods run against a real server execute correctly. - - .. py:attribute:: CLIENT - :type: fairchem.demo.ocpapi.client.Client - - - - .. py:attribute:: KNOWN_SYSTEM_ID - :type: str - :value: 'f9eacd8f-748c-41dd-ae43-f263dd36d735' - - - - .. py:method:: test_get_adsorbate_slab_relaxation_results() -> None - :async: - - - .. py:method:: test_wait_for_adsorbate_slab_relaxations() -> None - :async: - - - .. py:method:: test_find_adsorbate_binding_sites() -> None - :async: - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/client/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/client/index.rst deleted file mode 100644 index f3c77359d..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/client/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.unit.client` -================================================ - -.. py:module:: fairchem.demo.ocpapi.tests.unit.client - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - test_client/index.rst - test_models/index.rst - test_ui/index.rst - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_client/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_client/index.rst deleted file mode 100644 index 4930eed23..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_client/index.rst +++ /dev/null @@ -1,78 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.unit.client.test_client` -============================================================ - -.. py:module:: fairchem.demo.ocpapi.tests.unit.client.test_client - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.unit.client.test_client.TestClient - - - - -.. 
py:class:: TestClient(methodName='runTest') - - - Bases: :py:obj:`unittest.IsolatedAsyncioTestCase` - - Tests with mocked responses to ensure that they are handled correctly. - - .. py:method:: _run_common_tests_against_route(method: str, route: str, client_method_name: str, successful_response_code: int, successful_response_body: str, successful_response_object: Optional[fairchem.demo.ocpapi.client.models._DataModel], client_method_args: Optional[Dict[str, Any]] = None, expected_request_params: Optional[Dict[str, Any]] = None, expected_request_body: Optional[Dict[str, Any]] = None) -> None - :async: - - - .. py:method:: test_host() -> None - - - .. py:method:: test_get_models() -> None - :async: - - - .. py:method:: test_get_bulks() -> None - :async: - - - .. py:method:: test_get_adsorbates() -> None - :async: - - - .. py:method:: test_get_slabs__bulk_by_id() -> None - :async: - - - .. py:method:: test_get_slabs__bulk_by_obj() -> None - :async: - - - .. py:method:: test_get_adsorbate_slab_configurations() -> None - :async: - - - .. py:method:: test_submit_adsorbate_slab_relaxations() -> None - :async: - - - .. py:method:: test_get_adsorbate_slab_relaxations_request() -> None - :async: - - - .. py:method:: test_get_adsorbate_slab_relaxations_results__all_args() -> None - :async: - - - .. py:method:: test_get_adsorbate_slab_relaxations_results__req_args_only() -> None - :async: - - - .. py:method:: test_delete_adsorbate_slab_relaxations() -> None - :async: - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_models/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_models/index.rst deleted file mode 100644 index cef7242c4..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_models/index.rst +++ /dev/null @@ -1,207 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.unit.client.test_models` -============================================================ - -.. py:module:: fairchem.demo.ocpapi.tests.unit.client.test_models - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.unit.client.test_models.ModelTestWrapper - fairchem.demo.ocpapi.tests.unit.client.test_models.TestModel - fairchem.demo.ocpapi.tests.unit.client.test_models.TestModels - fairchem.demo.ocpapi.tests.unit.client.test_models.TestBulk - fairchem.demo.ocpapi.tests.unit.client.test_models.TestBulks - fairchem.demo.ocpapi.tests.unit.client.test_models.TestAdsorbates - fairchem.demo.ocpapi.tests.unit.client.test_models.TestAtoms - fairchem.demo.ocpapi.tests.unit.client.test_models.TestSlabMetadata - fairchem.demo.ocpapi.tests.unit.client.test_models.TestSlab - fairchem.demo.ocpapi.tests.unit.client.test_models.TestSlabs - fairchem.demo.ocpapi.tests.unit.client.test_models.TestAdsorbateSlabConfigs - fairchem.demo.ocpapi.tests.unit.client.test_models.TestAdsorbateSlabRelaxationsSystem - fairchem.demo.ocpapi.tests.unit.client.test_models.TestAdsorbateSlabRelaxationsRequest - fairchem.demo.ocpapi.tests.unit.client.test_models.TestAdsorbateSlabRelaxationsRequest_req_fields_only - fairchem.demo.ocpapi.tests.unit.client.test_models.TestAdsorbateSlabRelaxationResult - fairchem.demo.ocpapi.tests.unit.client.test_models.TestAdsorbateSlabRelaxationResult_req_fields_only - fairchem.demo.ocpapi.tests.unit.client.test_models.TestAdsorbateSlabRelaxationsResults - - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.unit.client.test_models.T - - -.. py:data:: T - - - -.. 
py:class:: ModelTestWrapper - - - .. py:class:: ModelTest(*args: Any, obj: T, obj_json: str, **kwargs: Any) - - - Bases: :py:obj:`unittest.TestCase`, :py:obj:`Generic`\ [\ :py:obj:`T`\ ] - - Base class for all tests below that assert behavior of data models. - - .. py:method:: test_from_json() -> None - - - .. py:method:: test_to_json() -> None - - - .. py:method:: assertJsonEqual(first: str, second: str) -> None - - Compares two JSON-formatted strings by deserializing them and then - comparing the generated built-in types. - - - - -.. py:class:: TestModel(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Model`\ ] - - Serde tests for the Model data model. - - -.. py:class:: TestModels(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Models`\ ] - - Serde tests for the Models data model. - - -.. py:class:: TestBulk(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Bulk`\ ] - - Serde tests for the Bulk data model. - - -.. py:class:: TestBulks(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Bulks`\ ] - - Serde tests for the Bulks data model. - - -.. py:class:: TestAdsorbates(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Adsorbates`\ ] - - Serde tests for the Adsorbates data model. - - -.. py:class:: TestAtoms(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Atoms`\ ] - - Serde tests for the Atoms data model. - - .. py:method:: test_to_ase_atoms() -> None - - - -.. py:class:: TestSlabMetadata(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.SlabMetadata`\ ] - - Serde tests for the SlabMetadata data model. - - -.. py:class:: TestSlab(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Slab`\ ] - - Serde tests for the Slab data model. - - -.. py:class:: TestSlabs(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Slabs`\ ] - - Serde tests for the Slabs data model. - - -.. py:class:: TestAdsorbateSlabConfigs(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabConfigs`\ ] - - Serde tests for the AdsorbateSlabConfigs data model. - - -.. py:class:: TestAdsorbateSlabRelaxationsSystem(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsSystem`\ ] - - Serde tests for the AdsorbateSlabRelaxationsSystem data model. - - -.. py:class:: TestAdsorbateSlabRelaxationsRequest(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsRequest`\ ] - - Serde tests for the AdsorbateSlabRelaxationsRequest data model. - - -.. py:class:: TestAdsorbateSlabRelaxationsRequest_req_fields_only(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsRequest`\ ] - - Serde tests for the AdsorbateSlabRelaxationsRequest data model in which - optional fields are omitted. - - -.. 
py:class:: TestAdsorbateSlabRelaxationResult(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult`\ ] - - Serde tests for the AdsorbateSlabRelaxationResult data model. - - .. py:method:: test_to_ase_atoms() -> None - - - -.. py:class:: TestAdsorbateSlabRelaxationResult_req_fields_only(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult`\ ] - - Serde tests for the AdsorbateSlabRelaxationResult data model in which - optional fields are omitted. - - -.. py:class:: TestAdsorbateSlabRelaxationsResults(*args: Any, **kwargs: Any) - - - Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsResults`\ ] - - Serde tests for the AdsorbateSlabRelaxationsResults data model. - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_ui/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_ui/index.rst deleted file mode 100644 index df079a7fa..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_ui/index.rst +++ /dev/null @@ -1,59 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.unit.client.test_ui` -======================================================== - -.. py:module:: fairchem.demo.ocpapi.tests.unit.client.test_ui - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.unit.client.test_ui.TestUI - - - - -.. py:class:: TestUI(methodName='runTest') - - - Bases: :py:obj:`unittest.TestCase` - - A class whose instances are single test cases. - - By default, the test code itself should be placed in a method named - 'runTest'. - - If the fixture may be used for many test cases, create as - many test methods as are needed. When instantiating such a TestCase - subclass, specify in the constructor arguments the name of the test method - that the instance is to execute. - - Test authors should subclass TestCase for their own tests. Construction - and deconstruction of the test's environment ('fixture') can be - implemented by overriding the 'setUp' and 'tearDown' methods respectively. - - If it is necessary to override the __init__ method, the base class - __init__ method must always be called. It is important that subclasses - should not change the signature of their __init__ method, since instances - of the classes are instantiated automatically by parts of the framework - in order to be run. - - When subclassing TestCase, you can set these attributes: - * failureException: determines which exception will be raised when - the instance's assertion methods fail; test methods raising this - exception will be deemed to have 'failed' rather than 'errored'. - * longMessage: determines whether long messages (including repr of - objects used in assert methods) will be printed on failure in *addition* - to any explicit message passed. - * maxDiff: sets the maximum length of a diff in failure messages - by assert methods using difflib. It is looked up as an instance - attribute so can be configured by individual tests if required. - - .. 
py:method:: test_get_results_ui_url() -> None - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/index.rst deleted file mode 100644 index 60bb766c4..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.unit` -========================================= - -.. py:module:: fairchem.demo.ocpapi.tests.unit - - -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - - client/index.rst - workflows/index.rst - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/index.rst deleted file mode 100644 index 254a3c3e6..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/index.rst +++ /dev/null @@ -1,18 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.unit.workflows` -=================================================== - -.. py:module:: fairchem.demo.ocpapi.tests.unit.workflows - - -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - - test_adsorbates/index.rst - test_context/index.rst - test_filter/index.rst - test_retry/index.rst - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_adsorbates/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_adsorbates/index.rst deleted file mode 100644 index 61fcaddc7..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_adsorbates/index.rst +++ /dev/null @@ -1,135 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.unit.workflows.test_adsorbates` -=================================================================== - -.. py:module:: fairchem.demo.ocpapi.tests.unit.workflows.test_adsorbates - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.unit.workflows.test_adsorbates.MockGetRelaxationResults - fairchem.demo.ocpapi.tests.unit.workflows.test_adsorbates.TestMockGetRelaxationResults - fairchem.demo.ocpapi.tests.unit.workflows.test_adsorbates.TestAdsorbates - - - - -.. py:exception:: TestException - - - Bases: :py:obj:`Exception` - - Common base class for all non-exit exceptions. - - .. py:attribute:: __test__ - :value: False - - - - -.. py:class:: MockGetRelaxationResults(num_configs: int, max_configs_to_return: int, status_to_return: Optional[Iterable[fairchem.demo.ocpapi.client.Status]] = None, raise_on_first_call: Optional[Exception] = None) - - - Helper that can be used to mock calls to - Client.get_adsorbate_slab_relaxations_results(). This allows for - some configs to be returned with "success" status and others to be - omitted, similar to the behavior in the API. - - .. py:method:: __call__(*args: Any, config_ids: Optional[List[int]] = None, **kwargs: Any) -> fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsResults - - - -.. py:class:: TestMockGetRelaxationResults(methodName='runTest') - - - Bases: :py:obj:`unittest.TestCase` - - A class whose instances are single test cases. - - By default, the test code itself should be placed in a method named - 'runTest'. - - If the fixture may be used for many test cases, create as - many test methods as are needed. When instantiating such a TestCase - subclass, specify in the constructor arguments the name of the test method - that the instance is to execute. - - Test authors should subclass TestCase for their own tests. 
Construction - and deconstruction of the test's environment ('fixture') can be - implemented by overriding the 'setUp' and 'tearDown' methods respectively. - - If it is necessary to override the __init__ method, the base class - __init__ method must always be called. It is important that subclasses - should not change the signature of their __init__ method, since instances - of the classes are instantiated automatically by parts of the framework - in order to be run. - - When subclassing TestCase, you can set these attributes: - * failureException: determines which exception will be raised when - the instance's assertion methods fail; test methods raising this - exception will be deemed to have 'failed' rather than 'errored'. - * longMessage: determines whether long messages (including repr of - objects used in assert methods) will be printed on failure in *addition* - to any explicit message passed. - * maxDiff: sets the maximum length of a diff in failure messages - by assert methods using difflib. It is looked up as an instance - attribute so can be configured by individual tests if required. - - .. py:method:: test___call__() -> None - - - -.. py:class:: TestAdsorbates(methodName='runTest') - - - Bases: :py:obj:`unittest.IsolatedAsyncioTestCase` - - A class whose instances are single test cases. - - By default, the test code itself should be placed in a method named - 'runTest'. - - If the fixture may be used for many test cases, create as - many test methods as are needed. When instantiating such a TestCase - subclass, specify in the constructor arguments the name of the test method - that the instance is to execute. - - Test authors should subclass TestCase for their own tests. Construction - and deconstruction of the test's environment ('fixture') can be - implemented by overriding the 'setUp' and 'tearDown' methods respectively. - - If it is necessary to override the __init__ method, the base class - __init__ method must always be called. It is important that subclasses - should not change the signature of their __init__ method, since instances - of the classes are instantiated automatically by parts of the framework - in order to be run. - - When subclassing TestCase, you can set these attributes: - * failureException: determines which exception will be raised when - the instance's assertion methods fail; test methods raising this - exception will be deemed to have 'failed' rather than 'errored'. - * longMessage: determines whether long messages (including repr of - objects used in assert methods) will be printed on failure in *addition* - to any explicit message passed. - * maxDiff: sets the maximum length of a diff in failure messages - by assert methods using difflib. It is looked up as an instance - attribute so can be configured by individual tests if required. - - .. py:method:: test_get_adsorbate_slab_relaxation_results() -> None - :async: - - - .. py:method:: test_wait_for_adsorbate_slab_relaxations() -> None - :async: - - - .. 
py:method:: test_find_adsorbate_binding_sites() -> None - :async: - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_context/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_context/index.rst deleted file mode 100644 index 39ee66412..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_context/index.rst +++ /dev/null @@ -1,59 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.unit.workflows.test_context` -================================================================ - -.. py:module:: fairchem.demo.ocpapi.tests.unit.workflows.test_context - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.unit.workflows.test_context.TestContext - - - - -.. py:class:: TestContext(methodName='runTest') - - - Bases: :py:obj:`unittest.TestCase` - - A class whose instances are single test cases. - - By default, the test code itself should be placed in a method named - 'runTest'. - - If the fixture may be used for many test cases, create as - many test methods as are needed. When instantiating such a TestCase - subclass, specify in the constructor arguments the name of the test method - that the instance is to execute. - - Test authors should subclass TestCase for their own tests. Construction - and deconstruction of the test's environment ('fixture') can be - implemented by overriding the 'setUp' and 'tearDown' methods respectively. - - If it is necessary to override the __init__ method, the base class - __init__ method must always be called. It is important that subclasses - should not change the signature of their __init__ method, since instances - of the classes are instantiated automatically by parts of the framework - in order to be run. - - When subclassing TestCase, you can set these attributes: - * failureException: determines which exception will be raised when - the instance's assertion methods fail; test methods raising this - exception will be deemed to have 'failed' rather than 'errored'. - * longMessage: determines whether long messages (including repr of - objects used in assert methods) will be printed on failure in *addition* - to any explicit message passed. - * maxDiff: sets the maximum length of a diff in failure messages - by assert methods using difflib. It is looked up as an instance - attribute so can be configured by individual tests if required. - - .. py:method:: test_set_context_var() -> None - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_filter/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_filter/index.rst deleted file mode 100644 index c617cd247..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_filter/index.rst +++ /dev/null @@ -1,79 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.unit.workflows.test_filter` -=============================================================== - -.. py:module:: fairchem.demo.ocpapi.tests.unit.workflows.test_filter - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.unit.workflows.test_filter.TestFilter - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.unit.workflows.test_filter._new_adslab - - - -.. py:function:: _new_adslab(miller_indices: Optional[Tuple[int, int, int]] = None) -> fairchem.demo.ocpapi.client.AdsorbateSlabConfigs - - -.. 
py:class:: TestFilter(methodName='runTest') - - - Bases: :py:obj:`unittest.IsolatedAsyncioTestCase` - - A class whose instances are single test cases. - - By default, the test code itself should be placed in a method named - 'runTest'. - - If the fixture may be used for many test cases, create as - many test methods as are needed. When instantiating such a TestCase - subclass, specify in the constructor arguments the name of the test method - that the instance is to execute. - - Test authors should subclass TestCase for their own tests. Construction - and deconstruction of the test's environment ('fixture') can be - implemented by overriding the 'setUp' and 'tearDown' methods respectively. - - If it is necessary to override the __init__ method, the base class - __init__ method must always be called. It is important that subclasses - should not change the signature of their __init__ method, since instances - of the classes are instantiated automatically by parts of the framework - in order to be run. - - When subclassing TestCase, you can set these attributes: - * failureException: determines which exception will be raised when - the instance's assertion methods fail; test methods raising this - exception will be deemed to have 'failed' rather than 'errored'. - * longMessage: determines whether long messages (including repr of - objects used in assert methods) will be printed on failure in *addition* - to any explicit message passed. - * maxDiff: sets the maximum length of a diff in failure messages - by assert methods using difflib. It is looked up as an instance - attribute so can be configured by individual tests if required. - - .. py:method:: test_keep_all_slabs() -> None - :async: - - - .. py:method:: test_keep_slabs_with_miller_indices() -> None - :async: - - - .. py:method:: test_prompt_for_slabs_to_keep() -> None - :async: - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_retry/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_retry/index.rst deleted file mode 100644 index 49b869975..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_retry/index.rst +++ /dev/null @@ -1,92 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.tests.unit.workflows.test_retry` -============================================================== - -.. py:module:: fairchem.demo.ocpapi.tests.unit.workflows.test_retry - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.unit.workflows.test_retry.TestRetry - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.unit.workflows.test_retry.returns - fairchem.demo.ocpapi.tests.unit.workflows.test_retry.raises - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.tests.unit.workflows.test_retry.T - - -.. py:data:: T - - - -.. py:function:: returns(val: T) -> Callable[[], T] - - -.. py:function:: raises(ex: Exception) -> Callable[[], None] - - -.. py:class:: TestRetry(methodName='runTest') - - - Bases: :py:obj:`unittest.TestCase` - - A class whose instances are single test cases. - - By default, the test code itself should be placed in a method named - 'runTest'. - - If the fixture may be used for many test cases, create as - many test methods as are needed. When instantiating such a TestCase - subclass, specify in the constructor arguments the name of the test method - that the instance is to execute. - - Test authors should subclass TestCase for their own tests. 
Construction - and deconstruction of the test's environment ('fixture') can be - implemented by overriding the 'setUp' and 'tearDown' methods respectively. - - If it is necessary to override the __init__ method, the base class - __init__ method must always be called. It is important that subclasses - should not change the signature of their __init__ method, since instances - of the classes are instantiated automatically by parts of the framework - in order to be run. - - When subclassing TestCase, you can set these attributes: - * failureException: determines which exception will be raised when - the instance's assertion methods fail; test methods raising this - exception will be deemed to have 'failed' rather than 'errored'. - * longMessage: determines whether long messages (including repr of - objects used in assert methods) will be printed on failure in *addition* - to any explicit message passed. - * maxDiff: sets the maximum length of a diff in failure messages - by assert methods using difflib. It is looked up as an instance - attribute so can be configured by individual tests if required. - - .. py:method:: test_retry_api_calls__results() -> None - - - .. py:method:: test_retry_api_calls__wait() -> None - - - .. py:method:: test_retry_api_calls__logging() -> None - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/version/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/version/index.rst deleted file mode 100644 index 22ad42217..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/version/index.rst +++ /dev/null @@ -1,14 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.version` -====================================== - -.. py:module:: fairchem.demo.ocpapi.version - - -Module Contents ---------------- - -.. py:data:: VERSION - :value: '1.0.0' - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/workflows/adsorbates/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/workflows/adsorbates/index.rst deleted file mode 100644 index d3d76dea0..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/workflows/adsorbates/index.rst +++ /dev/null @@ -1,455 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.workflows.adsorbates` -=================================================== - -.. py:module:: fairchem.demo.ocpapi.workflows.adsorbates - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.workflows.adsorbates.Lifetime - fairchem.demo.ocpapi.workflows.adsorbates.AdsorbateSlabRelaxations - fairchem.demo.ocpapi.workflows.adsorbates.AdsorbateBindingSites - - - -Functions -~~~~~~~~~ - -.. 
autoapisummary:: - - fairchem.demo.ocpapi.workflows.adsorbates._setup_log_record_factory - fairchem.demo.ocpapi.workflows.adsorbates._ensure_model_supported - fairchem.demo.ocpapi.workflows.adsorbates._get_bulk_if_supported - fairchem.demo.ocpapi.workflows.adsorbates._ensure_adsorbate_supported - fairchem.demo.ocpapi.workflows.adsorbates._get_slabs - fairchem.demo.ocpapi.workflows.adsorbates._get_absorbate_configs_on_slab - fairchem.demo.ocpapi.workflows.adsorbates._get_absorbate_configs_on_slab_with_logging - fairchem.demo.ocpapi.workflows.adsorbates._get_adsorbate_configs_on_slabs - fairchem.demo.ocpapi.workflows.adsorbates._submit_relaxations - fairchem.demo.ocpapi.workflows.adsorbates._submit_relaxations_with_progress_logging - fairchem.demo.ocpapi.workflows.adsorbates.get_adsorbate_slab_relaxation_results - fairchem.demo.ocpapi.workflows.adsorbates.wait_for_adsorbate_slab_relaxations - fairchem.demo.ocpapi.workflows.adsorbates._delete_system - fairchem.demo.ocpapi.workflows.adsorbates._ensure_system_deleted - fairchem.demo.ocpapi.workflows.adsorbates._run_relaxations_on_slab - fairchem.demo.ocpapi.workflows.adsorbates._refresh_pbar - fairchem.demo.ocpapi.workflows.adsorbates._relax_binding_sites_on_slabs - fairchem.demo.ocpapi.workflows.adsorbates.find_adsorbate_binding_sites - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.workflows.adsorbates._CTX_AD_BULK - fairchem.demo.ocpapi.workflows.adsorbates._CTX_SLAB - fairchem.demo.ocpapi.workflows.adsorbates.DEFAULT_CLIENT - fairchem.demo.ocpapi.workflows.adsorbates._DEFAULT_ADSLAB_FILTER - - -.. py:data:: _CTX_AD_BULK - :type: contextvars.ContextVar[Tuple[str, str]] - - - -.. py:data:: _CTX_SLAB - :type: contextvars.ContextVar[fairchem.demo.ocpapi.client.Slab] - - - -.. py:function:: _setup_log_record_factory() -> None - - Adds a log record factory that stores information about the currently - running job on a log message. - - -.. py:data:: DEFAULT_CLIENT - :type: fairchem.demo.ocpapi.client.Client - - - -.. py:exception:: AdsorbatesException - - - Bases: :py:obj:`Exception` - - Base exception for all others in this module. - - -.. py:exception:: UnsupportedModelException(model: str, allowed_models: List[str]) - - - Bases: :py:obj:`AdsorbatesException` - - Exception raised when a model is not supported in the API. - - -.. py:exception:: UnsupportedBulkException(bulk: str) - - - Bases: :py:obj:`AdsorbatesException` - - Exception raised when a bulk material is not supported in the API. - - -.. py:exception:: UnsupportedAdsorbateException(adsorbate: str) - - - Bases: :py:obj:`AdsorbatesException` - - Exception raised when an adsorbate is not supported in the API. - - -.. py:class:: Lifetime(*args, **kwds) - - - Bases: :py:obj:`enum.Enum` - - Represents different lifetimes when running relaxations. - - .. py:attribute:: SAVE - - The relaxation will be available on API servers indefinitely. It will not - be possible to delete the relaxation in the future. - - .. py:attribute:: MARK_EPHEMERAL - - The relaxation will be saved on API servers, but can be deleted at any time - in the future. - - .. py:attribute:: DELETE - - The relaxation will be deleted from API servers as soon as the results have - been fetched. - - -.. py:class:: AdsorbateSlabRelaxations - - - Stores the relaxations of adsorbate placements on the surface of a slab. - - .. py:attribute:: slab - :type: fairchem.demo.ocpapi.client.Slab - - The slab on which the adsorbate was placed. - - .. 
py:attribute:: configs - :type: List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult] - - Details of the relaxation of each adsorbate placement, including the - final position. - - .. py:attribute:: system_id - :type: str - - The ID of the system that stores all of the relaxations. - - .. py:attribute:: api_host - :type: str - - The API host on which the relaxations were run. - - .. py:attribute:: ui_url - :type: Optional[str] - - The URL at which results can be visualized. - - -.. py:class:: AdsorbateBindingSites - - - Stores the inputs and results of a set of relaxations of adsorbate - placements on the surface of a slab. - - .. py:attribute:: adsorbate - :type: str - - Description of the adsorbate. - - .. py:attribute:: bulk - :type: fairchem.demo.ocpapi.client.Bulk - - The bulk material that was being modeled. - - .. py:attribute:: model - :type: str - - The type of the model that was run. - - .. py:attribute:: slabs - :type: List[AdsorbateSlabRelaxations] - - The list of slabs that were generated from the bulk structure. Each - contains its own list of adsorbate placements. - - -.. py:function:: _ensure_model_supported(client: fairchem.demo.ocpapi.client.Client, model: str) -> None - :async: - - Checks that the input model is supported in the API. - - :param client: The client to use when making requests to the API. - :param model: The model to check. - - :raises UnsupportedModelException: If the model is not supported. - - -.. py:function:: _get_bulk_if_supported(client: fairchem.demo.ocpapi.client.Client, bulk: str) -> fairchem.demo.ocpapi.client.Bulk - :async: - - Returns the object from the input bulk if it is supported in the API. - - :param client: The client to use when making requests to the API. - :param bulk: The bulk to fetch. - - :raises UnsupportedBulkException: If the requested bulk is not supported. - - :returns: Bulk instance for the input type. - - -.. py:function:: _ensure_adsorbate_supported(client: fairchem.demo.ocpapi.client.Client, adsorbate: str) -> None - :async: - - Checks that the input adsorbate is supported in the API. - - :param client: The client to use when making requests to the API. - :param adsorbate: The adsorbate to check. - - :raises UnsupportedAdsorbateException: If the adsorbate is not supported. - - -.. py:function:: _get_slabs(client: fairchem.demo.ocpapi.client.Client, bulk: fairchem.demo.ocpapi.client.Bulk) -> List[fairchem.demo.ocpapi.client.Slab] - :async: - - Enumerates surfaces for the input bulk material. - - :param client: The client to use when making requests to the API. - :param bulk: The bulk material from which slabs will be generated. - - :returns: The list of slabs that were generated. - - -.. py:function:: _get_absorbate_configs_on_slab(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, slab: fairchem.demo.ocpapi.client.Slab) -> fairchem.demo.ocpapi.client.AdsorbateSlabConfigs - :async: - - Generate initial guesses at adsorbate binding sites on the input slab. - - :param client: The client to use when making API calls. - :param adsorbate: Description of the adsorbate to place. - :param slab: The slab on which the adsorbate should be placed. - - :returns: An updated slab instance that has had tags applied to it and a list - of Atoms objects, each with the positions of the adsorbate atoms on - one of the candidate binding sites. - - -.. 
py:function:: _get_absorbate_configs_on_slab_with_logging(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, slab: fairchem.demo.ocpapi.client.Slab) -> fairchem.demo.ocpapi.client.AdsorbateSlabConfigs - :async: - - Wrapper around _get_absorbate_configs_on_slab that adds logging. - - -.. py:function:: _get_adsorbate_configs_on_slabs(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, slabs: List[fairchem.demo.ocpapi.client.Slab]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] - :async: - - Finds candidate adsorbate binding sites on each of the input slabs. - - :param client: The client to use when making API calls. - :param adsorbate: Description of the adsorbate to place. - :param slabs: The slabs on which the adsorbate should be placed. - - :returns: List of slabs and, for each, the positions of the adsorbate - atoms in the potential binding site. - - -.. py:function:: _submit_relaxations(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.Atoms], bulk: fairchem.demo.ocpapi.client.Bulk, slab: fairchem.demo.ocpapi.client.Slab, model: str, ephemeral: bool) -> str - :async: - - Start relaxations for each of the input adsorbate configurations on the - input slab. - - :param client: The client to use when making API calls. - :param adsorbate: Description of the adsorbate to place. - :param adsorbate_configs: Positions of the adsorbate on the slab. Each - will be relaxed independently. - :param bulk: The bulk material from which the slab was generated. - :param slab: The slab that should be searched for adsorbate binding sites. - :param model: The model to use when evaluating forces and energies. - :param ephemeral: Whether the relaxations should be marked as ephemeral. - - :returns: The system ID of the relaxation run, which can be used to fetch results - as they become available. - - -.. py:function:: _submit_relaxations_with_progress_logging(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.Atoms], bulk: fairchem.demo.ocpapi.client.Bulk, slab: fairchem.demo.ocpapi.client.Slab, model: str, ephemeral: bool) -> str - :async: - - Wrapper around _submit_relaxations that adds periodic logging in case - calls to submit relaxations are being rate limited. - - -.. py:function:: get_adsorbate_slab_relaxation_results(system_id: str, config_ids: Optional[List[int]] = None, fields: Optional[List[str]] = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult] - :async: - - Wrapper around Client.get_adsorbate_slab_relaxations_results() that - handles retries, including re-fetching individual configurations that - are initially omitted. - - :param client: The client to use when making API calls. - :param system_id: The system ID of the relaxations. - :param config_ids: If defined and not empty, a subset of configurations - to fetch. Otherwise all configurations are returned. - :param fields: If defined and not empty, a subset of fields in each - configuration to fetch. Otherwise all fields are returned. - - :returns: List of relaxation results, one for each adsorbate configuration in - the system. - - -.. 
py:function:: wait_for_adsorbate_slab_relaxations(system_id: str, check_immediately: bool = False, slow_interval_sec: float = 30, fast_interval_sec: float = 10, pbar: Optional[tqdm.tqdm] = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) -> Dict[int, fairchem.demo.ocpapi.client.Status] - :async: - - Blocks until all relaxations in the input system have finished, whether - successfully or not. - - Relaxations are queued in the API, waiting until machines are ready to - run them. Once started, they can take 1-2 minutes to finish. This method - initially sleeps "slow_interval_sec" seconds between each check for any - relaxations having finished. Once at least one result is ready, subsequent - sleeps are for "fast_interval_sec" seconds. - - :param system_id: The ID of the system for which relaxations are running. - :param check_immediately: If False (default), sleep before the first check - for relaxations having finished. If True, check whether relaxations - have finished immediately on entering this function. - :param slow_interval_sec: The number of seconds to wait between each check - while all are still running. - :param fast_interval_sec: The number of seconds to wait between each check - when at least one relaxation has finished in the system. - :param pbar: A tqdm instance that tracks the number of configurations that - have finished. This will be updated with the number of individual - configurations whose relaxations have finished. - :param client: The client to use when making API calls. - - :returns: Map of config IDs in the system to their terminal status. - - -.. py:function:: _delete_system(client: fairchem.demo.ocpapi.client.Client, system_id: str) -> None - :async: - - Deletes the input system, with retries on failed attempts. - - :param client: The client to use when making API calls. - :param system_id: The ID of the system to delete. - - -.. py:function:: _ensure_system_deleted(client: fairchem.demo.ocpapi.client.Client, system_id: str) -> AsyncGenerator[None, None] - :async: - - Immediately yields control to the caller. When control returns to this - function, try to delete the system with the input id. - - :param client: The client to use when making API calls. - :param system_id: The ID of the system to delete. - - -.. py:function:: _run_relaxations_on_slab(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.Atoms], bulk: fairchem.demo.ocpapi.client.Bulk, slab: fairchem.demo.ocpapi.client.Slab, model: str, lifetime: Lifetime, pbar: tqdm.tqdm) -> AdsorbateSlabRelaxations - :async: - - Start relaxations for each adsorbate configuration on the input slab - and wait for all to finish. - - :param client: The client to use when making API calls. - :param adsorbate: Description of the adsorbate to place. - :param adsorbate_configs: The positions of atoms in each adsorbate placement - to be relaxed. - :param bulk: The bulk material from which the slab was generated. - :param slab: The slab that should be searched for adsorbate binding sites. - :param model: The model to use when evaluating forces and energies. - :param lifetime: Whether relaxations should be saved on the server, be marked - as ephemeral (allowing them to deleted in the future), or deleted - immediately. - :param pbar: A progress bar to update as relaxations finish. - - :returns: Details of each adsorbate placement, including its relaxed position. - - -.. 
py:function:: _refresh_pbar(pbar: tqdm.tqdm, interval_sec: float) -> None - :async: - - Helper function that refreshes the input progress bar on a regular - schedule. This function never returns; it must be cancelled. - - :param pbar: The progress bar to refresh. - :param interval_sec: The number of seconds to wait between each refresh. - - -.. py:function:: _relax_binding_sites_on_slabs(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, bulk: fairchem.demo.ocpapi.client.Bulk, adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs], model: str, lifetime: Lifetime) -> AdsorbateBindingSites - :async: - - Search for adsorbate binding sites on the input slab. - - :param client: The client to use when making API calls. - :param adsorbate: Description of the adsorbate to place. - :param bulk: The bulk material from which the slab was generated. - :param adslabs: The slabs and, for each, the binding sites that should be - relaxed. - :param model: The model to use when evaluating forces and energies. - :param lifetime: Whether relaxations should be saved on the server, be marked - as ephemeral (allowing them to deleted in the future), or deleted - immediately. - - :returns: Details of each adsorbate placement, including its relaxed position. - - -.. py:data:: _DEFAULT_ADSLAB_FILTER - :type: Callable[[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]], Awaitable[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]]] - - - -.. py:function:: find_adsorbate_binding_sites(adsorbate: str, bulk: str, model: str = 'equiformer_v2_31M_s2ef_all_md', adslab_filter: Callable[[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]], Awaitable[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]]] = _DEFAULT_ADSLAB_FILTER, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT, lifetime: Lifetime = Lifetime.SAVE) -> AdsorbateBindingSites - :async: - - Search for adsorbate binding sites on surfaces of a bulk material. - This executes the following steps: - - 1. Ensure that both the adsorbate and bulk are supported in the - OCP API. - 2. Enumerate unique surfaces from the bulk material. - 3. Enumerate likely binding sites for the input adsorbate on each - of the generated surfaces. - 4. Filter the list of generated adsorbate/slab (adslab) configurations - using the input adslab_filter. - 5. Relax each generated surface+adsorbate structure by refining - atomic positions to minimize forces generated by the input model. - - :param adsorbate: Description of the adsorbate to place. - :param bulk: The ID (typically Materials Project MP ID) of the bulk material - on which the adsorbate will be placed. - :param model: The type of the model to use when calculating forces during - relaxations. - :param adslab_filter: A function that modifies the set of adsorbate/slab - configurations that will be relaxed. This can be used to subselect - slabs and/or adsorbate configurations. - :param client: The OCP API client to use. - :param lifetime: Whether relaxations should be saved on the server, be marked - as ephemeral (allowing them to deleted in the future), or deleted - immediately. - - :returns: Details of each adsorbate binding site, including results of relaxing - to locally-optimized positions using the input model. - - :raises UnsupportedModelException: If the requested model is not supported. - :raises UnsupportedBulkException: If the requested bulk is not supported. - :raises UnsupportedAdsorbateException: If the requested adsorbate is not - supported. 
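The workflow helpers documented above compose into a short end-to-end call. The sketch below is illustrative only: it assumes the re-exports listed under ``fairchem.demo.ocpapi.workflows``, and the adsorbate string (``*OH``) and bulk ID (``mp-30``) are placeholder inputs rather than values taken from this documentation.

.. code-block:: python

    # Illustrative sketch: enumerate and relax adsorbate placements with the
    # documented workflow helpers. "*OH" and "mp-30" are placeholder inputs.
    import asyncio

    from fairchem.demo.ocpapi.workflows import (
        Lifetime,
        find_adsorbate_binding_sites,
        keep_slabs_with_miller_indices,
    )

    async def main() -> None:
        # Relax only (1, 1, 1) slabs and mark the systems as ephemeral so
        # they can be deleted later (see the Lifetime enum above).
        sites = await find_adsorbate_binding_sites(
            adsorbate="*OH",
            bulk="mp-30",
            adslab_filter=keep_slabs_with_miller_indices([(1, 1, 1)]),
            lifetime=Lifetime.MARK_EPHEMERAL,
        )
        for slab in sites.slabs:
            # Each AdsorbateSlabRelaxations entry exposes the system ID and,
            # when available, a UI URL for inspecting the relaxed configs.
            print(slab.system_id, slab.ui_url)

    if __name__ == "__main__":
        asyncio.run(main())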
- - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/workflows/context/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/workflows/context/index.rst deleted file mode 100644 index 80fc7d78d..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/workflows/context/index.rst +++ /dev/null @@ -1,29 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.workflows.context` -================================================ - -.. py:module:: fairchem.demo.ocpapi.workflows.context - - -Module Contents ---------------- - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.workflows.context.set_context_var - - - -.. py:function:: set_context_var(context_var: contextvars.ContextVar, value: Any) -> Generator[None, None, None] - - Sets the input convext variable to the input value and yields control - back to the caller. When control returns to this function, the context - variable is reset to its original value. - - :param context_var: The context variable to set. - :param value: The value to assign to the variable. - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/workflows/filter/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/workflows/filter/index.rst deleted file mode 100644 index 66826b0b1..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/workflows/filter/index.rst +++ /dev/null @@ -1,61 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.workflows.filter` -=============================================== - -.. py:module:: fairchem.demo.ocpapi.workflows.filter - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.workflows.filter.keep_all_slabs - fairchem.demo.ocpapi.workflows.filter.keep_slabs_with_miller_indices - fairchem.demo.ocpapi.workflows.filter.prompt_for_slabs_to_keep - - - - -.. py:class:: keep_all_slabs - - - Adslab filter than returns all slabs. - - .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] - :async: - - - -.. py:class:: keep_slabs_with_miller_indices(miller_indices: Iterable[Tuple[int, int, int]]) - - - Adslab filter that keeps any slabs with the configured miller indices. - Slabs with other miller indices will be ignored. - - .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] - :async: - - - -.. py:class:: prompt_for_slabs_to_keep - - - Adslab filter than presents the user with an interactive prompt to choose - which of the input slabs to keep. - - .. py:method:: _sort_key(adslab: fairchem.demo.ocpapi.client.AdsorbateSlabConfigs) -> Tuple[Tuple[int, int, int], float, str] - :staticmethod: - - Generates a sort key from the input adslab. Returns the miller indices, - shift, and top/bottom label so that they will be sorted by those values - in that order. - - - .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] - :async: - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/workflows/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/workflows/index.rst deleted file mode 100644 index 0cc6989fb..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/workflows/index.rst +++ /dev/null @@ -1,331 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.workflows` -======================================== - -.. py:module:: fairchem.demo.ocpapi.workflows - - -Submodules ----------- -.. 
toctree:: - :titlesonly: - :maxdepth: 1 - - adsorbates/index.rst - context/index.rst - filter/index.rst - log/index.rst - retry/index.rst - - -Package Contents ----------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.workflows.AdsorbateBindingSites - fairchem.demo.ocpapi.workflows.AdsorbateSlabRelaxations - fairchem.demo.ocpapi.workflows.Lifetime - fairchem.demo.ocpapi.workflows.keep_all_slabs - fairchem.demo.ocpapi.workflows.keep_slabs_with_miller_indices - fairchem.demo.ocpapi.workflows.prompt_for_slabs_to_keep - fairchem.demo.ocpapi.workflows.RateLimitLogging - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.workflows.find_adsorbate_binding_sites - fairchem.demo.ocpapi.workflows.get_adsorbate_slab_relaxation_results - fairchem.demo.ocpapi.workflows.wait_for_adsorbate_slab_relaxations - fairchem.demo.ocpapi.workflows.retry_api_calls - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.workflows.NO_LIMIT - fairchem.demo.ocpapi.workflows.NoLimitType - - -.. py:class:: AdsorbateBindingSites - - - Stores the inputs and results of a set of relaxations of adsorbate - placements on the surface of a slab. - - .. py:attribute:: adsorbate - :type: str - - Description of the adsorbate. - - .. py:attribute:: bulk - :type: fairchem.demo.ocpapi.client.Bulk - - The bulk material that was being modeled. - - .. py:attribute:: model - :type: str - - The type of the model that was run. - - .. py:attribute:: slabs - :type: List[AdsorbateSlabRelaxations] - - The list of slabs that were generated from the bulk structure. Each - contains its own list of adsorbate placements. - - -.. py:class:: AdsorbateSlabRelaxations - - - Stores the relaxations of adsorbate placements on the surface of a slab. - - .. py:attribute:: slab - :type: fairchem.demo.ocpapi.client.Slab - - The slab on which the adsorbate was placed. - - .. py:attribute:: configs - :type: List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult] - - Details of the relaxation of each adsorbate placement, including the - final position. - - .. py:attribute:: system_id - :type: str - - The ID of the system that stores all of the relaxations. - - .. py:attribute:: api_host - :type: str - - The API host on which the relaxations were run. - - .. py:attribute:: ui_url - :type: Optional[str] - - The URL at which results can be visualized. - - -.. py:class:: Lifetime(*args, **kwds) - - - Bases: :py:obj:`enum.Enum` - - Represents different lifetimes when running relaxations. - - .. py:attribute:: SAVE - - The relaxation will be available on API servers indefinitely. It will not - be possible to delete the relaxation in the future. - - .. py:attribute:: MARK_EPHEMERAL - - The relaxation will be saved on API servers, but can be deleted at any time - in the future. - - .. py:attribute:: DELETE - - The relaxation will be deleted from API servers as soon as the results have - been fetched. - - -.. py:exception:: UnsupportedAdsorbateException(adsorbate: str) - - - Bases: :py:obj:`AdsorbatesException` - - Exception raised when an adsorbate is not supported in the API. - - -.. py:exception:: UnsupportedBulkException(bulk: str) - - - Bases: :py:obj:`AdsorbatesException` - - Exception raised when a bulk material is not supported in the API. - - -.. py:exception:: UnsupportedModelException(model: str, allowed_models: List[str]) - - - Bases: :py:obj:`AdsorbatesException` - - Exception raised when a model is not supported in the API. - - -.. 
py:function:: find_adsorbate_binding_sites(adsorbate: str, bulk: str, model: str = 'equiformer_v2_31M_s2ef_all_md', adslab_filter: Callable[[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]], Awaitable[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]]] = _DEFAULT_ADSLAB_FILTER, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT, lifetime: Lifetime = Lifetime.SAVE) -> AdsorbateBindingSites - :async: - - Search for adsorbate binding sites on surfaces of a bulk material. - This executes the following steps: - - 1. Ensure that both the adsorbate and bulk are supported in the - OCP API. - 2. Enumerate unique surfaces from the bulk material. - 3. Enumerate likely binding sites for the input adsorbate on each - of the generated surfaces. - 4. Filter the list of generated adsorbate/slab (adslab) configurations - using the input adslab_filter. - 5. Relax each generated surface+adsorbate structure by refining - atomic positions to minimize forces generated by the input model. - - :param adsorbate: Description of the adsorbate to place. - :param bulk: The ID (typically Materials Project MP ID) of the bulk material - on which the adsorbate will be placed. - :param model: The type of the model to use when calculating forces during - relaxations. - :param adslab_filter: A function that modifies the set of adsorbate/slab - configurations that will be relaxed. This can be used to subselect - slabs and/or adsorbate configurations. - :param client: The OCP API client to use. - :param lifetime: Whether relaxations should be saved on the server, be marked - as ephemeral (allowing them to deleted in the future), or deleted - immediately. - - :returns: Details of each adsorbate binding site, including results of relaxing - to locally-optimized positions using the input model. - - :raises UnsupportedModelException: If the requested model is not supported. - :raises UnsupportedBulkException: If the requested bulk is not supported. - :raises UnsupportedAdsorbateException: If the requested adsorbate is not - supported. - - -.. py:function:: get_adsorbate_slab_relaxation_results(system_id: str, config_ids: Optional[List[int]] = None, fields: Optional[List[str]] = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult] - :async: - - Wrapper around Client.get_adsorbate_slab_relaxations_results() that - handles retries, including re-fetching individual configurations that - are initially omitted. - - :param client: The client to use when making API calls. - :param system_id: The system ID of the relaxations. - :param config_ids: If defined and not empty, a subset of configurations - to fetch. Otherwise all configurations are returned. - :param fields: If defined and not empty, a subset of fields in each - configuration to fetch. Otherwise all fields are returned. - - :returns: List of relaxation results, one for each adsorbate configuration in - the system. - - -.. py:function:: wait_for_adsorbate_slab_relaxations(system_id: str, check_immediately: bool = False, slow_interval_sec: float = 30, fast_interval_sec: float = 10, pbar: Optional[tqdm.tqdm] = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) -> Dict[int, fairchem.demo.ocpapi.client.Status] - :async: - - Blocks until all relaxations in the input system have finished, whether - successfully or not. - - Relaxations are queued in the API, waiting until machines are ready to - run them. Once started, they can take 1-2 minutes to finish. 
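As a rough, hedged sketch of how this waiting helper and the results fetcher documented above are typically combined from user code (the system ID below is a placeholder, and the import path simply follows this reference):

.. code-block:: python

    # Hedged usage sketch: "sys-123" stands in for the ID of a system whose
    # relaxations were submitted earlier; it is not a value taken from this
    # reference.
    import asyncio

    from fairchem.demo.ocpapi.workflows import (
        get_adsorbate_slab_relaxation_results,
        wait_for_adsorbate_slab_relaxations,
    )

    async def fetch_when_done(system_id: str):
        # Block until every configuration in the system reaches a terminal
        # status...
        await wait_for_adsorbate_slab_relaxations(system_id)
        # ...then fetch per-configuration results, re-requesting any that the
        # server initially omits.
        return await get_adsorbate_slab_relaxation_results(system_id)

    results = asyncio.run(fetch_when_done("sys-123"))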
This method - initially sleeps "slow_interval_sec" seconds between each check for any - relaxations having finished. Once at least one result is ready, subsequent - sleeps are for "fast_interval_sec" seconds. - - :param system_id: The ID of the system for which relaxations are running. - :param check_immediately: If False (default), sleep before the first check - for relaxations having finished. If True, check whether relaxations - have finished immediately on entering this function. - :param slow_interval_sec: The number of seconds to wait between each check - while all are still running. - :param fast_interval_sec: The number of seconds to wait between each check - when at least one relaxation has finished in the system. - :param pbar: A tqdm instance that tracks the number of configurations that - have finished. This will be updated with the number of individual - configurations whose relaxations have finished. - :param client: The client to use when making API calls. - - :returns: Map of config IDs in the system to their terminal status. - - -.. py:class:: keep_all_slabs - - - Adslab filter than returns all slabs. - - .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] - :async: - - - -.. py:class:: keep_slabs_with_miller_indices(miller_indices: Iterable[Tuple[int, int, int]]) - - - Adslab filter that keeps any slabs with the configured miller indices. - Slabs with other miller indices will be ignored. - - .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] - :async: - - - -.. py:class:: prompt_for_slabs_to_keep - - - Adslab filter than presents the user with an interactive prompt to choose - which of the input slabs to keep. - - .. py:method:: _sort_key(adslab: fairchem.demo.ocpapi.client.AdsorbateSlabConfigs) -> Tuple[Tuple[int, int, int], float, str] - :staticmethod: - - Generates a sort key from the input adslab. Returns the miller indices, - shift, and top/bottom label so that they will be sorted by those values - in that order. - - - .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] - :async: - - - -.. py:data:: NO_LIMIT - :type: NoLimitType - :value: 0 - - - -.. py:data:: NoLimitType - - - -.. py:class:: RateLimitLogging - - - Controls logging when rate limits are hit. - - .. py:attribute:: logger - :type: logging.Logger - - The logger to use. - - .. py:attribute:: action - :type: str - - A short description of the action being attempted. - - -.. py:function:: retry_api_calls(max_attempts: Union[int, NoLimitType] = 3, rate_limit_logging: Optional[RateLimitLogging] = None, fixed_wait_sec: float = 2, max_jitter_sec: float = 1) -> Any - - Decorator with sensible defaults for retrying calls to the OCP API. - - :param max_attempts: The maximum number of calls to make. If NO_LIMIT, - retries will be made forever. - :param rate_limit_logging: If not None, log statements will be generated - using this configuration when a rate limit is hit. - :param fixed_wait_sec: The fixed number of seconds to wait when retrying an - exception that does *not* include a retry-after value. The default - value is sensible; this is exposed mostly for testing. - :param max_jitter_sec: The maximum number of seconds that will be randomly - added to wait times. 
The default value is sensible; this is exposed - mostly for testing. - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/workflows/log/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/workflows/log/index.rst deleted file mode 100644 index 409239dfd..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/workflows/log/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.workflows.log` -============================================ - -.. py:module:: fairchem.demo.ocpapi.workflows.log - - -Module Contents ---------------- - -.. py:data:: log - - - diff --git a/_sources/autoapi/fairchem/demo/ocpapi/workflows/retry/index.rst b/_sources/autoapi/fairchem/demo/ocpapi/workflows/retry/index.rst deleted file mode 100644 index 6b0eafcf3..000000000 --- a/_sources/autoapi/fairchem/demo/ocpapi/workflows/retry/index.rst +++ /dev/null @@ -1,95 +0,0 @@ -:py:mod:`fairchem.demo.ocpapi.workflows.retry` -============================================== - -.. py:module:: fairchem.demo.ocpapi.workflows.retry - - -Module Contents ---------------- - -Classes -~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.workflows.retry.RateLimitLogging - fairchem.demo.ocpapi.workflows.retry._wait_check_retry_after - - - -Functions -~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.workflows.retry.retry_api_calls - - - -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - - fairchem.demo.ocpapi.workflows.retry.NoLimitType - fairchem.demo.ocpapi.workflows.retry.NO_LIMIT - - -.. py:class:: RateLimitLogging - - - Controls logging when rate limits are hit. - - .. py:attribute:: logger - :type: logging.Logger - - The logger to use. - - .. py:attribute:: action - :type: str - - A short description of the action being attempted. - - -.. py:class:: _wait_check_retry_after(default_wait: tenacity.wait.wait_base, rate_limit_logging: Optional[RateLimitLogging] = None) - - - Bases: :py:obj:`tenacity.wait.wait_base` - - Tenacity wait strategy that first checks whether RateLimitExceededException - was raised and that it includes a retry-after value; if so wait, for that - amount of time. Otherwise, fall back to the provided default strategy. - - .. py:method:: __call__(retry_state: tenacity.RetryCallState) -> float - - If a RateLimitExceededException was raised and has a retry_after value, - return it. Otherwise use the default waiter method. - - - -.. py:data:: NoLimitType - - - -.. py:data:: NO_LIMIT - :type: NoLimitType - :value: 0 - - - -.. py:function:: retry_api_calls(max_attempts: Union[int, NoLimitType] = 3, rate_limit_logging: Optional[RateLimitLogging] = None, fixed_wait_sec: float = 2, max_jitter_sec: float = 1) -> Any - - Decorator with sensible defaults for retrying calls to the OCP API. - - :param max_attempts: The maximum number of calls to make. If NO_LIMIT, - retries will be made forever. - :param rate_limit_logging: If not None, log statements will be generated - using this configuration when a rate limit is hit. - :param fixed_wait_sec: The fixed number of seconds to wait when retrying an - exception that does *not* include a retry-after value. The default - value is sensible; this is exposed mostly for testing. - :param max_jitter_sec: The maximum number of seconds that will be randomly - added to wait times. The default value is sensible; this is exposed - mostly for testing. 
- - diff --git a/_sources/autoapi/index.rst b/_sources/autoapi/index.rst index 448b407b2..c85ca02fe 100644 --- a/_sources/autoapi/index.rst +++ b/_sources/autoapi/index.rst @@ -6,5 +6,8 @@ This page contains auto-generated API reference documentation [#f1]_. .. toctree:: :titlesonly: + /autoapi/core/index + /autoapi/data/index + /autoapi/ocpapi/index .. [#f1] Created with `sphinx-autoapi `_ \ No newline at end of file diff --git a/_sources/autoapi/ocpapi/client/client/index.rst b/_sources/autoapi/ocpapi/client/client/index.rst new file mode 100644 index 000000000..e85120904 --- /dev/null +++ b/_sources/autoapi/ocpapi/client/client/index.rst @@ -0,0 +1,248 @@ +:py:mod:`ocpapi.client.client` +============================== + +.. py:module:: ocpapi.client.client + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.client.client.Client + + + + +.. py:exception:: RequestException(method: str, url: str, cause: str) + + + Bases: :py:obj:`Exception` + + Exception raised any time there is an error while making an API call. + + +.. py:exception:: NonRetryableRequestException(method: str, url: str, cause: str) + + + Bases: :py:obj:`RequestException` + + Exception raised when an API call is rejected for a reason that will + not succeed on retry. For example, this might include a malformed request + or action that is not allowed. + + +.. py:exception:: RateLimitExceededException(method: str, url: str, retry_after: Optional[datetime.timedelta] = None) + + + Bases: :py:obj:`RequestException` + + Exception raised when an API call is rejected because a rate limit has + been exceeded. + + .. attribute:: retry_after + + If known, the time to wait before the next attempt to + call the API should be made. + + +.. py:class:: Client(host: str = 'open-catalyst-api.metademolab.com', scheme: str = 'https') + + + Exposes each route in the OCP API as a method. + + .. py:property:: host + :type: str + + The host being called by this client. + + .. py:method:: get_models() -> ocpapi.client.models.Models + :async: + + Fetch the list of models that are supported in the API. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The models that are supported in the API. + + + .. py:method:: get_bulks() -> ocpapi.client.models.Bulks + :async: + + Fetch the list of bulk materials that are supported in the API. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The bulks that are supported throughout the API. + + + .. py:method:: get_adsorbates() -> ocpapi.client.models.Adsorbates + :async: + + Fetch the list of adsorbates that are supported in the API. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. 
+ :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The adsorbates that are supported throughout the API. + + + .. py:method:: get_slabs(bulk: Union[str, ocpapi.client.models.Bulk]) -> ocpapi.client.models.Slabs + :async: + + Get a unique list of slabs for the input bulk structure. + + :param bulk: If a string, the id of the bulk to use. Otherwise the Bulk + instance to use. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: Slabs for each of the unique surfaces of the material. + + + .. py:method:: get_adsorbate_slab_configs(adsorbate: str, slab: ocpapi.client.models.Slab) -> ocpapi.client.models.AdsorbateSlabConfigs + :async: + + Get a list of possible binding sites for the input adsorbate on the + input slab. + + :param adsorbate: Description of the the adsorbate to place. + :param slab: Information about the slab on which the adsorbate should + be placed. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: Configurations for each adsorbate binding site on the slab. + + + .. py:method:: submit_adsorbate_slab_relaxations(adsorbate: str, adsorbate_configs: List[ocpapi.client.models.Atoms], bulk: ocpapi.client.models.Bulk, slab: ocpapi.client.models.Slab, model: str, ephemeral: bool = False) -> ocpapi.client.models.AdsorbateSlabRelaxationsSystem + :async: + + Starts relaxations of the input adsorbate configurations on the input + slab using energies and forces returned by the input model. Relaxations + are run asynchronously and results can be fetched using the system id + that is returned from this method. + + :param adsorbate: Description of the adsorbate being simulated. + :param adsorbate_configs: List of adsorbate configurations to relax. This + should only include the adsorbates themselves; the surface is + defined in the "slab" field that is a peer to this one. + :param bulk: Details of the bulk material being simulated. + :param slab: The structure of the slab on which adsorbates are placed. + :param model: The model that will be used to evaluate energies and forces + during relaxations. + :param ephemeral: If False (default), any later attempt to delete the + generated relaxations will be rejected. If True, deleting the + relaxations will be allowed, which is generally useful for + testing when there is no reason for results to be persisted. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: IDs of the relaxations. + + + .. 
py:method:: get_adsorbate_slab_relaxations_request(system_id: str) -> ocpapi.client.models.AdsorbateSlabRelaxationsRequest + :async: + + Fetches the original relaxations request for the input system. + + :param system_id: The ID of the system to fetch. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The original request that was made when submitting relaxations. + + + .. py:method:: get_adsorbate_slab_relaxations_results(system_id: str, config_ids: Optional[List[int]] = None, fields: Optional[List[str]] = None) -> ocpapi.client.models.AdsorbateSlabRelaxationsResults + :async: + + Fetches relaxation results for the input system. + + :param system_id: The system id of the relaxations. + :param config_ids: If defined and not empty, a subset of configurations + to fetch. Otherwise all configurations are returned. + :param fields: If defined and not empty, a subset of fields in each + configuration to fetch. Otherwise all fields are returned. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The relaxation results for each configuration in the system. + + + .. py:method:: delete_adsorbate_slab_relaxations(system_id: str) -> None + :async: + + Deletes all relaxation results for the input system. + + :param system_id: The ID of the system to delete. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + + .. py:method:: _run_request(path: str, method: str, **kwargs) -> str + :async: + + Helper method that runs the input request on a thread so that + it doesn't block the event loop on the calling thread. + + :param path: The URL path to make the request against. + :param method: The HTTP method to use (GET, POST, etc.). + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The response body from the request as a string. + + + diff --git a/_sources/autoapi/ocpapi/client/index.rst b/_sources/autoapi/ocpapi/client/index.rst new file mode 100644 index 000000000..d7ee9111a --- /dev/null +++ b/_sources/autoapi/ocpapi/client/index.rst @@ -0,0 +1,703 @@ +:py:mod:`ocpapi.client` +======================= + +.. py:module:: ocpapi.client + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + client/index.rst + models/index.rst + ui/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. 
autoapisummary:: + + ocpapi.client.Client + ocpapi.client.Adsorbates + ocpapi.client.AdsorbateSlabConfigs + ocpapi.client.AdsorbateSlabRelaxationResult + ocpapi.client.AdsorbateSlabRelaxationsRequest + ocpapi.client.AdsorbateSlabRelaxationsResults + ocpapi.client.AdsorbateSlabRelaxationsSystem + ocpapi.client.Atoms + ocpapi.client.Bulk + ocpapi.client.Bulks + ocpapi.client.Model + ocpapi.client.Models + ocpapi.client.Slab + ocpapi.client.SlabMetadata + ocpapi.client.Slabs + ocpapi.client.Status + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.client.get_results_ui_url + + + +.. py:class:: Client(host: str = 'open-catalyst-api.metademolab.com', scheme: str = 'https') + + + Exposes each route in the OCP API as a method. + + .. py:property:: host + :type: str + + The host being called by this client. + + .. py:method:: get_models() -> ocpapi.client.models.Models + :async: + + Fetch the list of models that are supported in the API. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The models that are supported in the API. + + + .. py:method:: get_bulks() -> ocpapi.client.models.Bulks + :async: + + Fetch the list of bulk materials that are supported in the API. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The bulks that are supported throughout the API. + + + .. py:method:: get_adsorbates() -> ocpapi.client.models.Adsorbates + :async: + + Fetch the list of adsorbates that are supported in the API. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The adsorbates that are supported throughout the API. + + + .. py:method:: get_slabs(bulk: Union[str, ocpapi.client.models.Bulk]) -> ocpapi.client.models.Slabs + :async: + + Get a unique list of slabs for the input bulk structure. + + :param bulk: If a string, the id of the bulk to use. Otherwise the Bulk + instance to use. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: Slabs for each of the unique surfaces of the material. + + + .. py:method:: get_adsorbate_slab_configs(adsorbate: str, slab: ocpapi.client.models.Slab) -> ocpapi.client.models.AdsorbateSlabConfigs + :async: + + Get a list of possible binding sites for the input adsorbate on the + input slab. + + :param adsorbate: Description of the the adsorbate to place. 
+ :param slab: Information about the slab on which the adsorbate should + be placed. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: Configurations for each adsorbate binding site on the slab. + + + .. py:method:: submit_adsorbate_slab_relaxations(adsorbate: str, adsorbate_configs: List[ocpapi.client.models.Atoms], bulk: ocpapi.client.models.Bulk, slab: ocpapi.client.models.Slab, model: str, ephemeral: bool = False) -> ocpapi.client.models.AdsorbateSlabRelaxationsSystem + :async: + + Starts relaxations of the input adsorbate configurations on the input + slab using energies and forces returned by the input model. Relaxations + are run asynchronously and results can be fetched using the system id + that is returned from this method. + + :param adsorbate: Description of the adsorbate being simulated. + :param adsorbate_configs: List of adsorbate configurations to relax. This + should only include the adsorbates themselves; the surface is + defined in the "slab" field that is a peer to this one. + :param bulk: Details of the bulk material being simulated. + :param slab: The structure of the slab on which adsorbates are placed. + :param model: The model that will be used to evaluate energies and forces + during relaxations. + :param ephemeral: If False (default), any later attempt to delete the + generated relaxations will be rejected. If True, deleting the + relaxations will be allowed, which is generally useful for + testing when there is no reason for results to be persisted. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: IDs of the relaxations. + + + .. py:method:: get_adsorbate_slab_relaxations_request(system_id: str) -> ocpapi.client.models.AdsorbateSlabRelaxationsRequest + :async: + + Fetches the original relaxations request for the input system. + + :param system_id: The ID of the system to fetch. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The original request that was made when submitting relaxations. + + + .. py:method:: get_adsorbate_slab_relaxations_results(system_id: str, config_ids: Optional[List[int]] = None, fields: Optional[List[str]] = None) -> ocpapi.client.models.AdsorbateSlabRelaxationsResults + :async: + + Fetches relaxation results for the input system. + + :param system_id: The system id of the relaxations. + :param config_ids: If defined and not empty, a subset of configurations + to fetch. Otherwise all configurations are returned. + :param fields: If defined and not empty, a subset of fields in each + configuration to fetch. Otherwise all fields are returned. 
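As a hedged illustration of the ``config_ids`` and ``fields`` subsetting described above when calling the client directly (the system ID, configuration IDs, and field names are placeholders only, not values taken from this reference):

.. code-block:: python

    # Illustrative sketch only: the system ID, config IDs, and field names
    # below are placeholders.
    import asyncio

    from ocpapi.client import Client

    async def fetch_subset():
        # Defaults follow the signature above: the metademolab API host over
        # https.
        client = Client()
        return await client.get_adsorbate_slab_relaxations_results(
            system_id="sys-123",
            config_ids=[0, 1],            # only these configurations
            fields=["energy", "status"],  # only these fields per configuration
        )

    results = asyncio.run(fetch_subset())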
+ + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The relaxation results for each configuration in the system. + + + .. py:method:: delete_adsorbate_slab_relaxations(system_id: str) -> None + :async: + + Deletes all relaxation results for the input system. + + :param system_id: The ID of the system to delete. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + + .. py:method:: _run_request(path: str, method: str, **kwargs) -> str + :async: + + Helper method that runs the input request on a thread so that + it doesn't block the event loop on the calling thread. + + :param path: The URL path to make the request against. + :param method: The HTTP method to use (GET, POST, etc.). + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The response body from the request as a string. + + + +.. py:exception:: NonRetryableRequestException(method: str, url: str, cause: str) + + + Bases: :py:obj:`RequestException` + + Exception raised when an API call is rejected for a reason that will + not succeed on retry. For example, this might include a malformed request + or action that is not allowed. + + +.. py:exception:: RateLimitExceededException(method: str, url: str, retry_after: Optional[datetime.timedelta] = None) + + + Bases: :py:obj:`RequestException` + + Exception raised when an API call is rejected because a rate limit has + been exceeded. + + .. attribute:: retry_after + + If known, the time to wait before the next attempt to + call the API should be made. + + +.. py:exception:: RequestException(method: str, url: str, cause: str) + + + Bases: :py:obj:`Exception` + + Exception raised any time there is an error while making an API call. + + +.. py:class:: Adsorbates + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to fetch adsorbates supported in the + API. + + .. py:attribute:: adsorbates_supported + :type: List[str] + + List of adsorbates that can be used in the API. + + +.. py:class:: AdsorbateSlabConfigs + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to fetch placements of a single + absorbate on a slab. + + .. py:attribute:: adsorbate_configs + :type: List[Atoms] + + List of structures, each representing one possible adsorbate placement. + + .. py:attribute:: slab + :type: Slab + + The structure of the slab on which the adsorbate is placed. + + +.. py:class:: AdsorbateSlabRelaxationResult + + + Bases: :py:obj:`_DataModel` + + Stores information about a single adsorbate slab configuration, including + outputs for the model used in relaxations. 
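A hedged sketch of moving one of these results into ASE, using only the fields and methods documented for this class below (the helper name is illustrative):

.. code-block:: python

    # Hedged helper sketch; relies only on attributes and methods documented
    # for AdsorbateSlabRelaxationResult in this reference.
    from ocpapi.client import AdsorbateSlabRelaxationResult, Status

    def relaxed_energy(result: AdsorbateSlabRelaxationResult):
        """Return the predicted energy for a successful relaxation, else None."""
        if result.status != Status.SUCCESS:
            return None
        atoms = result.to_ase_atoms()
        # Energy and forces are attached through a SinglePointCalculator, so
        # the usual ASE accessor works on the converted object.
        return atoms.get_potential_energy()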
+ + The API to fetch relaxation results supports requesting a subset of fields + in order to limit the size of response payloads. Optional attributes will + be defined only if they are including the response. + + .. py:attribute:: config_id + :type: int + + ID of the configuration within the system. + + .. py:attribute:: status + :type: Status + + The status of the request for information about this configuration. + + .. py:attribute:: system_id + :type: Optional[str] + + The ID of the system in which the configuration was originally submitted. + + .. py:attribute:: cell + :type: Optional[Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]]] + + 3x3 matrix with unit cell vectors. + + .. py:attribute:: pbc + :type: Optional[Tuple[bool, bool, bool]] + + Whether the structure is periodic along the a, b, and c lattice vectors, + respectively. + + .. py:attribute:: numbers + :type: Optional[List[int]] + + The atomic number of each atom in the unit cell. + + .. py:attribute:: positions + :type: Optional[List[Tuple[float, float, float]]] + + The coordinates of each atom in the unit cell, relative to the cartesian + frame. + + .. py:attribute:: tags + :type: Optional[List[int]] + + Labels for each atom in the unit cell where 0 represents a subsurface atom + (fixed during optimization), 1 represents a surface atom, and 2 represents + an adsorbate atom. + + .. py:attribute:: energy + :type: Optional[float] + + The energy of the configuration. + + .. py:attribute:: energy_trajectory + :type: Optional[List[float]] + + The energy of the configuration at each point along the relaxation + trajectory. + + .. py:attribute:: forces + :type: Optional[List[Tuple[float, float, float]]] + + The forces on each atom in the relaxed structure. + + .. py:method:: to_ase_atoms() -> ase.Atoms + + Creates an ase.Atoms object with the positions, element numbers, + etc. populated from values on this object. + + The predicted energy and forces will also be copied to the new + ase.Atoms object as a SinglePointCalculator (a calculator that + stores the results of an already-run simulation). + + :returns: ase.Atoms object with values from this object. + + + +.. py:class:: AdsorbateSlabRelaxationsRequest + + + Bases: :py:obj:`_DataModel` + + Stores the request to submit a new batch of adsorbate slab relaxations. + + .. py:attribute:: adsorbate + :type: str + + Description of the adsorbate. + + .. py:attribute:: adsorbate_configs + :type: List[Atoms] + + List of adsorbate placements being relaxed. + + .. py:attribute:: bulk + :type: Bulk + + Information about the original bulk structure used to create the slab. + + .. py:attribute:: slab + :type: Slab + + The structure of the slab on which adsorbates are placed. + + .. py:attribute:: model + :type: str + + The type of the ML model being used during relaxations. + + .. py:attribute:: ephemeral + :type: Optional[bool] + + Whether the relaxations can be deleted (assume they cannot be deleted if + None). + + .. py:attribute:: adsorbate_reaction + :type: Optional[str] + + If possible, an html-formatted string describing the reaction will be added + to this field. + + +.. py:class:: AdsorbateSlabRelaxationsResults + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request for results of adsorbate slab + relaxations. + + .. py:attribute:: configs + :type: List[AdsorbateSlabRelaxationResult] + + List of configurations in the system, each representing one placement of + an adsorbate on a slab surface. + + .. 
py:attribute:: omitted_config_ids + :type: List[int] + + List of IDs of configurations that were requested but omitted by the + server. Results for these IDs can be requested again. + + +.. py:class:: AdsorbateSlabRelaxationsSystem + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to submit a new batch of adsorbate + slab relaxations. + + .. py:attribute:: system_id + :type: str + + Unique ID for this set of relaxations which can be used to fetch results + later. + + .. py:attribute:: config_ids + :type: List[int] + + The list of IDs assigned to each of the input adsorbate placements, in the + same order in which they were submitted. + + +.. py:class:: Atoms + + + Bases: :py:obj:`_DataModel` + + Subset of the fields from an ASE Atoms object that are used within this + API. + + .. py:attribute:: cell + :type: Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]] + + 3x3 matrix with unit cell vectors. + + .. py:attribute:: pbc + :type: Tuple[bool, bool, bool] + + Whether the structure is periodic along the a, b, and c lattice vectors, + respectively. + + .. py:attribute:: numbers + :type: List[int] + + The atomic number of each atom in the unit cell. + + .. py:attribute:: positions + :type: List[Tuple[float, float, float]] + + The coordinates of each atom in the unit cell, relative to the cartesian + frame. + + .. py:attribute:: tags + :type: List[int] + + Labels for each atom in the unit cell where 0 represents a subsurface atom + (fixed during optimization), 1 represents a surface atom, and 2 represents + an adsorbate atom. + + .. py:method:: to_ase_atoms() -> ase.Atoms + + Creates an ase.Atoms object with the positions, element numbers, + etc. populated from values on this object. + + :returns: ase.Atoms object with values from this object. + + + +.. py:class:: Bulk + + + Bases: :py:obj:`_DataModel` + + Stores information about a single bulk material. + + .. py:attribute:: src_id + :type: str + + The ID of the material. + + .. py:attribute:: formula + :type: str + + The chemical formula of the material. + + .. py:attribute:: elements + :type: List[str] + + The list of elements in the material. + + +.. py:class:: Bulks + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to fetch bulks supported in the API. + + .. py:attribute:: bulks_supported + :type: List[Bulk] + + List of bulks that can be used in the API. + + +.. py:class:: Model + + + Bases: :py:obj:`_DataModel` + + Stores information about a single model supported in the API. + + .. py:attribute:: id + :type: str + + The ID of the model. + + +.. py:class:: Models + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request for models supported in the API. + + .. py:attribute:: models + :type: List[Model] + + The list of models that are supported. + + +.. py:class:: Slab + + + Bases: :py:obj:`_DataModel` + + Stores all information about a slab that is returned from the API. + + .. py:attribute:: atoms + :type: Atoms + + The structure of the slab. + + .. py:attribute:: metadata + :type: SlabMetadata + + Extra information about the slab. + + +.. py:class:: SlabMetadata + + + Bases: :py:obj:`_DataModel` + + Stores metadata about a slab that is returned from the API. + + .. py:attribute:: bulk_src_id + :type: str + + The ID of the bulk material from which the slab was derived. + + .. py:attribute:: millers + :type: Tuple[int, int, int] + + The Miller indices of the slab relative to bulk structure. + + .. 
py:attribute:: shift + :type: float + + The position along the vector defined by the Miller indices at which a + cut was taken to generate the slab surface. + + .. py:attribute:: top + :type: bool + + If False, the top and bottom surfaces for this millers/shift pair are + distinct and this slab represents the bottom surface. + + +.. py:class:: Slabs + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to fetch slabs for a bulk structure. + + .. py:attribute:: slabs + :type: List[Slab] + + The list of slabs that were generated from the input bulk structure. + + +.. py:class:: Status(*args, **kwds) + + + Bases: :py:obj:`enum.Enum` + + Relaxation status of a single adsorbate placement on a slab. + + .. py:attribute:: NOT_AVAILABLE + :value: 'not_available' + + The configuration exists but the result is not yet available. It is + possible that checking again in the future could yield a result. + + .. py:attribute:: FAILED_RELAXATION + :value: 'failed_relaxation' + + The relaxation failed for this configuration. + + .. py:attribute:: SUCCESS + :value: 'success' + + The relaxation was successful and the requested information about the + configuration was returned. + + .. py:attribute:: DOES_NOT_EXIST + :value: 'does_not_exist' + + The requested configuration does not exist. + + .. py:method:: __str__() -> str + + Return str(self). + + + +.. py:function:: get_results_ui_url(api_host: str, system_id: str) -> Optional[str] + + Generates the URL at which results for the input system can be + visualized. + + :param api_host: The API host on which the system was run. + :param system_id: ID of the system being visualized. + + :returns: The URL at which the input system can be visualized. None if the + API host is not recognized. + + diff --git a/_sources/autoapi/ocpapi/client/models/index.rst b/_sources/autoapi/ocpapi/client/models/index.rst new file mode 100644 index 000000000..9ec653d38 --- /dev/null +++ b/_sources/autoapi/ocpapi/client/models/index.rst @@ -0,0 +1,455 @@ +:py:mod:`ocpapi.client.models` +============================== + +.. py:module:: ocpapi.client.models + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.client.models._DataModel + ocpapi.client.models.Model + ocpapi.client.models.Models + ocpapi.client.models.Bulk + ocpapi.client.models.Bulks + ocpapi.client.models.Adsorbates + ocpapi.client.models.Atoms + ocpapi.client.models.SlabMetadata + ocpapi.client.models.Slab + ocpapi.client.models.Slabs + ocpapi.client.models.AdsorbateSlabConfigs + ocpapi.client.models.AdsorbateSlabRelaxationsSystem + ocpapi.client.models.AdsorbateSlabRelaxationsRequest + ocpapi.client.models.Status + ocpapi.client.models.AdsorbateSlabRelaxationResult + ocpapi.client.models.AdsorbateSlabRelaxationsResults + + + + +.. py:class:: _DataModel + + + Base class for all data models. + + .. py:attribute:: other_fields + :type: dataclasses_json.CatchAll + + Fields that may have been added to the API that all not yet supported + explicitly in this class. + + +.. py:class:: Model + + + Bases: :py:obj:`_DataModel` + + Stores information about a single model supported in the API. + + .. py:attribute:: id + :type: str + + The ID of the model. + + +.. py:class:: Models + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request for models supported in the API. + + .. py:attribute:: models + :type: List[Model] + + The list of models that are supported. + + +.. 
py:class:: Bulk + + + Bases: :py:obj:`_DataModel` + + Stores information about a single bulk material. + + .. py:attribute:: src_id + :type: str + + The ID of the material. + + .. py:attribute:: formula + :type: str + + The chemical formula of the material. + + .. py:attribute:: elements + :type: List[str] + + The list of elements in the material. + + +.. py:class:: Bulks + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to fetch bulks supported in the API. + + .. py:attribute:: bulks_supported + :type: List[Bulk] + + List of bulks that can be used in the API. + + +.. py:class:: Adsorbates + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to fetch adsorbates supported in the + API. + + .. py:attribute:: adsorbates_supported + :type: List[str] + + List of adsorbates that can be used in the API. + + +.. py:class:: Atoms + + + Bases: :py:obj:`_DataModel` + + Subset of the fields from an ASE Atoms object that are used within this + API. + + .. py:attribute:: cell + :type: Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]] + + 3x3 matrix with unit cell vectors. + + .. py:attribute:: pbc + :type: Tuple[bool, bool, bool] + + Whether the structure is periodic along the a, b, and c lattice vectors, + respectively. + + .. py:attribute:: numbers + :type: List[int] + + The atomic number of each atom in the unit cell. + + .. py:attribute:: positions + :type: List[Tuple[float, float, float]] + + The coordinates of each atom in the unit cell, relative to the cartesian + frame. + + .. py:attribute:: tags + :type: List[int] + + Labels for each atom in the unit cell where 0 represents a subsurface atom + (fixed during optimization), 1 represents a surface atom, and 2 represents + an adsorbate atom. + + .. py:method:: to_ase_atoms() -> ase.Atoms + + Creates an ase.Atoms object with the positions, element numbers, + etc. populated from values on this object. + + :returns: ase.Atoms object with values from this object. + + + +.. py:class:: SlabMetadata + + + Bases: :py:obj:`_DataModel` + + Stores metadata about a slab that is returned from the API. + + .. py:attribute:: bulk_src_id + :type: str + + The ID of the bulk material from which the slab was derived. + + .. py:attribute:: millers + :type: Tuple[int, int, int] + + The Miller indices of the slab relative to bulk structure. + + .. py:attribute:: shift + :type: float + + The position along the vector defined by the Miller indices at which a + cut was taken to generate the slab surface. + + .. py:attribute:: top + :type: bool + + If False, the top and bottom surfaces for this millers/shift pair are + distinct and this slab represents the bottom surface. + + +.. py:class:: Slab + + + Bases: :py:obj:`_DataModel` + + Stores all information about a slab that is returned from the API. + + .. py:attribute:: atoms + :type: Atoms + + The structure of the slab. + + .. py:attribute:: metadata + :type: SlabMetadata + + Extra information about the slab. + + +.. py:class:: Slabs + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to fetch slabs for a bulk structure. + + .. py:attribute:: slabs + :type: List[Slab] + + The list of slabs that were generated from the input bulk structure. + + +.. py:class:: AdsorbateSlabConfigs + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to fetch placements of a single + absorbate on a slab. + + .. 
py:attribute:: adsorbate_configs + :type: List[Atoms] + + List of structures, each representing one possible adsorbate placement. + + .. py:attribute:: slab + :type: Slab + + The structure of the slab on which the adsorbate is placed. + + +.. py:class:: AdsorbateSlabRelaxationsSystem + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to submit a new batch of adsorbate + slab relaxations. + + .. py:attribute:: system_id + :type: str + + Unique ID for this set of relaxations which can be used to fetch results + later. + + .. py:attribute:: config_ids + :type: List[int] + + The list of IDs assigned to each of the input adsorbate placements, in the + same order in which they were submitted. + + +.. py:class:: AdsorbateSlabRelaxationsRequest + + + Bases: :py:obj:`_DataModel` + + Stores the request to submit a new batch of adsorbate slab relaxations. + + .. py:attribute:: adsorbate + :type: str + + Description of the adsorbate. + + .. py:attribute:: adsorbate_configs + :type: List[Atoms] + + List of adsorbate placements being relaxed. + + .. py:attribute:: bulk + :type: Bulk + + Information about the original bulk structure used to create the slab. + + .. py:attribute:: slab + :type: Slab + + The structure of the slab on which adsorbates are placed. + + .. py:attribute:: model + :type: str + + The type of the ML model being used during relaxations. + + .. py:attribute:: ephemeral + :type: Optional[bool] + + Whether the relaxations can be deleted (assume they cannot be deleted if + None). + + .. py:attribute:: adsorbate_reaction + :type: Optional[str] + + If possible, an html-formatted string describing the reaction will be added + to this field. + + +.. py:class:: Status(*args, **kwds) + + + Bases: :py:obj:`enum.Enum` + + Relaxation status of a single adsorbate placement on a slab. + + .. py:attribute:: NOT_AVAILABLE + :value: 'not_available' + + The configuration exists but the result is not yet available. It is + possible that checking again in the future could yield a result. + + .. py:attribute:: FAILED_RELAXATION + :value: 'failed_relaxation' + + The relaxation failed for this configuration. + + .. py:attribute:: SUCCESS + :value: 'success' + + The relaxation was successful and the requested information about the + configuration was returned. + + .. py:attribute:: DOES_NOT_EXIST + :value: 'does_not_exist' + + The requested configuration does not exist. + + .. py:method:: __str__() -> str + + Return str(self). + + + +.. py:class:: AdsorbateSlabRelaxationResult + + + Bases: :py:obj:`_DataModel` + + Stores information about a single adsorbate slab configuration, including + outputs for the model used in relaxations. + + The API to fetch relaxation results supports requesting a subset of fields + in order to limit the size of response payloads. Optional attributes will + be defined only if they are including the response. + + .. py:attribute:: config_id + :type: int + + ID of the configuration within the system. + + .. py:attribute:: status + :type: Status + + The status of the request for information about this configuration. + + .. py:attribute:: system_id + :type: Optional[str] + + The ID of the system in which the configuration was originally submitted. + + .. py:attribute:: cell + :type: Optional[Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]]] + + 3x3 matrix with unit cell vectors. + + .. 
py:attribute:: pbc + :type: Optional[Tuple[bool, bool, bool]] + + Whether the structure is periodic along the a, b, and c lattice vectors, + respectively. + + .. py:attribute:: numbers + :type: Optional[List[int]] + + The atomic number of each atom in the unit cell. + + .. py:attribute:: positions + :type: Optional[List[Tuple[float, float, float]]] + + The coordinates of each atom in the unit cell, relative to the cartesian + frame. + + .. py:attribute:: tags + :type: Optional[List[int]] + + Labels for each atom in the unit cell where 0 represents a subsurface atom + (fixed during optimization), 1 represents a surface atom, and 2 represents + an adsorbate atom. + + .. py:attribute:: energy + :type: Optional[float] + + The energy of the configuration. + + .. py:attribute:: energy_trajectory + :type: Optional[List[float]] + + The energy of the configuration at each point along the relaxation + trajectory. + + .. py:attribute:: forces + :type: Optional[List[Tuple[float, float, float]]] + + The forces on each atom in the relaxed structure. + + .. py:method:: to_ase_atoms() -> ase.Atoms + + Creates an ase.Atoms object with the positions, element numbers, + etc. populated from values on this object. + + The predicted energy and forces will also be copied to the new + ase.Atoms object as a SinglePointCalculator (a calculator that + stores the results of an already-run simulation). + + :returns: ase.Atoms object with values from this object. + + + +.. py:class:: AdsorbateSlabRelaxationsResults + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request for results of adsorbate slab + relaxations. + + .. py:attribute:: configs + :type: List[AdsorbateSlabRelaxationResult] + + List of configurations in the system, each representing one placement of + an adsorbate on a slab surface. + + .. py:attribute:: omitted_config_ids + :type: List[int] + + List of IDs of configurations that were requested but omitted by the + server. Results for these IDs can be requested again. + + diff --git a/_sources/autoapi/ocpapi/client/ui/index.rst b/_sources/autoapi/ocpapi/client/ui/index.rst new file mode 100644 index 000000000..36ab6e630 --- /dev/null +++ b/_sources/autoapi/ocpapi/client/ui/index.rst @@ -0,0 +1,44 @@ +:py:mod:`ocpapi.client.ui` +========================== + +.. py:module:: ocpapi.client.ui + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.client.ui.get_results_ui_url + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.client.ui._API_TO_UI_HOSTS + + +.. py:data:: _API_TO_UI_HOSTS + :type: Dict[str, str] + + + +.. py:function:: get_results_ui_url(api_host: str, system_id: str) -> Optional[str] + + Generates the URL at which results for the input system can be + visualized. + + :param api_host: The API host on which the system was run. + :param system_id: ID of the system being visualized. + + :returns: The URL at which the input system can be visualized. None if the + API host is not recognized. + + diff --git a/_sources/autoapi/ocpapi/index.rst b/_sources/autoapi/ocpapi/index.rst new file mode 100644 index 000000000..5effd01e8 --- /dev/null +++ b/_sources/autoapi/ocpapi/index.rst @@ -0,0 +1,1005 @@ +:py:mod:`ocpapi` +================ + +.. py:module:: ocpapi + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + client/index.rst + tests/index.rst + workflows/index.rst + + +Submodules +---------- +.. 
toctree:: + :titlesonly: + :maxdepth: 1 + + version/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.Client + ocpapi.Adsorbates + ocpapi.AdsorbateSlabConfigs + ocpapi.AdsorbateSlabRelaxationResult + ocpapi.AdsorbateSlabRelaxationsRequest + ocpapi.AdsorbateSlabRelaxationsResults + ocpapi.AdsorbateSlabRelaxationsSystem + ocpapi.Atoms + ocpapi.Bulk + ocpapi.Bulks + ocpapi.Model + ocpapi.Models + ocpapi.Slab + ocpapi.SlabMetadata + ocpapi.Slabs + ocpapi.Status + ocpapi.AdsorbateBindingSites + ocpapi.AdsorbateSlabRelaxations + ocpapi.Lifetime + ocpapi.keep_all_slabs + ocpapi.keep_slabs_with_miller_indices + ocpapi.prompt_for_slabs_to_keep + ocpapi.RateLimitLogging + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.get_results_ui_url + ocpapi.find_adsorbate_binding_sites + ocpapi.get_adsorbate_slab_relaxation_results + ocpapi.wait_for_adsorbate_slab_relaxations + ocpapi.retry_api_calls + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.NO_LIMIT + ocpapi.NoLimitType + + +.. py:class:: Client(host: str = 'open-catalyst-api.metademolab.com', scheme: str = 'https') + + + Exposes each route in the OCP API as a method. + + .. py:property:: host + :type: str + + The host being called by this client. + + .. py:method:: get_models() -> ocpapi.client.models.Models + :async: + + Fetch the list of models that are supported in the API. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The models that are supported in the API. + + + .. py:method:: get_bulks() -> ocpapi.client.models.Bulks + :async: + + Fetch the list of bulk materials that are supported in the API. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The bulks that are supported throughout the API. + + + .. py:method:: get_adsorbates() -> ocpapi.client.models.Adsorbates + :async: + + Fetch the list of adsorbates that are supported in the API. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The adsorbates that are supported throughout the API. + + + .. py:method:: get_slabs(bulk: Union[str, ocpapi.client.models.Bulk]) -> ocpapi.client.models.Slabs + :async: + + Get a unique list of slabs for the input bulk structure. + + :param bulk: If a string, the id of the bulk to use. Otherwise the Bulk + instance to use. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. 
+ :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: Slabs for each of the unique surfaces of the material. + + + .. py:method:: get_adsorbate_slab_configs(adsorbate: str, slab: ocpapi.client.models.Slab) -> ocpapi.client.models.AdsorbateSlabConfigs + :async: + + Get a list of possible binding sites for the input adsorbate on the + input slab. + + :param adsorbate: Description of the the adsorbate to place. + :param slab: Information about the slab on which the adsorbate should + be placed. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: Configurations for each adsorbate binding site on the slab. + + + .. py:method:: submit_adsorbate_slab_relaxations(adsorbate: str, adsorbate_configs: List[ocpapi.client.models.Atoms], bulk: ocpapi.client.models.Bulk, slab: ocpapi.client.models.Slab, model: str, ephemeral: bool = False) -> ocpapi.client.models.AdsorbateSlabRelaxationsSystem + :async: + + Starts relaxations of the input adsorbate configurations on the input + slab using energies and forces returned by the input model. Relaxations + are run asynchronously and results can be fetched using the system id + that is returned from this method. + + :param adsorbate: Description of the adsorbate being simulated. + :param adsorbate_configs: List of adsorbate configurations to relax. This + should only include the adsorbates themselves; the surface is + defined in the "slab" field that is a peer to this one. + :param bulk: Details of the bulk material being simulated. + :param slab: The structure of the slab on which adsorbates are placed. + :param model: The model that will be used to evaluate energies and forces + during relaxations. + :param ephemeral: If False (default), any later attempt to delete the + generated relaxations will be rejected. If True, deleting the + relaxations will be allowed, which is generally useful for + testing when there is no reason for results to be persisted. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: IDs of the relaxations. + + + .. py:method:: get_adsorbate_slab_relaxations_request(system_id: str) -> ocpapi.client.models.AdsorbateSlabRelaxationsRequest + :async: + + Fetches the original relaxations request for the input system. + + :param system_id: The ID of the system to fetch. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The original request that was made when submitting relaxations. + + + .. 
py:method:: get_adsorbate_slab_relaxations_results(system_id: str, config_ids: Optional[List[int]] = None, fields: Optional[List[str]] = None) -> ocpapi.client.models.AdsorbateSlabRelaxationsResults + :async: + + Fetches relaxation results for the input system. + + :param system_id: The system id of the relaxations. + :param config_ids: If defined and not empty, a subset of configurations + to fetch. Otherwise all configurations are returned. + :param fields: If defined and not empty, a subset of fields in each + configuration to fetch. Otherwise all fields are returned. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The relaxation results for each configuration in the system. + + + .. py:method:: delete_adsorbate_slab_relaxations(system_id: str) -> None + :async: + + Deletes all relaxation results for the input system. + + :param system_id: The ID of the system to delete. + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + + .. py:method:: _run_request(path: str, method: str, **kwargs) -> str + :async: + + Helper method that runs the input request on a thread so that + it doesn't block the event loop on the calling thread. + + :param path: The URL path to make the request against. + :param method: The HTTP method to use (GET, POST, etc.). + + :raises RateLimitExceededException: If the call was rejected because a + server side rate limit was breached. + :raises NonRetryableRequestException: If the call was rejected and a retry + is not expected to succeed. + :raises RequestException: For all other errors when making the request; it + is possible, though not guaranteed, that a retry could succeed. + + :returns: The response body from the request as a string. + + + +.. py:exception:: NonRetryableRequestException(method: str, url: str, cause: str) + + + Bases: :py:obj:`RequestException` + + Exception raised when an API call is rejected for a reason that will + not succeed on retry. For example, this might include a malformed request + or action that is not allowed. + + +.. py:exception:: RateLimitExceededException(method: str, url: str, retry_after: Optional[datetime.timedelta] = None) + + + Bases: :py:obj:`RequestException` + + Exception raised when an API call is rejected because a rate limit has + been exceeded. + + .. attribute:: retry_after + + If known, the time to wait before the next attempt to + call the API should be made. + + +.. py:exception:: RequestException(method: str, url: str, cause: str) + + + Bases: :py:obj:`Exception` + + Exception raised any time there is an error while making an API call. + + +.. py:class:: Adsorbates + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to fetch adsorbates supported in the + API. + + .. py:attribute:: adsorbates_supported + :type: List[str] + + List of adsorbates that can be used in the API. + + +.. 
py:class:: AdsorbateSlabConfigs + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to fetch placements of a single + absorbate on a slab. + + .. py:attribute:: adsorbate_configs + :type: List[Atoms] + + List of structures, each representing one possible adsorbate placement. + + .. py:attribute:: slab + :type: Slab + + The structure of the slab on which the adsorbate is placed. + + +.. py:class:: AdsorbateSlabRelaxationResult + + + Bases: :py:obj:`_DataModel` + + Stores information about a single adsorbate slab configuration, including + outputs for the model used in relaxations. + + The API to fetch relaxation results supports requesting a subset of fields + in order to limit the size of response payloads. Optional attributes will + be defined only if they are including the response. + + .. py:attribute:: config_id + :type: int + + ID of the configuration within the system. + + .. py:attribute:: status + :type: Status + + The status of the request for information about this configuration. + + .. py:attribute:: system_id + :type: Optional[str] + + The ID of the system in which the configuration was originally submitted. + + .. py:attribute:: cell + :type: Optional[Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]]] + + 3x3 matrix with unit cell vectors. + + .. py:attribute:: pbc + :type: Optional[Tuple[bool, bool, bool]] + + Whether the structure is periodic along the a, b, and c lattice vectors, + respectively. + + .. py:attribute:: numbers + :type: Optional[List[int]] + + The atomic number of each atom in the unit cell. + + .. py:attribute:: positions + :type: Optional[List[Tuple[float, float, float]]] + + The coordinates of each atom in the unit cell, relative to the cartesian + frame. + + .. py:attribute:: tags + :type: Optional[List[int]] + + Labels for each atom in the unit cell where 0 represents a subsurface atom + (fixed during optimization), 1 represents a surface atom, and 2 represents + an adsorbate atom. + + .. py:attribute:: energy + :type: Optional[float] + + The energy of the configuration. + + .. py:attribute:: energy_trajectory + :type: Optional[List[float]] + + The energy of the configuration at each point along the relaxation + trajectory. + + .. py:attribute:: forces + :type: Optional[List[Tuple[float, float, float]]] + + The forces on each atom in the relaxed structure. + + .. py:method:: to_ase_atoms() -> ase.Atoms + + Creates an ase.Atoms object with the positions, element numbers, + etc. populated from values on this object. + + The predicted energy and forces will also be copied to the new + ase.Atoms object as a SinglePointCalculator (a calculator that + stores the results of an already-run simulation). + + :returns: ase.Atoms object with values from this object. + + + +.. py:class:: AdsorbateSlabRelaxationsRequest + + + Bases: :py:obj:`_DataModel` + + Stores the request to submit a new batch of adsorbate slab relaxations. + + .. py:attribute:: adsorbate + :type: str + + Description of the adsorbate. + + .. py:attribute:: adsorbate_configs + :type: List[Atoms] + + List of adsorbate placements being relaxed. + + .. py:attribute:: bulk + :type: Bulk + + Information about the original bulk structure used to create the slab. + + .. py:attribute:: slab + :type: Slab + + The structure of the slab on which adsorbates are placed. + + .. py:attribute:: model + :type: str + + The type of the ML model being used during relaxations. + + .. 
py:attribute:: ephemeral + :type: Optional[bool] + + Whether the relaxations can be deleted (assume they cannot be deleted if + None). + + .. py:attribute:: adsorbate_reaction + :type: Optional[str] + + If possible, an html-formatted string describing the reaction will be added + to this field. + + +.. py:class:: AdsorbateSlabRelaxationsResults + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request for results of adsorbate slab + relaxations. + + .. py:attribute:: configs + :type: List[AdsorbateSlabRelaxationResult] + + List of configurations in the system, each representing one placement of + an adsorbate on a slab surface. + + .. py:attribute:: omitted_config_ids + :type: List[int] + + List of IDs of configurations that were requested but omitted by the + server. Results for these IDs can be requested again. + + +.. py:class:: AdsorbateSlabRelaxationsSystem + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to submit a new batch of adsorbate + slab relaxations. + + .. py:attribute:: system_id + :type: str + + Unique ID for this set of relaxations which can be used to fetch results + later. + + .. py:attribute:: config_ids + :type: List[int] + + The list of IDs assigned to each of the input adsorbate placements, in the + same order in which they were submitted. + + +.. py:class:: Atoms + + + Bases: :py:obj:`_DataModel` + + Subset of the fields from an ASE Atoms object that are used within this + API. + + .. py:attribute:: cell + :type: Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]] + + 3x3 matrix with unit cell vectors. + + .. py:attribute:: pbc + :type: Tuple[bool, bool, bool] + + Whether the structure is periodic along the a, b, and c lattice vectors, + respectively. + + .. py:attribute:: numbers + :type: List[int] + + The atomic number of each atom in the unit cell. + + .. py:attribute:: positions + :type: List[Tuple[float, float, float]] + + The coordinates of each atom in the unit cell, relative to the cartesian + frame. + + .. py:attribute:: tags + :type: List[int] + + Labels for each atom in the unit cell where 0 represents a subsurface atom + (fixed during optimization), 1 represents a surface atom, and 2 represents + an adsorbate atom. + + .. py:method:: to_ase_atoms() -> ase.Atoms + + Creates an ase.Atoms object with the positions, element numbers, + etc. populated from values on this object. + + :returns: ase.Atoms object with values from this object. + + + +.. py:class:: Bulk + + + Bases: :py:obj:`_DataModel` + + Stores information about a single bulk material. + + .. py:attribute:: src_id + :type: str + + The ID of the material. + + .. py:attribute:: formula + :type: str + + The chemical formula of the material. + + .. py:attribute:: elements + :type: List[str] + + The list of elements in the material. + + +.. py:class:: Bulks + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to fetch bulks supported in the API. + + .. py:attribute:: bulks_supported + :type: List[Bulk] + + List of bulks that can be used in the API. + + +.. py:class:: Model + + + Bases: :py:obj:`_DataModel` + + Stores information about a single model supported in the API. + + .. py:attribute:: id + :type: str + + The ID of the model. + + +.. py:class:: Models + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request for models supported in the API. + + .. py:attribute:: models + :type: List[Model] + + The list of models that are supported. + + +.. 
py:class:: Slab + + + Bases: :py:obj:`_DataModel` + + Stores all information about a slab that is returned from the API. + + .. py:attribute:: atoms + :type: Atoms + + The structure of the slab. + + .. py:attribute:: metadata + :type: SlabMetadata + + Extra information about the slab. + + +.. py:class:: SlabMetadata + + + Bases: :py:obj:`_DataModel` + + Stores metadata about a slab that is returned from the API. + + .. py:attribute:: bulk_src_id + :type: str + + The ID of the bulk material from which the slab was derived. + + .. py:attribute:: millers + :type: Tuple[int, int, int] + + The Miller indices of the slab relative to bulk structure. + + .. py:attribute:: shift + :type: float + + The position along the vector defined by the Miller indices at which a + cut was taken to generate the slab surface. + + .. py:attribute:: top + :type: bool + + If False, the top and bottom surfaces for this millers/shift pair are + distinct and this slab represents the bottom surface. + + +.. py:class:: Slabs + + + Bases: :py:obj:`_DataModel` + + Stores the response from a request to fetch slabs for a bulk structure. + + .. py:attribute:: slabs + :type: List[Slab] + + The list of slabs that were generated from the input bulk structure. + + +.. py:class:: Status(*args, **kwds) + + + Bases: :py:obj:`enum.Enum` + + Relaxation status of a single adsorbate placement on a slab. + + .. py:attribute:: NOT_AVAILABLE + :value: 'not_available' + + The configuration exists but the result is not yet available. It is + possible that checking again in the future could yield a result. + + .. py:attribute:: FAILED_RELAXATION + :value: 'failed_relaxation' + + The relaxation failed for this configuration. + + .. py:attribute:: SUCCESS + :value: 'success' + + The relaxation was successful and the requested information about the + configuration was returned. + + .. py:attribute:: DOES_NOT_EXIST + :value: 'does_not_exist' + + The requested configuration does not exist. + + .. py:method:: __str__() -> str + + Return str(self). + + + +.. py:function:: get_results_ui_url(api_host: str, system_id: str) -> Optional[str] + + Generates the URL at which results for the input system can be + visualized. + + :param api_host: The API host on which the system was run. + :param system_id: ID of the system being visualized. + + :returns: The URL at which the input system can be visualized. None if the + API host is not recognized. + + +.. py:class:: AdsorbateBindingSites + + + Stores the inputs and results of a set of relaxations of adsorbate + placements on the surface of a slab. + + .. py:attribute:: adsorbate + :type: str + + Description of the adsorbate. + + .. py:attribute:: bulk + :type: fairchem.demo.ocpapi.client.Bulk + + The bulk material that was being modeled. + + .. py:attribute:: model + :type: str + + The type of the model that was run. + + .. py:attribute:: slabs + :type: List[AdsorbateSlabRelaxations] + + The list of slabs that were generated from the bulk structure. Each + contains its own list of adsorbate placements. + + +.. py:class:: AdsorbateSlabRelaxations + + + Stores the relaxations of adsorbate placements on the surface of a slab. + + .. py:attribute:: slab + :type: fairchem.demo.ocpapi.client.Slab + + The slab on which the adsorbate was placed. + + .. py:attribute:: configs + :type: List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult] + + Details of the relaxation of each adsorbate placement, including the + final position. + + .. 
py:attribute:: system_id + :type: str + + The ID of the system that stores all of the relaxations. + + .. py:attribute:: api_host + :type: str + + The API host on which the relaxations were run. + + .. py:attribute:: ui_url + :type: Optional[str] + + The URL at which results can be visualized. + + +.. py:class:: Lifetime(*args, **kwds) + + + Bases: :py:obj:`enum.Enum` + + Represents different lifetimes when running relaxations. + + .. py:attribute:: SAVE + + The relaxation will be available on API servers indefinitely. It will not + be possible to delete the relaxation in the future. + + .. py:attribute:: MARK_EPHEMERAL + + The relaxation will be saved on API servers, but can be deleted at any time + in the future. + + .. py:attribute:: DELETE + + The relaxation will be deleted from API servers as soon as the results have + been fetched. + + +.. py:exception:: UnsupportedAdsorbateException(adsorbate: str) + + + Bases: :py:obj:`AdsorbatesException` + + Exception raised when an adsorbate is not supported in the API. + + +.. py:exception:: UnsupportedBulkException(bulk: str) + + + Bases: :py:obj:`AdsorbatesException` + + Exception raised when a bulk material is not supported in the API. + + +.. py:exception:: UnsupportedModelException(model: str, allowed_models: List[str]) + + + Bases: :py:obj:`AdsorbatesException` + + Exception raised when a model is not supported in the API. + + +.. py:function:: find_adsorbate_binding_sites(adsorbate: str, bulk: str, model: str = 'equiformer_v2_31M_s2ef_all_md', adslab_filter: Callable[[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]], Awaitable[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]]] = _DEFAULT_ADSLAB_FILTER, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT, lifetime: Lifetime = Lifetime.SAVE) -> AdsorbateBindingSites + :async: + + Search for adsorbate binding sites on surfaces of a bulk material. + This executes the following steps: + + 1. Ensure that both the adsorbate and bulk are supported in the + OCP API. + 2. Enumerate unique surfaces from the bulk material. + 3. Enumerate likely binding sites for the input adsorbate on each + of the generated surfaces. + 4. Filter the list of generated adsorbate/slab (adslab) configurations + using the input adslab_filter. + 5. Relax each generated surface+adsorbate structure by refining + atomic positions to minimize forces generated by the input model. + + :param adsorbate: Description of the adsorbate to place. + :param bulk: The ID (typically Materials Project MP ID) of the bulk material + on which the adsorbate will be placed. + :param model: The type of the model to use when calculating forces during + relaxations. + :param adslab_filter: A function that modifies the set of adsorbate/slab + configurations that will be relaxed. This can be used to subselect + slabs and/or adsorbate configurations. + :param client: The OCP API client to use. + :param lifetime: Whether relaxations should be saved on the server, be marked + as ephemeral (allowing them to deleted in the future), or deleted + immediately. + + :returns: Details of each adsorbate binding site, including results of relaxing + to locally-optimized positions using the input model. + + :raises UnsupportedModelException: If the requested model is not supported. + :raises UnsupportedBulkException: If the requested bulk is not supported. + :raises UnsupportedAdsorbateException: If the requested adsorbate is not + supported. + + +.. 
py:function:: get_adsorbate_slab_relaxation_results(system_id: str, config_ids: Optional[List[int]] = None, fields: Optional[List[str]] = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult] + :async: + + Wrapper around Client.get_adsorbate_slab_relaxations_results() that + handles retries, including re-fetching individual configurations that + are initially omitted. + + :param client: The client to use when making API calls. + :param system_id: The system ID of the relaxations. + :param config_ids: If defined and not empty, a subset of configurations + to fetch. Otherwise all configurations are returned. + :param fields: If defined and not empty, a subset of fields in each + configuration to fetch. Otherwise all fields are returned. + + :returns: List of relaxation results, one for each adsorbate configuration in + the system. + + +.. py:function:: wait_for_adsorbate_slab_relaxations(system_id: str, check_immediately: bool = False, slow_interval_sec: float = 30, fast_interval_sec: float = 10, pbar: Optional[tqdm.tqdm] = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) -> Dict[int, fairchem.demo.ocpapi.client.Status] + :async: + + Blocks until all relaxations in the input system have finished, whether + successfully or not. + + Relaxations are queued in the API, waiting until machines are ready to + run them. Once started, they can take 1-2 minutes to finish. This method + initially sleeps "slow_interval_sec" seconds between each check for any + relaxations having finished. Once at least one result is ready, subsequent + sleeps are for "fast_interval_sec" seconds. + + :param system_id: The ID of the system for which relaxations are running. + :param check_immediately: If False (default), sleep before the first check + for relaxations having finished. If True, check whether relaxations + have finished immediately on entering this function. + :param slow_interval_sec: The number of seconds to wait between each check + while all are still running. + :param fast_interval_sec: The number of seconds to wait between each check + when at least one relaxation has finished in the system. + :param pbar: A tqdm instance that tracks the number of configurations that + have finished. This will be updated with the number of individual + configurations whose relaxations have finished. + :param client: The client to use when making API calls. + + :returns: Map of config IDs in the system to their terminal status. + + +.. py:class:: keep_all_slabs + + + Adslab filter than returns all slabs. + + .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] + :async: + + + +.. py:class:: keep_slabs_with_miller_indices(miller_indices: Iterable[Tuple[int, int, int]]) + + + Adslab filter that keeps any slabs with the configured miller indices. + Slabs with other miller indices will be ignored. + + .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] + :async: + + + +.. py:class:: prompt_for_slabs_to_keep + + + Adslab filter than presents the user with an interactive prompt to choose + which of the input slabs to keep. + + .. py:method:: _sort_key(adslab: fairchem.demo.ocpapi.client.AdsorbateSlabConfigs) -> Tuple[Tuple[int, int, int], float, str] + :staticmethod: + + Generates a sort key from the input adslab. 
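A minimal usage sketch (not part of the generated reference above): it combines ``find_adsorbate_binding_sites`` with the ``keep_slabs_with_miller_indices`` filter documented earlier on this page. The import path assumes the top-level re-exports listed in the package summary (some releases expose the same names under ``fairchem.demo.ocpapi``), and the adsorbate string and bulk ID are illustrative placeholders.

.. code-block:: python

    import asyncio

    from ocpapi import (
        Lifetime,
        find_adsorbate_binding_sites,
        keep_slabs_with_miller_indices,
    )

    async def main() -> None:
        results = await find_adsorbate_binding_sites(
            adsorbate="*OH",   # placeholder adsorbate description
            bulk="mp-30",      # placeholder Materials Project bulk ID
            adslab_filter=keep_slabs_with_miller_indices([(1, 1, 1)]),
            lifetime=Lifetime.MARK_EPHEMERAL,  # results may be deleted later
        )
        for slab in results.slabs:
            print(slab.system_id, slab.ui_url)
            relaxed = [c for c in slab.configs if c.energy is not None]
            if relaxed:
                best = min(relaxed, key=lambda c: c.energy)
                print("lowest-energy config:", best.config_id, best.energy)

    asyncio.run(main())

Using ``Lifetime.MARK_EPHEMERAL`` keeps open the option of deleting the server-side relaxations after the results have been fetched, per the ``Lifetime`` documentation above.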
Returns the miller indices, + shift, and top/bottom label so that they will be sorted by those values + in that order. + + + .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] + :async: + + + +.. py:data:: NO_LIMIT + :type: NoLimitType + :value: 0 + + + +.. py:data:: NoLimitType + + + +.. py:class:: RateLimitLogging + + + Controls logging when rate limits are hit. + + .. py:attribute:: logger + :type: logging.Logger + + The logger to use. + + .. py:attribute:: action + :type: str + + A short description of the action being attempted. + + +.. py:function:: retry_api_calls(max_attempts: Union[int, NoLimitType] = 3, rate_limit_logging: Optional[RateLimitLogging] = None, fixed_wait_sec: float = 2, max_jitter_sec: float = 1) -> Any + + Decorator with sensible defaults for retrying calls to the OCP API. + + :param max_attempts: The maximum number of calls to make. If NO_LIMIT, + retries will be made forever. + :param rate_limit_logging: If not None, log statements will be generated + using this configuration when a rate limit is hit. + :param fixed_wait_sec: The fixed number of seconds to wait when retrying an + exception that does *not* include a retry-after value. The default + value is sensible; this is exposed mostly for testing. + :param max_jitter_sec: The maximum number of seconds that will be randomly + added to wait times. The default value is sensible; this is exposed + mostly for testing. + + diff --git a/_sources/autoapi/ocpapi/tests/index.rst b/_sources/autoapi/ocpapi/tests/index.rst new file mode 100644 index 000000000..1cd223730 --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/index.rst @@ -0,0 +1,16 @@ +:py:mod:`ocpapi.tests` +====================== + +.. py:module:: ocpapi.tests + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + integration/index.rst + unit/index.rst + + diff --git a/_sources/autoapi/ocpapi/tests/integration/client/index.rst b/_sources/autoapi/ocpapi/tests/integration/client/index.rst new file mode 100644 index 000000000..9c974fdc2 --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/integration/client/index.rst @@ -0,0 +1,16 @@ +:py:mod:`ocpapi.tests.integration.client` +========================================= + +.. py:module:: ocpapi.tests.integration.client + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + test_client/index.rst + test_ui/index.rst + + diff --git a/_sources/autoapi/ocpapi/tests/integration/client/test_client/index.rst b/_sources/autoapi/ocpapi/tests/integration/client/test_client/index.rst new file mode 100644 index 000000000..21b6a4f80 --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/integration/client/test_client/index.rst @@ -0,0 +1,105 @@ +:py:mod:`ocpapi.tests.integration.client.test_client` +===================================================== + +.. py:module:: ocpapi.tests.integration.client.test_client + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.integration.client.test_client.TestClient + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.integration.client.test_client._ensure_system_deleted + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.integration.client.test_client.log + + +.. py:data:: log + + + +.. py:function:: _ensure_system_deleted(client: fairchem.demo.ocpapi.client.Client, system_id: str) -> AsyncGenerator[None, None] + :async: + + Immediately yields control to the caller. 
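A hedged sketch of applying the ``retry_api_calls`` decorator together with ``RateLimitLogging``, both documented above. It assumes ``RateLimitLogging`` can be constructed directly from its two documented attributes and that the decorator wraps coroutine functions such as the async ``Client`` methods; ``fetch_bulks_with_retries`` is a hypothetical helper, not part of the library.

.. code-block:: python

    import logging

    from ocpapi import Client, RateLimitLogging, retry_api_calls

    log = logging.getLogger("ocpapi-example")

    @retry_api_calls(
        max_attempts=5,
        rate_limit_logging=RateLimitLogging(logger=log, action="fetch bulks"),
    )
    async def fetch_bulks_with_retries(client: Client):
        # Retried on retryable failures; when a rate limit is hit, a log line
        # is emitted using the logger and action configured above.
        return await client.get_bulks()

Passing ``max_attempts=NO_LIMIT`` would instead retry indefinitely, as described in the ``retry_api_calls`` documentation above.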
When control returns to this + function, try to delete the system with the input id. + + +.. py:class:: TestClient(methodName='runTest') + + + Bases: :py:obj:`unittest.IsolatedAsyncioTestCase` + + Tests that calls to a real server are handled correctly. + + .. py:attribute:: CLIENT + :type: fairchem.demo.ocpapi.client.Client + + + + .. py:attribute:: KNOWN_SYSTEM_ID + :type: str + :value: 'f9eacd8f-748c-41dd-ae43-f263dd36d735' + + + + .. py:method:: test_get_models() -> None + :async: + + + .. py:method:: test_get_bulks() -> None + :async: + + + .. py:method:: test_get_adsorbates() -> None + :async: + + + .. py:method:: test_get_slabs() -> None + :async: + + + .. py:method:: test_get_adsorbate_slab_configs() -> None + :async: + + + .. py:method:: test_submit_adsorbate_slab_relaxations__gemnet_oc() -> None + :async: + + + .. py:method:: test_submit_adsorbate_slab_relaxations__equiformer_v2() -> None + :async: + + + .. py:method:: test_get_adsorbate_slab_relaxations_request() -> None + :async: + + + .. py:method:: test_get_adsorbate_slab_relaxations_results__all_fields_and_configs() -> None + :async: + + + .. py:method:: test_get_adsorbate_slab_relaxations_results__limited_fields_and_configs() -> None + :async: + + + diff --git a/_sources/autoapi/ocpapi/tests/integration/client/test_ui/index.rst b/_sources/autoapi/ocpapi/tests/integration/client/test_ui/index.rst new file mode 100644 index 000000000..40e8c01fb --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/integration/client/test_ui/index.rst @@ -0,0 +1,42 @@ +:py:mod:`ocpapi.tests.integration.client.test_ui` +================================================= + +.. py:module:: ocpapi.tests.integration.client.test_ui + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.integration.client.test_ui.TestUI + + + + +.. py:class:: TestUI(methodName='runTest') + + + Bases: :py:obj:`unittest.TestCase` + + Tests that calls to a real server are handled correctly. + + .. py:attribute:: API_HOST + :type: str + :value: 'open-catalyst-api.metademolab.com' + + + + .. py:attribute:: KNOWN_SYSTEM_ID + :type: str + :value: 'f9eacd8f-748c-41dd-ae43-f263dd36d735' + + + + .. py:method:: test_get_results_ui_url() -> None + + + diff --git a/_sources/autoapi/ocpapi/tests/integration/index.rst b/_sources/autoapi/ocpapi/tests/integration/index.rst new file mode 100644 index 000000000..bb122bd07 --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/integration/index.rst @@ -0,0 +1,16 @@ +:py:mod:`ocpapi.tests.integration` +================================== + +.. py:module:: ocpapi.tests.integration + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + client/index.rst + workflows/index.rst + + diff --git a/_sources/autoapi/ocpapi/tests/integration/workflows/index.rst b/_sources/autoapi/ocpapi/tests/integration/workflows/index.rst new file mode 100644 index 000000000..32b67d762 --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/integration/workflows/index.rst @@ -0,0 +1,15 @@ +:py:mod:`ocpapi.tests.integration.workflows` +============================================ + +.. py:module:: ocpapi.tests.integration.workflows + + +Submodules +---------- +.. 
toctree:: + :titlesonly: + :maxdepth: 1 + + test_adsorbates/index.rst + + diff --git a/_sources/autoapi/ocpapi/tests/integration/workflows/test_adsorbates/index.rst b/_sources/autoapi/ocpapi/tests/integration/workflows/test_adsorbates/index.rst new file mode 100644 index 000000000..a87115e36 --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/integration/workflows/test_adsorbates/index.rst @@ -0,0 +1,50 @@ +:py:mod:`ocpapi.tests.integration.workflows.test_adsorbates` +============================================================ + +.. py:module:: ocpapi.tests.integration.workflows.test_adsorbates + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.integration.workflows.test_adsorbates.TestAdsorbates + + + + +.. py:class:: TestAdsorbates(methodName='runTest') + + + Bases: :py:obj:`unittest.IsolatedAsyncioTestCase` + + Tests that workflow methods run against a real server execute correctly. + + .. py:attribute:: CLIENT + :type: fairchem.demo.ocpapi.client.Client + + + + .. py:attribute:: KNOWN_SYSTEM_ID + :type: str + :value: 'f9eacd8f-748c-41dd-ae43-f263dd36d735' + + + + .. py:method:: test_get_adsorbate_slab_relaxation_results() -> None + :async: + + + .. py:method:: test_wait_for_adsorbate_slab_relaxations() -> None + :async: + + + .. py:method:: test_find_adsorbate_binding_sites() -> None + :async: + + + diff --git a/_sources/autoapi/ocpapi/tests/unit/client/index.rst b/_sources/autoapi/ocpapi/tests/unit/client/index.rst new file mode 100644 index 000000000..c5c11f4db --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/unit/client/index.rst @@ -0,0 +1,17 @@ +:py:mod:`ocpapi.tests.unit.client` +================================== + +.. py:module:: ocpapi.tests.unit.client + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + test_client/index.rst + test_models/index.rst + test_ui/index.rst + + diff --git a/_sources/autoapi/ocpapi/tests/unit/client/test_client/index.rst b/_sources/autoapi/ocpapi/tests/unit/client/test_client/index.rst new file mode 100644 index 000000000..e3acb888e --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/unit/client/test_client/index.rst @@ -0,0 +1,78 @@ +:py:mod:`ocpapi.tests.unit.client.test_client` +============================================== + +.. py:module:: ocpapi.tests.unit.client.test_client + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.unit.client.test_client.TestClient + + + + +.. py:class:: TestClient(methodName='runTest') + + + Bases: :py:obj:`unittest.IsolatedAsyncioTestCase` + + Tests with mocked responses to ensure that they are handled correctly. + + .. py:method:: _run_common_tests_against_route(method: str, route: str, client_method_name: str, successful_response_code: int, successful_response_body: str, successful_response_object: Optional[fairchem.demo.ocpapi.client.models._DataModel], client_method_args: Optional[Dict[str, Any]] = None, expected_request_params: Optional[Dict[str, Any]] = None, expected_request_body: Optional[Dict[str, Any]] = None) -> None + :async: + + + .. py:method:: test_host() -> None + + + .. py:method:: test_get_models() -> None + :async: + + + .. py:method:: test_get_bulks() -> None + :async: + + + .. py:method:: test_get_adsorbates() -> None + :async: + + + .. py:method:: test_get_slabs__bulk_by_id() -> None + :async: + + + .. py:method:: test_get_slabs__bulk_by_obj() -> None + :async: + + + .. py:method:: test_get_adsorbate_slab_configurations() -> None + :async: + + + .. 
py:method:: test_submit_adsorbate_slab_relaxations() -> None + :async: + + + .. py:method:: test_get_adsorbate_slab_relaxations_request() -> None + :async: + + + .. py:method:: test_get_adsorbate_slab_relaxations_results__all_args() -> None + :async: + + + .. py:method:: test_get_adsorbate_slab_relaxations_results__req_args_only() -> None + :async: + + + .. py:method:: test_delete_adsorbate_slab_relaxations() -> None + :async: + + + diff --git a/_sources/autoapi/ocpapi/tests/unit/client/test_models/index.rst b/_sources/autoapi/ocpapi/tests/unit/client/test_models/index.rst new file mode 100644 index 000000000..c268ccceb --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/unit/client/test_models/index.rst @@ -0,0 +1,207 @@ +:py:mod:`ocpapi.tests.unit.client.test_models` +============================================== + +.. py:module:: ocpapi.tests.unit.client.test_models + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.unit.client.test_models.ModelTestWrapper + ocpapi.tests.unit.client.test_models.TestModel + ocpapi.tests.unit.client.test_models.TestModels + ocpapi.tests.unit.client.test_models.TestBulk + ocpapi.tests.unit.client.test_models.TestBulks + ocpapi.tests.unit.client.test_models.TestAdsorbates + ocpapi.tests.unit.client.test_models.TestAtoms + ocpapi.tests.unit.client.test_models.TestSlabMetadata + ocpapi.tests.unit.client.test_models.TestSlab + ocpapi.tests.unit.client.test_models.TestSlabs + ocpapi.tests.unit.client.test_models.TestAdsorbateSlabConfigs + ocpapi.tests.unit.client.test_models.TestAdsorbateSlabRelaxationsSystem + ocpapi.tests.unit.client.test_models.TestAdsorbateSlabRelaxationsRequest + ocpapi.tests.unit.client.test_models.TestAdsorbateSlabRelaxationsRequest_req_fields_only + ocpapi.tests.unit.client.test_models.TestAdsorbateSlabRelaxationResult + ocpapi.tests.unit.client.test_models.TestAdsorbateSlabRelaxationResult_req_fields_only + ocpapi.tests.unit.client.test_models.TestAdsorbateSlabRelaxationsResults + + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.unit.client.test_models.T + + +.. py:data:: T + + + +.. py:class:: ModelTestWrapper + + + .. py:class:: ModelTest(*args: Any, obj: T, obj_json: str, **kwargs: Any) + + + Bases: :py:obj:`unittest.TestCase`, :py:obj:`Generic`\ [\ :py:obj:`T`\ ] + + Base class for all tests below that assert behavior of data models. + + .. py:method:: test_from_json() -> None + + + .. py:method:: test_to_json() -> None + + + .. py:method:: assertJsonEqual(first: str, second: str) -> None + + Compares two JSON-formatted strings by deserializing them and then + comparing the generated built-in types. + + + + +.. py:class:: TestModel(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Model`\ ] + + Serde tests for the Model data model. + + +.. py:class:: TestModels(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Models`\ ] + + Serde tests for the Models data model. + + +.. py:class:: TestBulk(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Bulk`\ ] + + Serde tests for the Bulk data model. + + +.. py:class:: TestBulks(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Bulks`\ ] + + Serde tests for the Bulks data model. + + +.. 
py:class:: TestAdsorbates(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Adsorbates`\ ] + + Serde tests for the Adsorbates data model. + + +.. py:class:: TestAtoms(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Atoms`\ ] + + Serde tests for the Atoms data model. + + .. py:method:: test_to_ase_atoms() -> None + + + +.. py:class:: TestSlabMetadata(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.SlabMetadata`\ ] + + Serde tests for the SlabMetadata data model. + + +.. py:class:: TestSlab(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Slab`\ ] + + Serde tests for the Slab data model. + + +.. py:class:: TestSlabs(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.Slabs`\ ] + + Serde tests for the Slabs data model. + + +.. py:class:: TestAdsorbateSlabConfigs(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabConfigs`\ ] + + Serde tests for the AdsorbateSlabConfigs data model. + + +.. py:class:: TestAdsorbateSlabRelaxationsSystem(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsSystem`\ ] + + Serde tests for the AdsorbateSlabRelaxationsSystem data model. + + +.. py:class:: TestAdsorbateSlabRelaxationsRequest(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsRequest`\ ] + + Serde tests for the AdsorbateSlabRelaxationsRequest data model. + + +.. py:class:: TestAdsorbateSlabRelaxationsRequest_req_fields_only(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsRequest`\ ] + + Serde tests for the AdsorbateSlabRelaxationsRequest data model in which + optional fields are omitted. + + +.. py:class:: TestAdsorbateSlabRelaxationResult(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult`\ ] + + Serde tests for the AdsorbateSlabRelaxationResult data model. + + .. py:method:: test_to_ase_atoms() -> None + + + +.. py:class:: TestAdsorbateSlabRelaxationResult_req_fields_only(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult`\ ] + + Serde tests for the AdsorbateSlabRelaxationResult data model in which + optional fields are omitted. + + +.. py:class:: TestAdsorbateSlabRelaxationsResults(*args: Any, **kwargs: Any) + + + Bases: :py:obj:`ModelTestWrapper`\ [\ :py:obj:`fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsResults`\ ] + + Serde tests for the AdsorbateSlabRelaxationsResults data model. + + diff --git a/_sources/autoapi/ocpapi/tests/unit/client/test_ui/index.rst b/_sources/autoapi/ocpapi/tests/unit/client/test_ui/index.rst new file mode 100644 index 000000000..7fcfe0595 --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/unit/client/test_ui/index.rst @@ -0,0 +1,59 @@ +:py:mod:`ocpapi.tests.unit.client.test_ui` +========================================== + +.. py:module:: ocpapi.tests.unit.client.test_ui + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. 
autoapisummary:: + + ocpapi.tests.unit.client.test_ui.TestUI + + + + +.. py:class:: TestUI(methodName='runTest') + + + Bases: :py:obj:`unittest.TestCase` + + A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + .. py:method:: test_get_results_ui_url() -> None + + + diff --git a/_sources/autoapi/ocpapi/tests/unit/index.rst b/_sources/autoapi/ocpapi/tests/unit/index.rst new file mode 100644 index 000000000..930aaf45b --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/unit/index.rst @@ -0,0 +1,16 @@ +:py:mod:`ocpapi.tests.unit` +=========================== + +.. py:module:: ocpapi.tests.unit + + +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 3 + + client/index.rst + workflows/index.rst + + diff --git a/_sources/autoapi/ocpapi/tests/unit/workflows/index.rst b/_sources/autoapi/ocpapi/tests/unit/workflows/index.rst new file mode 100644 index 000000000..7216156e9 --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/unit/workflows/index.rst @@ -0,0 +1,18 @@ +:py:mod:`ocpapi.tests.unit.workflows` +===================================== + +.. py:module:: ocpapi.tests.unit.workflows + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + test_adsorbates/index.rst + test_context/index.rst + test_filter/index.rst + test_retry/index.rst + + diff --git a/_sources/autoapi/ocpapi/tests/unit/workflows/test_adsorbates/index.rst b/_sources/autoapi/ocpapi/tests/unit/workflows/test_adsorbates/index.rst new file mode 100644 index 000000000..ca0dc8ebf --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/unit/workflows/test_adsorbates/index.rst @@ -0,0 +1,135 @@ +:py:mod:`ocpapi.tests.unit.workflows.test_adsorbates` +===================================================== + +.. py:module:: ocpapi.tests.unit.workflows.test_adsorbates + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. 
autoapisummary:: + + ocpapi.tests.unit.workflows.test_adsorbates.MockGetRelaxationResults + ocpapi.tests.unit.workflows.test_adsorbates.TestMockGetRelaxationResults + ocpapi.tests.unit.workflows.test_adsorbates.TestAdsorbates + + + + +.. py:exception:: TestException + + + Bases: :py:obj:`Exception` + + Common base class for all non-exit exceptions. + + .. py:attribute:: __test__ + :value: False + + + + +.. py:class:: MockGetRelaxationResults(num_configs: int, max_configs_to_return: int, status_to_return: Optional[Iterable[fairchem.demo.ocpapi.client.Status]] = None, raise_on_first_call: Optional[Exception] = None) + + + Helper that can be used to mock calls to + Client.get_adsorbate_slab_relaxations_results(). This allows for + some configs to be returned with "success" status and others to be + omitted, similar to the behavior in the API. + + .. py:method:: __call__(*args: Any, config_ids: Optional[List[int]] = None, **kwargs: Any) -> fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsResults + + + +.. py:class:: TestMockGetRelaxationResults(methodName='runTest') + + + Bases: :py:obj:`unittest.TestCase` + + A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + .. py:method:: test___call__() -> None + + + +.. py:class:: TestAdsorbates(methodName='runTest') + + + Bases: :py:obj:`unittest.IsolatedAsyncioTestCase` + + A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. 
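An illustrative sketch, based only on the signatures documented above, of exercising ``MockGetRelaxationResults`` directly; the exact split between returned and omitted configurations is inferred from the class docstring rather than copied from the tests.

.. code-block:: python

    from ocpapi import Status
    from ocpapi.tests.unit.workflows.test_adsorbates import MockGetRelaxationResults

    fake = MockGetRelaxationResults(
        num_configs=4,            # configurations that "exist" in the fake system
        max_configs_to_return=2,  # cap per call, so some configs are omitted
        status_to_return=[Status.SUCCESS] * 4,
    )

    results = fake(config_ids=[0, 1, 2, 3])
    # Expected (inferred) behavior: an AdsorbateSlabRelaxationsResults whose
    # .configs holds the returned subset and whose .omitted_config_ids lists
    # the rest, which can be requested again on a later call.
    print(len(results.configs), results.omitted_config_ids)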
It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + .. py:method:: test_get_adsorbate_slab_relaxation_results() -> None + :async: + + + .. py:method:: test_wait_for_adsorbate_slab_relaxations() -> None + :async: + + + .. py:method:: test_find_adsorbate_binding_sites() -> None + :async: + + + diff --git a/_sources/autoapi/ocpapi/tests/unit/workflows/test_context/index.rst b/_sources/autoapi/ocpapi/tests/unit/workflows/test_context/index.rst new file mode 100644 index 000000000..774aa94d6 --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/unit/workflows/test_context/index.rst @@ -0,0 +1,59 @@ +:py:mod:`ocpapi.tests.unit.workflows.test_context` +================================================== + +.. py:module:: ocpapi.tests.unit.workflows.test_context + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.unit.workflows.test_context.TestContext + + + + +.. py:class:: TestContext(methodName='runTest') + + + Bases: :py:obj:`unittest.TestCase` + + A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + .. 
py:method:: test_set_context_var() -> None + + + diff --git a/_sources/autoapi/ocpapi/tests/unit/workflows/test_filter/index.rst b/_sources/autoapi/ocpapi/tests/unit/workflows/test_filter/index.rst new file mode 100644 index 000000000..f80e34988 --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/unit/workflows/test_filter/index.rst @@ -0,0 +1,79 @@ +:py:mod:`ocpapi.tests.unit.workflows.test_filter` +================================================= + +.. py:module:: ocpapi.tests.unit.workflows.test_filter + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.unit.workflows.test_filter.TestFilter + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.unit.workflows.test_filter._new_adslab + + + +.. py:function:: _new_adslab(miller_indices: Optional[Tuple[int, int, int]] = None) -> fairchem.demo.ocpapi.client.AdsorbateSlabConfigs + + +.. py:class:: TestFilter(methodName='runTest') + + + Bases: :py:obj:`unittest.IsolatedAsyncioTestCase` + + A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + .. py:method:: test_keep_all_slabs() -> None + :async: + + + .. py:method:: test_keep_slabs_with_miller_indices() -> None + :async: + + + .. py:method:: test_prompt_for_slabs_to_keep() -> None + :async: + + + diff --git a/_sources/autoapi/ocpapi/tests/unit/workflows/test_retry/index.rst b/_sources/autoapi/ocpapi/tests/unit/workflows/test_retry/index.rst new file mode 100644 index 000000000..225e61245 --- /dev/null +++ b/_sources/autoapi/ocpapi/tests/unit/workflows/test_retry/index.rst @@ -0,0 +1,92 @@ +:py:mod:`ocpapi.tests.unit.workflows.test_retry` +================================================ + +.. py:module:: ocpapi.tests.unit.workflows.test_retry + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.unit.workflows.test_retry.TestRetry + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.tests.unit.workflows.test_retry.returns + ocpapi.tests.unit.workflows.test_retry.raises + + + +Attributes +~~~~~~~~~~ + +.. 
autoapisummary:: + + ocpapi.tests.unit.workflows.test_retry.T + + +.. py:data:: T + + + +.. py:function:: returns(val: T) -> Callable[[], T] + + +.. py:function:: raises(ex: Exception) -> Callable[[], None] + + +.. py:class:: TestRetry(methodName='runTest') + + + Bases: :py:obj:`unittest.TestCase` + + A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + .. py:method:: test_retry_api_calls__results() -> None + + + .. py:method:: test_retry_api_calls__wait() -> None + + + .. py:method:: test_retry_api_calls__logging() -> None + + + diff --git a/_sources/autoapi/ocpapi/version/index.rst b/_sources/autoapi/ocpapi/version/index.rst new file mode 100644 index 000000000..d88a0821f --- /dev/null +++ b/_sources/autoapi/ocpapi/version/index.rst @@ -0,0 +1,14 @@ +:py:mod:`ocpapi.version` +======================== + +.. py:module:: ocpapi.version + + +Module Contents +--------------- + +.. py:data:: VERSION + :value: '1.0.0' + + + diff --git a/_sources/autoapi/ocpapi/workflows/adsorbates/index.rst b/_sources/autoapi/ocpapi/workflows/adsorbates/index.rst new file mode 100644 index 000000000..5fe133860 --- /dev/null +++ b/_sources/autoapi/ocpapi/workflows/adsorbates/index.rst @@ -0,0 +1,455 @@ +:py:mod:`ocpapi.workflows.adsorbates` +===================================== + +.. py:module:: ocpapi.workflows.adsorbates + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.workflows.adsorbates.Lifetime + ocpapi.workflows.adsorbates.AdsorbateSlabRelaxations + ocpapi.workflows.adsorbates.AdsorbateBindingSites + + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + ocpapi.workflows.adsorbates._setup_log_record_factory + ocpapi.workflows.adsorbates._ensure_model_supported + ocpapi.workflows.adsorbates._get_bulk_if_supported + ocpapi.workflows.adsorbates._ensure_adsorbate_supported + ocpapi.workflows.adsorbates._get_slabs + ocpapi.workflows.adsorbates._get_absorbate_configs_on_slab + ocpapi.workflows.adsorbates._get_absorbate_configs_on_slab_with_logging + ocpapi.workflows.adsorbates._get_adsorbate_configs_on_slabs + ocpapi.workflows.adsorbates._submit_relaxations + ocpapi.workflows.adsorbates._submit_relaxations_with_progress_logging + ocpapi.workflows.adsorbates.get_adsorbate_slab_relaxation_results + ocpapi.workflows.adsorbates.wait_for_adsorbate_slab_relaxations + ocpapi.workflows.adsorbates._delete_system + ocpapi.workflows.adsorbates._ensure_system_deleted + ocpapi.workflows.adsorbates._run_relaxations_on_slab + ocpapi.workflows.adsorbates._refresh_pbar + ocpapi.workflows.adsorbates._relax_binding_sites_on_slabs + ocpapi.workflows.adsorbates.find_adsorbate_binding_sites + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.workflows.adsorbates._CTX_AD_BULK + ocpapi.workflows.adsorbates._CTX_SLAB + ocpapi.workflows.adsorbates.DEFAULT_CLIENT + ocpapi.workflows.adsorbates._DEFAULT_ADSLAB_FILTER + + +.. py:data:: _CTX_AD_BULK + :type: contextvars.ContextVar[Tuple[str, str]] + + + +.. py:data:: _CTX_SLAB + :type: contextvars.ContextVar[fairchem.demo.ocpapi.client.Slab] + + + +.. py:function:: _setup_log_record_factory() -> None + + Adds a log record factory that stores information about the currently + running job on a log message. + + +.. py:data:: DEFAULT_CLIENT + :type: fairchem.demo.ocpapi.client.Client + + + +.. py:exception:: AdsorbatesException + + + Bases: :py:obj:`Exception` + + Base exception for all others in this module. + + +.. py:exception:: UnsupportedModelException(model: str, allowed_models: List[str]) + + + Bases: :py:obj:`AdsorbatesException` + + Exception raised when a model is not supported in the API. + + +.. py:exception:: UnsupportedBulkException(bulk: str) + + + Bases: :py:obj:`AdsorbatesException` + + Exception raised when a bulk material is not supported in the API. + + +.. py:exception:: UnsupportedAdsorbateException(adsorbate: str) + + + Bases: :py:obj:`AdsorbatesException` + + Exception raised when an adsorbate is not supported in the API. + + +.. py:class:: Lifetime(*args, **kwds) + + + Bases: :py:obj:`enum.Enum` + + Represents different lifetimes when running relaxations. + + .. py:attribute:: SAVE + + The relaxation will be available on API servers indefinitely. It will not + be possible to delete the relaxation in the future. + + .. py:attribute:: MARK_EPHEMERAL + + The relaxation will be saved on API servers, but can be deleted at any time + in the future. + + .. py:attribute:: DELETE + + The relaxation will be deleted from API servers as soon as the results have + been fetched. + + +.. py:class:: AdsorbateSlabRelaxations + + + Stores the relaxations of adsorbate placements on the surface of a slab. + + .. py:attribute:: slab + :type: fairchem.demo.ocpapi.client.Slab + + The slab on which the adsorbate was placed. + + .. py:attribute:: configs + :type: List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult] + + Details of the relaxation of each adsorbate placement, including the + final position. + + .. py:attribute:: system_id + :type: str + + The ID of the system that stores all of the relaxations. + + .. 
py:attribute:: api_host + :type: str + + The API host on which the relaxations were run. + + .. py:attribute:: ui_url + :type: Optional[str] + + The URL at which results can be visualized. + + +.. py:class:: AdsorbateBindingSites + + + Stores the inputs and results of a set of relaxations of adsorbate + placements on the surface of a slab. + + .. py:attribute:: adsorbate + :type: str + + Description of the adsorbate. + + .. py:attribute:: bulk + :type: fairchem.demo.ocpapi.client.Bulk + + The bulk material that was being modeled. + + .. py:attribute:: model + :type: str + + The type of the model that was run. + + .. py:attribute:: slabs + :type: List[AdsorbateSlabRelaxations] + + The list of slabs that were generated from the bulk structure. Each + contains its own list of adsorbate placements. + + +.. py:function:: _ensure_model_supported(client: fairchem.demo.ocpapi.client.Client, model: str) -> None + :async: + + Checks that the input model is supported in the API. + + :param client: The client to use when making requests to the API. + :param model: The model to check. + + :raises UnsupportedModelException: If the model is not supported. + + +.. py:function:: _get_bulk_if_supported(client: fairchem.demo.ocpapi.client.Client, bulk: str) -> fairchem.demo.ocpapi.client.Bulk + :async: + + Returns the object from the input bulk if it is supported in the API. + + :param client: The client to use when making requests to the API. + :param bulk: The bulk to fetch. + + :raises UnsupportedBulkException: If the requested bulk is not supported. + + :returns: Bulk instance for the input type. + + +.. py:function:: _ensure_adsorbate_supported(client: fairchem.demo.ocpapi.client.Client, adsorbate: str) -> None + :async: + + Checks that the input adsorbate is supported in the API. + + :param client: The client to use when making requests to the API. + :param adsorbate: The adsorbate to check. + + :raises UnsupportedAdsorbateException: If the adsorbate is not supported. + + +.. py:function:: _get_slabs(client: fairchem.demo.ocpapi.client.Client, bulk: fairchem.demo.ocpapi.client.Bulk) -> List[fairchem.demo.ocpapi.client.Slab] + :async: + + Enumerates surfaces for the input bulk material. + + :param client: The client to use when making requests to the API. + :param bulk: The bulk material from which slabs will be generated. + + :returns: The list of slabs that were generated. + + +.. py:function:: _get_absorbate_configs_on_slab(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, slab: fairchem.demo.ocpapi.client.Slab) -> fairchem.demo.ocpapi.client.AdsorbateSlabConfigs + :async: + + Generate initial guesses at adsorbate binding sites on the input slab. + + :param client: The client to use when making API calls. + :param adsorbate: Description of the adsorbate to place. + :param slab: The slab on which the adsorbate should be placed. + + :returns: An updated slab instance that has had tags applied to it and a list + of Atoms objects, each with the positions of the adsorbate atoms on + one of the candidate binding sites. + + +.. py:function:: _get_absorbate_configs_on_slab_with_logging(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, slab: fairchem.demo.ocpapi.client.Slab) -> fairchem.demo.ocpapi.client.AdsorbateSlabConfigs + :async: + + Wrapper around _get_absorbate_configs_on_slab that adds logging. + + +.. 
py:function:: _get_adsorbate_configs_on_slabs(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, slabs: List[fairchem.demo.ocpapi.client.Slab]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] + :async: + + Finds candidate adsorbate binding sites on each of the input slabs. + + :param client: The client to use when making API calls. + :param adsorbate: Description of the adsorbate to place. + :param slabs: The slabs on which the adsorbate should be placed. + + :returns: List of slabs and, for each, the positions of the adsorbate + atoms in the potential binding site. + + +.. py:function:: _submit_relaxations(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.Atoms], bulk: fairchem.demo.ocpapi.client.Bulk, slab: fairchem.demo.ocpapi.client.Slab, model: str, ephemeral: bool) -> str + :async: + + Start relaxations for each of the input adsorbate configurations on the + input slab. + + :param client: The client to use when making API calls. + :param adsorbate: Description of the adsorbate to place. + :param adsorbate_configs: Positions of the adsorbate on the slab. Each + will be relaxed independently. + :param bulk: The bulk material from which the slab was generated. + :param slab: The slab that should be searched for adsorbate binding sites. + :param model: The model to use when evaluating forces and energies. + :param ephemeral: Whether the relaxations should be marked as ephemeral. + + :returns: The system ID of the relaxation run, which can be used to fetch results + as they become available. + + +.. py:function:: _submit_relaxations_with_progress_logging(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.Atoms], bulk: fairchem.demo.ocpapi.client.Bulk, slab: fairchem.demo.ocpapi.client.Slab, model: str, ephemeral: bool) -> str + :async: + + Wrapper around _submit_relaxations that adds periodic logging in case + calls to submit relaxations are being rate limited. + + +.. py:function:: get_adsorbate_slab_relaxation_results(system_id: str, config_ids: Optional[List[int]] = None, fields: Optional[List[str]] = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult] + :async: + + Wrapper around Client.get_adsorbate_slab_relaxations_results() that + handles retries, including re-fetching individual configurations that + are initially omitted. + + :param client: The client to use when making API calls. + :param system_id: The system ID of the relaxations. + :param config_ids: If defined and not empty, a subset of configurations + to fetch. Otherwise all configurations are returned. + :param fields: If defined and not empty, a subset of fields in each + configuration to fetch. Otherwise all fields are returned. + + :returns: List of relaxation results, one for each adsorbate configuration in + the system. + + +.. py:function:: wait_for_adsorbate_slab_relaxations(system_id: str, check_immediately: bool = False, slow_interval_sec: float = 30, fast_interval_sec: float = 10, pbar: Optional[tqdm.tqdm] = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) -> Dict[int, fairchem.demo.ocpapi.client.Status] + :async: + + Blocks until all relaxations in the input system have finished, whether + successfully or not. + + Relaxations are queued in the API, waiting until machines are ready to + run them. Once started, they can take 1-2 minutes to finish. 
This method + initially sleeps "slow_interval_sec" seconds between each check for any + relaxations having finished. Once at least one result is ready, subsequent + sleeps are for "fast_interval_sec" seconds. + + :param system_id: The ID of the system for which relaxations are running. + :param check_immediately: If False (default), sleep before the first check + for relaxations having finished. If True, check whether relaxations + have finished immediately on entering this function. + :param slow_interval_sec: The number of seconds to wait between each check + while all are still running. + :param fast_interval_sec: The number of seconds to wait between each check + when at least one relaxation has finished in the system. + :param pbar: A tqdm instance that tracks the number of configurations that + have finished. This will be updated with the number of individual + configurations whose relaxations have finished. + :param client: The client to use when making API calls. + + :returns: Map of config IDs in the system to their terminal status. + + +.. py:function:: _delete_system(client: fairchem.demo.ocpapi.client.Client, system_id: str) -> None + :async: + + Deletes the input system, with retries on failed attempts. + + :param client: The client to use when making API calls. + :param system_id: The ID of the system to delete. + + +.. py:function:: _ensure_system_deleted(client: fairchem.demo.ocpapi.client.Client, system_id: str) -> AsyncGenerator[None, None] + :async: + + Immediately yields control to the caller. When control returns to this + function, try to delete the system with the input id. + + :param client: The client to use when making API calls. + :param system_id: The ID of the system to delete. + + +.. py:function:: _run_relaxations_on_slab(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.Atoms], bulk: fairchem.demo.ocpapi.client.Bulk, slab: fairchem.demo.ocpapi.client.Slab, model: str, lifetime: Lifetime, pbar: tqdm.tqdm) -> AdsorbateSlabRelaxations + :async: + + Start relaxations for each adsorbate configuration on the input slab + and wait for all to finish. + + :param client: The client to use when making API calls. + :param adsorbate: Description of the adsorbate to place. + :param adsorbate_configs: The positions of atoms in each adsorbate placement + to be relaxed. + :param bulk: The bulk material from which the slab was generated. + :param slab: The slab that should be searched for adsorbate binding sites. + :param model: The model to use when evaluating forces and energies. + :param lifetime: Whether relaxations should be saved on the server, be marked + as ephemeral (allowing them to be deleted in the future), or deleted + immediately. + :param pbar: A progress bar to update as relaxations finish. + + :returns: Details of each adsorbate placement, including its relaxed position. + + +.. py:function:: _refresh_pbar(pbar: tqdm.tqdm, interval_sec: float) -> None + :async: + + Helper function that refreshes the input progress bar on a regular + schedule. This function never returns; it must be cancelled. + + :param pbar: The progress bar to refresh. + :param interval_sec: The number of seconds to wait between each refresh. + + +
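A brief sketch (not part of the generated reference) of how the public result-fetching coroutines documented earlier in this module, ``wait_for_adsorbate_slab_relaxations`` and ``get_adsorbate_slab_relaxation_results``, could be combined to poll a running system and then fetch its results; the top-level import path and the example system ID are assumptions:

.. code-block:: python

    import asyncio

    # Assumed re-exports; the documented definitions live in ocpapi.workflows.adsorbates.
    from fairchem.demo.ocpapi import (
        get_adsorbate_slab_relaxation_results,
        wait_for_adsorbate_slab_relaxations,
    )

    async def fetch_all(system_id: str):
        # Block until every relaxation in the system reaches a terminal status,
        # then pull the full results. The "sys-123" style ID below is a placeholder.
        await wait_for_adsorbate_slab_relaxations(system_id, check_immediately=True)
        return await get_adsorbate_slab_relaxation_results(system_id)

    results = asyncio.run(fetch_all("sys-123"))
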
.. py:function:: _relax_binding_sites_on_slabs(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, bulk: fairchem.demo.ocpapi.client.Bulk, adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs], model: str, lifetime: Lifetime) -> AdsorbateBindingSites + :async: + + Search for adsorbate binding sites on the input slabs. + + :param client: The client to use when making API calls. + :param adsorbate: Description of the adsorbate to place. + :param bulk: The bulk material from which the slab was generated. + :param adslabs: The slabs and, for each, the binding sites that should be + relaxed. + :param model: The model to use when evaluating forces and energies. + :param lifetime: Whether relaxations should be saved on the server, be marked + as ephemeral (allowing them to be deleted in the future), or deleted + immediately. + + :returns: Details of each adsorbate placement, including its relaxed position. + + +.. py:data:: _DEFAULT_ADSLAB_FILTER + :type: Callable[[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]], Awaitable[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]]] + + + +.. py:function:: find_adsorbate_binding_sites(adsorbate: str, bulk: str, model: str = 'equiformer_v2_31M_s2ef_all_md', adslab_filter: Callable[[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]], Awaitable[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]]] = _DEFAULT_ADSLAB_FILTER, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT, lifetime: Lifetime = Lifetime.SAVE) -> AdsorbateBindingSites + :async: + + Search for adsorbate binding sites on surfaces of a bulk material. + This executes the following steps: + + 1. Ensure that both the adsorbate and bulk are supported in the + OCP API. + 2. Enumerate unique surfaces from the bulk material. + 3. Enumerate likely binding sites for the input adsorbate on each + of the generated surfaces. + 4. Filter the list of generated adsorbate/slab (adslab) configurations + using the input adslab_filter. + 5. Relax each generated surface+adsorbate structure by refining + atomic positions to minimize forces generated by the input model. + + :param adsorbate: Description of the adsorbate to place. + :param bulk: The ID (typically Materials Project MP ID) of the bulk material + on which the adsorbate will be placed. + :param model: The type of the model to use when calculating forces during + relaxations. + :param adslab_filter: A function that modifies the set of adsorbate/slab + configurations that will be relaxed. This can be used to subselect + slabs and/or adsorbate configurations. + :param client: The OCP API client to use. + :param lifetime: Whether relaxations should be saved on the server, be marked + as ephemeral (allowing them to be deleted in the future), or deleted + immediately. + + :returns: Details of each adsorbate binding site, including results of relaxing + to locally-optimized positions using the input model. + + :raises UnsupportedModelException: If the requested model is not supported. + :raises UnsupportedBulkException: If the requested bulk is not supported. + :raises UnsupportedAdsorbateException: If the requested adsorbate is not + supported. + +
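A minimal usage sketch for the coroutine documented above (not part of the generated API reference; the top-level import path and the example adsorbate/bulk inputs are assumptions):

.. code-block:: python

    import asyncio

    # Assumed re-exports; the documented definitions live in ocpapi.workflows.
    from fairchem.demo.ocpapi import (
        find_adsorbate_binding_sites,
        keep_slabs_with_miller_indices,
    )

    async def main() -> None:
        # "*OH" and "mp-30" are illustrative inputs only.
        results = await find_adsorbate_binding_sites(
            adsorbate="*OH",
            bulk="mp-30",
            # Restrict relaxations to (1, 1, 1) slabs via the adslab_filter hook.
            adslab_filter=keep_slabs_with_miller_indices([(1, 1, 1)]),
        )
        # AdsorbateBindingSites.slabs holds one AdsorbateSlabRelaxations per slab.
        for slab_result in results.slabs:
            print(slab_result.system_id, slab_result.ui_url)

    asyncio.run(main())
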
diff --git a/_sources/autoapi/ocpapi/workflows/context/index.rst b/_sources/autoapi/ocpapi/workflows/context/index.rst new file mode 100644 index 000000000..76b234a4e --- /dev/null +++ b/_sources/autoapi/ocpapi/workflows/context/index.rst @@ -0,0 +1,29 @@ +:py:mod:`ocpapi.workflows.context` +================================== + +.. py:module:: ocpapi.workflows.context + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.workflows.context.set_context_var + + + +.. py:function:: set_context_var(context_var: contextvars.ContextVar, value: Any) -> Generator[None, None, None] + + Sets the input context variable to the input value and yields control + back to the caller. When control returns to this function, the context + variable is reset to its original value. + + :param context_var: The context variable to set. + :param value: The value to assign to the variable. + + diff --git a/_sources/autoapi/ocpapi/workflows/filter/index.rst b/_sources/autoapi/ocpapi/workflows/filter/index.rst new file mode 100644 index 000000000..8002fa129 --- /dev/null +++ b/_sources/autoapi/ocpapi/workflows/filter/index.rst @@ -0,0 +1,61 @@ +:py:mod:`ocpapi.workflows.filter` +================================= + +.. py:module:: ocpapi.workflows.filter + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.workflows.filter.keep_all_slabs + ocpapi.workflows.filter.keep_slabs_with_miller_indices + ocpapi.workflows.filter.prompt_for_slabs_to_keep + + + + +.. py:class:: keep_all_slabs + + + Adslab filter that returns all slabs. + + .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] + :async: + + + +.. py:class:: keep_slabs_with_miller_indices(miller_indices: Iterable[Tuple[int, int, int]]) + + + Adslab filter that keeps any slabs with the configured miller indices. + Slabs with other miller indices will be ignored. + + .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] + :async: + + + +.. py:class:: prompt_for_slabs_to_keep + + + Adslab filter that presents the user with an interactive prompt to choose + which of the input slabs to keep. + + .. py:method:: _sort_key(adslab: fairchem.demo.ocpapi.client.AdsorbateSlabConfigs) -> Tuple[Tuple[int, int, int], float, str] + :staticmethod: + + Generates a sort key from the input adslab. Returns the miller indices, + shift, and top/bottom label so that they will be sorted by those values + in that order. + + + .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] + :async: + + + diff --git a/_sources/autoapi/ocpapi/workflows/index.rst b/_sources/autoapi/ocpapi/workflows/index.rst new file mode 100644 index 000000000..a12c9e5a5 --- /dev/null +++ b/_sources/autoapi/ocpapi/workflows/index.rst @@ -0,0 +1,331 @@ +:py:mod:`ocpapi.workflows` +========================== + +.. py:module:: ocpapi.workflows + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + adsorbates/index.rst + context/index.rst + filter/index.rst + log/index.rst + retry/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.workflows.AdsorbateBindingSites + ocpapi.workflows.AdsorbateSlabRelaxations + ocpapi.workflows.Lifetime + ocpapi.workflows.keep_all_slabs + ocpapi.workflows.keep_slabs_with_miller_indices + ocpapi.workflows.prompt_for_slabs_to_keep + ocpapi.workflows.RateLimitLogging + + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + ocpapi.workflows.find_adsorbate_binding_sites + ocpapi.workflows.get_adsorbate_slab_relaxation_results + ocpapi.workflows.wait_for_adsorbate_slab_relaxations + ocpapi.workflows.retry_api_calls + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.workflows.NO_LIMIT + ocpapi.workflows.NoLimitType + + +.. py:class:: AdsorbateBindingSites + + + Stores the inputs and results of a set of relaxations of adsorbate + placements on the surface of a slab. + + .. py:attribute:: adsorbate + :type: str + + Description of the adsorbate. + + .. py:attribute:: bulk + :type: fairchem.demo.ocpapi.client.Bulk + + The bulk material that was being modeled. + + .. py:attribute:: model + :type: str + + The type of the model that was run. + + .. py:attribute:: slabs + :type: List[AdsorbateSlabRelaxations] + + The list of slabs that were generated from the bulk structure. Each + contains its own list of adsorbate placements. + + +.. py:class:: AdsorbateSlabRelaxations + + + Stores the relaxations of adsorbate placements on the surface of a slab. + + .. py:attribute:: slab + :type: fairchem.demo.ocpapi.client.Slab + + The slab on which the adsorbate was placed. + + .. py:attribute:: configs + :type: List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult] + + Details of the relaxation of each adsorbate placement, including the + final position. + + .. py:attribute:: system_id + :type: str + + The ID of the system that stores all of the relaxations. + + .. py:attribute:: api_host + :type: str + + The API host on which the relaxations were run. + + .. py:attribute:: ui_url + :type: Optional[str] + + The URL at which results can be visualized. + + +.. py:class:: Lifetime(*args, **kwds) + + + Bases: :py:obj:`enum.Enum` + + Represents different lifetimes when running relaxations. + + .. py:attribute:: SAVE + + The relaxation will be available on API servers indefinitely. It will not + be possible to delete the relaxation in the future. + + .. py:attribute:: MARK_EPHEMERAL + + The relaxation will be saved on API servers, but can be deleted at any time + in the future. + + .. py:attribute:: DELETE + + The relaxation will be deleted from API servers as soon as the results have + been fetched. + + +.. py:exception:: UnsupportedAdsorbateException(adsorbate: str) + + + Bases: :py:obj:`AdsorbatesException` + + Exception raised when an adsorbate is not supported in the API. + + +.. py:exception:: UnsupportedBulkException(bulk: str) + + + Bases: :py:obj:`AdsorbatesException` + + Exception raised when a bulk material is not supported in the API. + + +.. py:exception:: UnsupportedModelException(model: str, allowed_models: List[str]) + + + Bases: :py:obj:`AdsorbatesException` + + Exception raised when a model is not supported in the API. + + +.. py:function:: find_adsorbate_binding_sites(adsorbate: str, bulk: str, model: str = 'equiformer_v2_31M_s2ef_all_md', adslab_filter: Callable[[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]], Awaitable[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]]] = _DEFAULT_ADSLAB_FILTER, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT, lifetime: Lifetime = Lifetime.SAVE) -> AdsorbateBindingSites + :async: + + Search for adsorbate binding sites on surfaces of a bulk material. + This executes the following steps: + + 1. Ensure that both the adsorbate and bulk are supported in the + OCP API. + 2. Enumerate unique surfaces from the bulk material. + 3. Enumerate likely binding sites for the input adsorbate on each + of the generated surfaces. 
+ 4. Filter the list of generated adsorbate/slab (adslab) configurations + using the input adslab_filter. + 5. Relax each generated surface+adsorbate structure by refining + atomic positions to minimize forces generated by the input model. + + :param adsorbate: Description of the adsorbate to place. + :param bulk: The ID (typically Materials Project MP ID) of the bulk material + on which the adsorbate will be placed. + :param model: The type of the model to use when calculating forces during + relaxations. + :param adslab_filter: A function that modifies the set of adsorbate/slab + configurations that will be relaxed. This can be used to subselect + slabs and/or adsorbate configurations. + :param client: The OCP API client to use. + :param lifetime: Whether relaxations should be saved on the server, be marked + as ephemeral (allowing them to deleted in the future), or deleted + immediately. + + :returns: Details of each adsorbate binding site, including results of relaxing + to locally-optimized positions using the input model. + + :raises UnsupportedModelException: If the requested model is not supported. + :raises UnsupportedBulkException: If the requested bulk is not supported. + :raises UnsupportedAdsorbateException: If the requested adsorbate is not + supported. + + +.. py:function:: get_adsorbate_slab_relaxation_results(system_id: str, config_ids: Optional[List[int]] = None, fields: Optional[List[str]] = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult] + :async: + + Wrapper around Client.get_adsorbate_slab_relaxations_results() that + handles retries, including re-fetching individual configurations that + are initially omitted. + + :param client: The client to use when making API calls. + :param system_id: The system ID of the relaxations. + :param config_ids: If defined and not empty, a subset of configurations + to fetch. Otherwise all configurations are returned. + :param fields: If defined and not empty, a subset of fields in each + configuration to fetch. Otherwise all fields are returned. + + :returns: List of relaxation results, one for each adsorbate configuration in + the system. + + +.. py:function:: wait_for_adsorbate_slab_relaxations(system_id: str, check_immediately: bool = False, slow_interval_sec: float = 30, fast_interval_sec: float = 10, pbar: Optional[tqdm.tqdm] = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) -> Dict[int, fairchem.demo.ocpapi.client.Status] + :async: + + Blocks until all relaxations in the input system have finished, whether + successfully or not. + + Relaxations are queued in the API, waiting until machines are ready to + run them. Once started, they can take 1-2 minutes to finish. This method + initially sleeps "slow_interval_sec" seconds between each check for any + relaxations having finished. Once at least one result is ready, subsequent + sleeps are for "fast_interval_sec" seconds. + + :param system_id: The ID of the system for which relaxations are running. + :param check_immediately: If False (default), sleep before the first check + for relaxations having finished. If True, check whether relaxations + have finished immediately on entering this function. + :param slow_interval_sec: The number of seconds to wait between each check + while all are still running. + :param fast_interval_sec: The number of seconds to wait between each check + when at least one relaxation has finished in the system. 
+ :param pbar: A tqdm instance that tracks the number of configurations that + have finished. This will be updated with the number of individual + configurations whose relaxations have finished. + :param client: The client to use when making API calls. + + :returns: Map of config IDs in the system to their terminal status. + + +.. py:class:: keep_all_slabs + + + Adslab filter that returns all slabs. + + .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] + :async: + + + +.. py:class:: keep_slabs_with_miller_indices(miller_indices: Iterable[Tuple[int, int, int]]) + + + Adslab filter that keeps any slabs with the configured miller indices. + Slabs with other miller indices will be ignored. + + .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] + :async: + + + +.. py:class:: prompt_for_slabs_to_keep + + + Adslab filter that presents the user with an interactive prompt to choose + which of the input slabs to keep. + + .. py:method:: _sort_key(adslab: fairchem.demo.ocpapi.client.AdsorbateSlabConfigs) -> Tuple[Tuple[int, int, int], float, str] + :staticmethod: + + Generates a sort key from the input adslab. Returns the miller indices, + shift, and top/bottom label so that they will be sorted by those values + in that order. + + + .. py:method:: __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) -> List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs] + :async: + + + +.. py:data:: NO_LIMIT + :type: NoLimitType + :value: 0 + + + +.. py:data:: NoLimitType + + + +.. py:class:: RateLimitLogging + + + Controls logging when rate limits are hit. + + .. py:attribute:: logger + :type: logging.Logger + + The logger to use. + + .. py:attribute:: action + :type: str + + A short description of the action being attempted. + + +.. py:function:: retry_api_calls(max_attempts: Union[int, NoLimitType] = 3, rate_limit_logging: Optional[RateLimitLogging] = None, fixed_wait_sec: float = 2, max_jitter_sec: float = 1) -> Any + + Decorator with sensible defaults for retrying calls to the OCP API. + + :param max_attempts: The maximum number of calls to make. If NO_LIMIT, + retries will be made forever. + :param rate_limit_logging: If not None, log statements will be generated + using this configuration when a rate limit is hit. + :param fixed_wait_sec: The fixed number of seconds to wait when retrying an + exception that does *not* include a retry-after value. The default + value is sensible; this is exposed mostly for testing. + :param max_jitter_sec: The maximum number of seconds that will be randomly + added to wait times. The default value is sensible; this is exposed + mostly for testing. + +
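A short sketch of how the decorator documented above might be applied (not part of the generated reference; the top-level import path, the constructor call, and the wrapped coroutine are assumptions):

.. code-block:: python

    import logging

    # Assumed re-exports; the documented definitions live in ocpapi.workflows.retry.
    from fairchem.demo.ocpapi import RateLimitLogging, retry_api_calls

    logger = logging.getLogger(__name__)

    # Retry the wrapped coroutine up to five times and log whenever the API
    # reports a rate limit. RateLimitLogging is assumed to accept its documented
    # attributes (logger, action) as constructor arguments.
    @retry_api_calls(
        max_attempts=5,
        rate_limit_logging=RateLimitLogging(logger=logger, action="fetch relaxation results"),
    )
    async def fetch_results_with_retries(client, system_id: str):
        # Hypothetical wrapper; the exact Client call signature is illustrative.
        return await client.get_adsorbate_slab_relaxations_results(system_id)
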
diff --git a/_sources/autoapi/ocpapi/workflows/log/index.rst b/_sources/autoapi/ocpapi/workflows/log/index.rst new file mode 100644 index 000000000..0f03bb028 --- /dev/null +++ b/_sources/autoapi/ocpapi/workflows/log/index.rst @@ -0,0 +1,13 @@ +:py:mod:`ocpapi.workflows.log` +============================== + +.. py:module:: ocpapi.workflows.log + + +Module Contents +--------------- + +.. py:data:: log + + + diff --git a/_sources/autoapi/ocpapi/workflows/retry/index.rst b/_sources/autoapi/ocpapi/workflows/retry/index.rst new file mode 100644 index 000000000..5fc4b2c7c --- /dev/null +++ b/_sources/autoapi/ocpapi/workflows/retry/index.rst @@ -0,0 +1,95 @@ +:py:mod:`ocpapi.workflows.retry` +================================ + +.. py:module:: ocpapi.workflows.retry + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpapi.workflows.retry.RateLimitLogging + ocpapi.workflows.retry._wait_check_retry_after + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.workflows.retry.retry_api_calls + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + ocpapi.workflows.retry.NoLimitType + ocpapi.workflows.retry.NO_LIMIT + + +.. py:class:: RateLimitLogging + + + Controls logging when rate limits are hit. + + .. py:attribute:: logger + :type: logging.Logger + + The logger to use. + + .. py:attribute:: action + :type: str + + A short description of the action being attempted. + + +.. py:class:: _wait_check_retry_after(default_wait: tenacity.wait.wait_base, rate_limit_logging: Optional[RateLimitLogging] = None) + + + Bases: :py:obj:`tenacity.wait.wait_base` + + Tenacity wait strategy that first checks whether RateLimitExceededException + was raised and that it includes a retry-after value; if so, wait for that + amount of time. Otherwise, fall back to the provided default strategy. + + .. py:method:: __call__(retry_state: tenacity.RetryCallState) -> float + + If a RateLimitExceededException was raised and has a retry_after value, + return it. Otherwise use the default waiter method. + + + +.. py:data:: NoLimitType + + + +.. py:data:: NO_LIMIT + :type: NoLimitType + :value: 0 + + + +.. py:function:: retry_api_calls(max_attempts: Union[int, NoLimitType] = 3, rate_limit_logging: Optional[RateLimitLogging] = None, fixed_wait_sec: float = 2, max_jitter_sec: float = 1) -> Any + + Decorator with sensible defaults for retrying calls to the OCP API. + + :param max_attempts: The maximum number of calls to make. If NO_LIMIT, + retries will be made forever. + :param rate_limit_logging: If not None, log statements will be generated + using this configuration when a rate limit is hit. + :param fixed_wait_sec: The fixed number of seconds to wait when retrying an + exception that does *not* include a retry-after value. The default + value is sensible; this is exposed mostly for testing. + :param max_jitter_sec: The maximum number of seconds that will be randomly + added to wait times. The default value is sensible; this is exposed + mostly for testing. + + diff --git a/_sources/autoapi/ocpneb/core/autoframe/index.rst b/_sources/autoapi/ocpneb/core/autoframe/index.rst new file mode 100644 index 000000000..372353a51 --- /dev/null +++ b/_sources/autoapi/ocpneb/core/autoframe/index.rst @@ -0,0 +1,533 @@ +:py:mod:`ocpneb.core.autoframe` +=============================== + +.. py:module:: ocpneb.core.autoframe + +.. autoapi-nested-parse:: + + Home of the AutoFrame classes which facilitate the generation of initial + and final frames for NEB calculations. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpneb.core.autoframe.AutoFrame + ocpneb.core.autoframe.AutoFrameDissociation + ocpneb.core.autoframe.AutoFrameTransfer + ocpneb.core.autoframe.AutoFrameDesorption + + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + ocpneb.core.autoframe.interpolate_and_correct_frames + ocpneb.core.autoframe.get_shortest_path + ocpneb.core.autoframe.traverse_adsorbate_transfer + ocpneb.core.autoframe.traverse_adsorbate_dissociation + ocpneb.core.autoframe.traverse_adsorbate_desorption + ocpneb.core.autoframe.get_product2_idx + ocpneb.core.autoframe.traverse_adsorbate_general + ocpneb.core.autoframe.unwrap_atoms + ocpneb.core.autoframe.interpolate + ocpneb.core.autoframe.is_edge_list_respected + ocpneb.core.autoframe.reorder_edge_list + ocpneb.core.autoframe.is_adsorbate_adsorbed + + + +.. py:class:: AutoFrame + + + Base class to hold functions that are shared across the reaction types. + + .. py:method:: reorder_adsorbate(frame: ase.Atoms, idx_mapping: dict) + + Given the adsorbate mapping, reorder the adsorbate atoms in the final frame so that + they match the initial frame to facillitate proper interpolation. + + :param frame: the atoms object for which the adsorbate will be reordered + :type frame: ase.Atoms + :param idx_mapping: the index mapping to reorder things + :type idx_mapping: dict + + :returns: the reordered adsorbate-slab configuration + :rtype: ase.Atoms + + + .. py:method:: only_keep_unique_systems(systems, energies) + + Remove duplicate systems from `systems` and `energies`. + + :param systems: the systems to remove duplicates from + :type systems: list[ase.Atoms] + :param energies: the energies to remove duplicates from + :type energies: list[float] + + :returns: the systems with duplicates removed + list[float]: the energies with duplicates removed + :rtype: list[ase.Atoms] + + + .. py:method:: get_most_proximate_symmetric_group(initial: ase.Atoms, frame: ase.Atoms) + + For cases where the adsorbate has symmetry and the leaving group could be different + atoms / sets of atoms, determine which one make the most sense given the geometry of + the initial and final frames. This is done by minimizing the total distance traveled + by all atoms from initial to final frame. + + :param initial: the initial adsorbate-surface configuration + :type initial: ase.Atoms + :param frame: the final adsorbate-surface configuration being considered. + :type frame: ase.Atoms + + :returns: the mapping to be used which specifies the most apt leaving group + int: the index of the mapping to be used + :rtype: dict + + + .. py:method:: are_all_adsorbate_atoms_overlapping(adsorbate1: ase.Atoms, adsorbate2: ase.Atoms) + + Test to see if all the adsorbate atoms are intersecting to find unique structures. + Systems where they are overlapping are considered the same. + + :param adsorbate1: just the adsorbate atoms of a structure that is being + compared + :type adsorbate1: ase.Atoms + :param adsorbate2: just the adsorbate atoms of the other structure that + is being compared + :type adsorbate2: ase.Atoms + + :returns: + + True if all adsorbate atoms are overlapping (structure is a match) + False if one or more of the adsorbate atoms do not overlap + :rtype: (bool) + + + +.. py:class:: AutoFrameDissociation(reaction: ocpneb.core.Reaction, reactant_system: ase.Atoms, product1_systems: list, product1_energies: list, product2_systems: list, product2_energies: list, r_product1_max: float = None, r_product2_max: float = None, r_product2_min: float = None) + + + Bases: :py:obj:`AutoFrame` + + Base class to hold functions that are shared across the reaction types. + + .. 
py:method:: get_neb_frames(calculator, n_frames: int = 5, n_pdt1_sites: int = 5, n_pdt2_sites: int = 5, fmax: float = 0.05, steps: int = 200) + + Propose final frames for NEB calculations. Perform a relaxation on the final + frame using the calculator provided. Interpolate between the initial + and final frames for a proposed reaction trajectory. Correct the trajectory if + there is any atomic overlap. + + :param calculator: an ase compatible calculator to be used to relax the final frame. + :param n_frames: the number of frames per reaction trajectory + :type n_frames: int + :param n_pdt1_sites: The number of product 1 sites to consider + :type n_pdt1_sites: int + :param n_pdt2_sites: The number of product 2 sites to consider. Note this is + multiplicative with `n_pdt1_sites` (i.e. if `n_pdt1_sites` = 2 and + `n_pdt2_sites` = 3 then a total of 6 final frames will be proposed) + :type n_pdt2_sites: int + :param fmax: force convergence criterion for final frame optimization + :type fmax: float + :param steps: step number termination criterion for final frame optimization + :type steps: int + + :returns: the initial reaction coordinates + :rtype: list[lists] + + + .. py:method:: get_best_sites_for_product1(n_sites: int = 5) + + Wrapper to find product 1 placements to be considered for the final frame + of the NEB. + + :param n_sites: The number of sites for product 1 to consider. Notice this is + multiplicative with product 2 sites (i.e. if 2 is specified here and 3 there) + then a total of 6 initial and final frames will be considered. + :type n_sites: int + + :returns: + + the lowest energy, proximate placements of product + 1 to be used in the final NEB frames + :rtype: (list[ase.Atoms]) + + + .. py:method:: get_best_unique_sites_for_product2(product1: ase.Atoms, n_sites: int = 5) + + Wrapper to find product 2 placements to be considered for the final frame + of the NEB. + + :param product1: The atoms object of the product 1 placement that will be + considered in this function to search for product 1 + product 2 combinations + for the final frame. + :type product1: ase.Atoms + :param n_sites: The number of sites for product 1 to consider. Notice this is + multiplicative with product 2 sites (i.e. if 2 is specified here and 3 there) + then a total of 6 initial and final frames will be considered. + :type n_sites: int + + :returns: + + the lowest energy, proximate placements of product + 2 to be used in the final NEB frames + :rtype: (list[ase.Atoms]) + + + .. py:method:: get_sites_within_r(center_coordinate: numpy.ndarray, all_systems: list, all_system_energies: list, all_systems_binding_idx: int, allowed_radius_max: float, allowed_radius_min: float, n_sites: int = 5) + + Get the n lowest energy, sites of the systems within r. For now n is + 5 or < 5 if there are fewer than 5 unique sites within r. + + :param center_coordinate: the coordinate about which r should be + centered. + :type center_coordinate: np.ndarray + :param all_systems: the list of all systems to be assessed for their + uniqueness and proximity to the center coordinate. + :type all_systems: list + :param all_systems_binding_idx: the idx of the adsorbate atom that is + bound in `all_systems` + :type all_systems_binding_idx: int + :param allowed_radius_max: the outer radius about `center_coordinate` + in which the adsorbate must lie to be considered. + :type allowed_radius_max: float + :param allowed_radius_min: the inner radius about `center_coordinate` + which the adsorbate must lie outside of to be considered. 
+ :type allowed_radius_min: float + :param n_sites: the number of unique sites in r that will be chosen. + :type n_sites: int + + :returns: list of systems identified as candidates. + :rtype: (list[ase.Atoms]) + + + +.. py:class:: AutoFrameTransfer(reaction: ocpneb.core.Reaction, reactant1_systems: list, reactant2_systems: list, reactant1_energies: list, reactant2_energies: list, product1_systems: list, product1_energies: list, product2_systems: list, product2_energies: list, r_traverse_max: float, r_react_max: float, r_react_min: float) + + + Bases: :py:obj:`AutoFrame` + + Base class to hold functions that are shared across the reaction types. + + .. py:method:: get_neb_frames(calculator, n_frames: int = 10, n_initial_frames: int = 5, n_final_frames_per_initial: int = 5, fmax: float = 0.05, steps: int = 200) + + Propose final frames for NEB calculations. Perform a relaxation on the final + frame using the calculator provided. Linearly interpolate between the initial + and final frames for a proposed reaction trajectory. Correct the trajectory if + there is any atomic overlap. + + :param calculator: an ase compatible calculator to be used to relax the initial and + final frames. + :param n_frames: the number of frames per reaction trajectory + :type n_frames: int + :param n_initial_frames: The number of initial frames to consider + :type n_initial_frames: int + :param n_final_frames_per_initial: The number of final frames per inital frame to consider + :type n_final_frames_per_initial: int + :param fmax: force convergence criterion for final frame optimization + :type fmax: float + :param steps: step number termination criterion for final frame optimization + :type steps: int + + :returns: the initial reaction coordinates + :rtype: list[lists] + + + .. py:method:: get_system_pairs_initial() + + Get the initial frames for the NEB. This is done by finding the closest + pair of systems from `systems1` and `systems2` for which the interstitial distance + between all adsorbate atoms is less than `rmax` and greater than `rmin`. + + :returns: the initial frames for the NEB + list[float]: the pseudo energies of the initial frames (i.e just the sum of the + individual adsorption energies) + :rtype: list[ase.Atoms] + + + .. py:method:: get_system_pairs_final(system1_coord, system2_coord) + + Get the final frames for the NEB. This is done by finding the closest + pair of systems from `systems1` and `systems2` for which the distance + traversed by the adsorbate from the initial frame to the final frame is + less than `rmax` and the minimum interstitial distance between the two + products in greater than `rmin`. + + :returns: the initial frames for the NEB + list[float]: the pseudo energies of the initial frames + :rtype: list[ase.Atoms] + + + +.. py:class:: AutoFrameDesorption(reaction: ocpneb.core.Reaction, reactant_systems: list, reactant_energies: list, z_desorption: float) + + + Bases: :py:obj:`AutoFrame` + + Base class to hold functions that are shared across the reaction types. + + .. py:method:: get_neb_frames(calculator, n_frames: int = 5, n_systems: int = 5, fmax: float = 0.05, steps: int = 200) + + Propose final frames for NEB calculations. Perform a relaxation on the final + frame using the calculator provided. Linearly interpolate between the initial + and final frames for a proposed reaction trajectory. Correct the trajectory if + there is any atomic overlap. + + :param calculator: an ase compatible calculator to be used to relax the final frame. 
+ :param n_frames: the number of frames per reaction trajectory + :type n_frames: int + :param n_pdt1_sites: The number of product 1 sites to consider + :type n_pdt1_sites: int + :param n_pdt2_sites: The number of product 2 sites to consider. Note this is + multiplicative with `n_pdt1_sites` (i.e. if `n_pdt1_sites` = 2 and + `n_pdt2_sites` = 3 then a total of 6 final frames will be proposed) + :type n_pdt2_sites: int + :param fmax: force convergence criterion for final frame optimization + :type fmax: float + :param steps: step number termination criterion for final frame optimization + :type steps: int + + :returns: the initial reaction coordinates + :rtype: list[lists] + + + +.. py:function:: interpolate_and_correct_frames(initial: ase.Atoms, final: ase.Atoms, n_frames: int, reaction: ocpneb.core.Reaction, map_idx: int) + + Given the initial and final frames, perform the following: + (1) Unwrap the final frame if it is wrapped around the cell + (2) Interpolate between the initial and final frames + + :param initial: the initial frame of the NEB + :type initial: ase.Atoms + :param final: the proposed final frame of the NEB + :type final: ase.Atoms + :param n_frames: The desired number of frames for the NEB (not including initial and final) + :type n_frames: int + :param reaction: the reaction object which provides pertinent info + :type reaction: ocpneb.core.Reaction + :param map_idx: the index of the mapping to use for the final frame + :type map_idx: int + + +.. py:function:: get_shortest_path(initial: ase.Atoms, final: ase.Atoms) + + Find the shortest path for all atoms about pbc and reorient the final frame so the + atoms align with this shortest path. This allows us to perform a linear interpolation + that does not interpolate jumps across pbc. + + :param initial: the initial frame of the NEB + :type initial: ase.Atoms + :param final: the proposed final frame of the NEB to be corrected + :type final: ase.Atoms + + :returns: the corrected final frame + (ase.Atoms): the initial frame tiled (3,3,1), which is used in later steps + (ase.Atoms): the final frame tiled (3,3,1), which is used in later steps + :rtype: (ase.Atoms) + + +.. py:function:: traverse_adsorbate_transfer(reaction: ocpneb.core.Reaction, initial: ase.Atoms, final: ase.Atoms, initial_tiled: ase.Atoms, final_tiled: ase.Atoms, edge_list_final: list) + + Traverse reactant 1, reactant 2, product 1 and product 2 in a depth first search of + the bond graph. Unwrap the atoms to minimize the distance over the bonds. This ensures + that when we perform the linear interpolation, the adsorbate moves as a single moiety + and avoids accidental bond breaking events over pbc. + + :param reaction: the reaction object which provides pertinent info + :type reaction: ocpneb.core.Reaction + :param initial: the initial frame of the NEB + :type initial: ase.Atoms + :param final: the proposed final frame of the NEB to be corrected + :type final: ase.Atoms + :param initial_tiled: the initial frame tiled (3,3,1) + :type initial_tiled: ase.Atoms + :param final_tiled: the final frame tiled (3,3,1) + :type final_tiled: ase.Atoms + :param edge_list_final: the edge list of the final frame corrected with mapping + idx changes + :type edge_list_final: list + + :returns: the corrected initial frame + (ase.Atoms): the corrected final frame + :rtype: (ase.Atoms) + + +.. 
py:function:: traverse_adsorbate_dissociation(reaction: ocpneb.core.Reaction, initial: ase.Atoms, final: ase.Atoms, initial_tiled: ase.Atoms, final_tiled: ase.Atoms, edge_list_final: int) + + Traverse reactant 1, product 1 and product 2 in a depth first search of + the bond graph. Unwrap the atoms to minimize the distance over the bonds. This ensures + that when we perform the linear interpolation, the adsorbate moves as a single moiety + and avoids accidental bond breaking events over pbc. + + :param reaction: the reaction object which provides pertinent info + :type reaction: ocpneb.core.Reaction + :param initial: the initial frame of the NEB + :type initial: ase.Atoms + :param final: the proposed final frame of the NEB to be corrected + :type final: ase.Atoms + :param initial_tiled: the initial frame tiled (3,3,1) + :type initial_tiled: ase.Atoms + :param final_tiled: the final frame tiled (3,3,1) + :type final_tiled: ase.Atoms + :param edge_list_final: the edge list of the final frame corrected with mapping + idx changes + :type edge_list_final: list + + :returns: the corrected initial frame + (ase.Atoms): the corrected final frame + :rtype: (ase.Atoms) + + +.. py:function:: traverse_adsorbate_desorption(reaction: ocpneb.core.Reaction, initial: ase.Atoms, final: ase.Atoms, initial_tiled: ase.Atoms, final_tiled: ase.Atoms) + + Traverse reactant 1 and product 1 in a depth first search of + the bond graph. Unwrap the atoms to minimize the distance over the bonds. This ensures + that when we perform the linear interpolation, the adsorbate moves as a single moiety + and avoids accidental bond breaking events over pbc. + + :param reaction: the reaction object which provides pertinent info + :type reaction: ocpneb.core.Reaction + :param initial: the initial frame of the NEB + :type initial: ase.Atoms + :param final: the proposed final frame of the NEB to be corrected + :type final: ase.Atoms + :param initial_tiled: the initial frame tiled (3,3,1) + :type initial_tiled: ase.Atoms + :param final_tiled: the final frame tiled (3,3,1) + :type final_tiled: ase.Atoms + :param edge_list_final: the edge list of the final frame corrected with mapping + idx changes + :type edge_list_final: list + + :returns: the corrected initial frame + (ase.Atoms): the corrected final frame + :rtype: (ase.Atoms) + + +.. py:function:: get_product2_idx(reaction: ocpneb.core.Reaction, edge_list_final: list, traversal_rxt1_final: list) + + For dissociation only. Use the information about the initial edge list and final edge + list to determine which atom in product 2 lost a bond in the reaction and use this + as the binding index for traversal in `traverse_adsorbate_dissociation`. + + :param reaction: the reaction object which provides pertinent info + :type reaction: ocpneb.core.Reaction + :param edge_list_final: the edge list of the final frame corrected with mapping + idx changes + :type edge_list_final: list + :param traversal_rxt1_final: the traversal of reactant 1 for the final frame + :type traversal_rxt1_final: list + + :returns: the binding index of product 2 + :rtype: (int) + + +.. py:function:: traverse_adsorbate_general(traversal_rxt, slab_len: int, starting_node_idx: int, equivalent_idx_factors: numpy.ndarray, frame: ase.Atoms, frame_tiled: ase.Atoms) + + Perform the traversal to reposition atoms so that the distance along bonds is + minimized. + + :param traversal_rxt: the traversal of the adsorbate to be traversed. It is + the list of edges ordered by depth first search. 
+ :type traversal_rxt: list + :param slab_len: the number of atoms in the slab + :type slab_len: int + :param starting_node_idx: the index of the atom to start the traversal from + :type starting_node_idx: int + :param equivalent_idx_factors: the values to add to the untiled index + which gives equivalent indices (i.e. copies of that atom in the tiled system) + :type equivalent_idx_factors: np.ndarray + :param frame: the frame to be corrected + :type frame: ase.Atoms + :param frame_tiled: the tiled (3,3,1) version of the frame which will be + corrected + :type frame_tiled: ase.Atoms + + :returns: the corrected frame + :rtype: (ase.Atoms) + + +.. py:function:: unwrap_atoms(initial: ase.Atoms, final: ase.Atoms, reaction: ocpneb.core.Reaction, map_idx: int) + + Make corrections to the final frame so it is no longer wrapped around the cell, + if it has jumped over the pbc. Ensure that for each adsorbate moiety, absolute bond distances + for all edges that exist in the initial and final frames are minimized regardless of cell location. + This enforces that the traversal of the adsorbates happens along the same path, which is not + necessarily the minimum distance path for each atom. Changes are made in place. + + :param initial: the initial atoms object to which the final atoms should + be proximate + :type initial: ase.Atoms + :param final: the final atoms object to be corrected + :type final: ase.Atoms + :param reaction: the reaction object which provides pertinent info + :type reaction: ocpneb.core.Reaction + :param map_idx: the index of the mapping to use for the final frame + :type map_idx: int + + +.. py:function:: interpolate(initial_frame: ase.Atoms, final_frame: ase.Atoms, num_frames: int) + + Interpolate between the initial and final frames starting with a linear interpolation + along the atom-wise vectors from initial to final. Then iteratively correct the + positions so atomic overlap is avoided/reduced. When iteratively updating, the + positions of adjacent frames are considered to avoid large jumps in the trajectory. + + :param initial_frame: the initial frame which will be interpolated from + :type initial_frame: ase.Atoms + :param final_frame: the final frame which will be interpolated to + :type final_frame: ase.Atoms + :param num_frames: the number of frames to be interpolated between the initial + and final frames + :type num_frames: int + + :returns: the interpolated frames + :rtype: (list[ase.Atoms]) + + +.. py:function:: is_edge_list_respected(frame: ase.Atoms, edge_list: list) + + Check to see that the expected adsorbate-adsorbate edges are found and no additional + edges exist between the adsorbate atoms. + + :param frame: the atoms object for which edges will be checked. + This must comply with ocp tagging conventions. + :type frame: ase.Atoms + :param edge_list: The expected edges + :type edge_list: list[tuples] + + +.. py:function:: reorder_edge_list(edge_list: list, mapping: dict) + + For the final edge list, apply the mapping so the edges correspond to the correctly + concatenated object. + + :param edge_list: the final edgelist + :type edge_list: list[tuples] + :param mapping: the mapping so the final atoms concatenated have indices that correctly map + to the initial atoms. + + +.. py:function:: is_adsorbate_adsorbed(adsorbate_slab_config: ase.Atoms) + + Check to see if the adsorbate is adsorbed on the surface. + + :param adsorbate_slab_config: the combined adsorbate and slab configuration + with adsorbate atoms tagged as 2s and surface atoms tagged as 1s. 
+ :type adsorbate_slab_config: ase.Atoms + + :returns: True if the adsorbate is adsorbed, False otherwise. + :rtype: (bool) + + diff --git a/_sources/autoapi/ocpneb/core/index.rst b/_sources/autoapi/ocpneb/core/index.rst new file mode 100644 index 000000000..594b121ef --- /dev/null +++ b/_sources/autoapi/ocpneb/core/index.rst @@ -0,0 +1,67 @@ +:py:mod:`ocpneb.core` +===================== + +.. py:module:: ocpneb.core + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + autoframe/index.rst + ocpneb/index.rst + reaction/index.rst + + +Package Contents +---------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpneb.core.Reaction + ocpneb.core.OCPNEB + + + + +.. py:class:: Reaction(reaction_db_path: str, adsorbate_db_path: str, reaction_id_from_db: int = None, reaction_str_from_db: str = None, reaction_type: str = None) + + + Initialize Reaction object + + .. py:method:: get_desorption_mapping(reactant) + + Get mapping for desorption reaction + + + +.. py:class:: OCPNEB(images, checkpoint_path, k=0.1, fmax=0.05, climb=False, parallel=False, remove_rotation_and_translation=False, world=None, dynamic_relaxation=True, scale_fmax=0.0, method='aseneb', allow_shared_calculator=False, precon=None, cpu=False, batch_size=4) + + + Bases: :py:obj:`ase.neb.DyNEB` + + .. py:method:: load_checkpoint(checkpoint_path: str) -> None + + Load existing trained model + + :param checkpoint_path: string + Path to trained model + + + .. py:method:: get_forces() + + Evaluate and return the forces. + + + .. py:method:: set_positions(positions) + + + .. py:method:: get_precon_forces(forces, energies, images) + + + diff --git a/_sources/autoapi/ocpneb/core/ocpneb/index.rst b/_sources/autoapi/ocpneb/core/ocpneb/index.rst new file mode 100644 index 000000000..d16a00121 --- /dev/null +++ b/_sources/autoapi/ocpneb/core/ocpneb/index.rst @@ -0,0 +1,44 @@ +:py:mod:`ocpneb.core.ocpneb` +============================ + +.. py:module:: ocpneb.core.ocpneb + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpneb.core.ocpneb.OCPNEB + + + + +.. py:class:: OCPNEB(images, checkpoint_path, k=0.1, fmax=0.05, climb=False, parallel=False, remove_rotation_and_translation=False, world=None, dynamic_relaxation=True, scale_fmax=0.0, method='aseneb', allow_shared_calculator=False, precon=None, cpu=False, batch_size=4) + + + Bases: :py:obj:`ase.neb.DyNEB` + + .. py:method:: load_checkpoint(checkpoint_path: str) -> None + + Load existing trained model + + :param checkpoint_path: string + Path to trained model + + + .. py:method:: get_forces() + + Evaluate and return the forces. + + + .. py:method:: set_positions(positions) + + + .. py:method:: get_precon_forces(forces, energies, images) + + + diff --git a/_sources/autoapi/ocpneb/core/reaction/index.rst b/_sources/autoapi/ocpneb/core/reaction/index.rst new file mode 100644 index 000000000..91927d10e --- /dev/null +++ b/_sources/autoapi/ocpneb/core/reaction/index.rst @@ -0,0 +1,30 @@ +:py:mod:`ocpneb.core.reaction` +============================== + +.. py:module:: ocpneb.core.reaction + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + ocpneb.core.reaction.Reaction + + + + +.. py:class:: Reaction(reaction_db_path: str, adsorbate_db_path: str, reaction_id_from_db: int = None, reaction_str_from_db: str = None, reaction_type: str = None) + + + Initialize Reaction object + + .. 
py:method:: get_desorption_mapping(reactant) + + Get mapping for desorption reaction + + + diff --git a/_sources/autoapi/ocpneb/databases/index.rst b/_sources/autoapi/ocpneb/databases/index.rst new file mode 100644 index 000000000..92cbb90a0 --- /dev/null +++ b/_sources/autoapi/ocpneb/databases/index.rst @@ -0,0 +1,21 @@ +:py:mod:`ocpneb.databases` +========================== + +.. py:module:: ocpneb.databases + + +Package Contents +---------------- + +.. py:data:: DISSOCIATION_REACTION_DB_PATH + + + +.. py:data:: DESORPTION_REACTION_DB_PATH + + + +.. py:data:: TRANSFER_REACTION_DB_PATH + + + diff --git a/_sources/autoapi/ocpneb/run_validation/run_validation/index.rst b/_sources/autoapi/ocpneb/run_validation/run_validation/index.rst new file mode 100644 index 000000000..def47087c --- /dev/null +++ b/_sources/autoapi/ocpneb/run_validation/run_validation/index.rst @@ -0,0 +1,165 @@ +:py:mod:`ocpneb.run_validation.run_validation` +============================================== + +.. py:module:: ocpneb.run_validation.run_validation + +.. autoapi-nested-parse:: + + A python script to run a validation of the ML NEB model on a set of NEB calculations. + This script has not been written to run in parallel, but should be modified to do so. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + ocpneb.run_validation.run_validation.get_results_sp + ocpneb.run_validation.run_validation.get_results_ml + ocpneb.run_validation.run_validation.all_converged + ocpneb.run_validation.run_validation.both_barrierless + ocpneb.run_validation.run_validation.both_barriered + ocpneb.run_validation.run_validation.barrierless_converged + ocpneb.run_validation.run_validation.is_failed_sp + ocpneb.run_validation.run_validation.parse_neb_info + ocpneb.run_validation.run_validation.get_single_point + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + ocpneb.run_validation.run_validation.parser + + +.. py:function:: get_results_sp(df2: pandas.DataFrame) + + Get the % success and % convergence for the model considered with + single points performed on the transition states. + + :param df2: The dataframe containing the results of the + NEB calculations. + :type df2: pd.DataFrame + + :returns: + + a tuple of strings containing the % success and + % convergence + :rtype: (tuple[str]) + + +.. py:function:: get_results_ml(df2) + + Get the % success and % convergence for the model considered with + just ML energy and force calls. + + :param df2: The dataframe containing the results of the + NEB calculations. + :type df2: pd.DataFrame + + :returns: + + a tuple of strings containing the % success and + % convergence + :rtype: (tuple[str]) + + +.. py:function:: all_converged(row, ml=True) + + Dataframe function which makes the job of filtering to get % success cleaner. + It assesses the convergence. + + :param row: the dataframe row which the function is applied to + :param ml: boolean value. If `True` just the ML NEB and DFT NEB convergence are + considered. If `False`, the single point convergence is also considered. + + :returns: whether the system is converged + :rtype: bool + + +.. py:function:: both_barrierless(row) + + Dataframe function which makes the job of filtering to get % success cleaner. + It assesses if both DFT and ML find a barrierless transition state. + + :param row: the dataframe row which the function is applied to + + :returns: True if both ML and DFT find a barrierless transition state, False otherwise + :rtype: bool + + +.. 
py:function:: both_barriered(row) + + Dataframe function which makes the job of filtering to get % success cleaner. + It assesses if both DFT and ML find a barriered transition state. + + :param row: the dataframe row which the function is applied to + + :returns: True if both ML and DFT find a barriered transition state, False otherwise + :rtype: bool + + +.. py:function:: barrierless_converged(row) + + Dataframe function which makes the job of filtering to get % success cleaner. + It assesses if both DFT and ML find a barrierless, converged transition state. + + :param row: the dataframe row which the function is applied to + + :returns: + + True if both ML and DFT find a barrierless converged transition state, + False otherwise + :rtype: bool + + +.. py:function:: is_failed_sp(row) + + Dataframe function which makes the job of filtering to get % success cleaner. + It assesses if the single point failed. + + :param row: the dataframe row which the function is applied to + + :returns: True if ths single point failed, otherwise False + :rtype: bool + + +.. py:function:: parse_neb_info(neb_frames: list, calc, conv: bool, entry: dict) + + At the conclusion of the ML NEB, this function processes the important + results and adds them to the entry dictionary. + + :param neb_frames: the ML relaxed NEB frames + :type neb_frames: list[ase.Atoms] + :param calc: the ocp ase Atoms calculator + :param conv: whether or not the NEB achieved forces below the threshold within + the number of allowed steps + :type conv: bool + :param entry: the entry corresponding to the NEB performed + :type entry: dict + + +.. py:function:: get_single_point(atoms: ase.Atoms, vasp_dir: str, vasp_flags: dict, vasp_command: str) + + Gets a single point on the atoms passed. + + :param atoms: the atoms object on which the single point will be performed + :type atoms: ase.Atoms + :param vasp_dir: the path where the vasp files should be written + :type vasp_dir: str + :param vasp_flags: a dictionary of the vasp INCAR flags + :param vasp_command: the + :type vasp_command: str + + +.. py:data:: parser + + + diff --git a/_sources/core/ase_dataset_creation.md b/_sources/core/ase_dataset_creation.md index 8c6748692..df6661fed 100644 --- a/_sources/core/ase_dataset_creation.md +++ b/_sources/core/ase_dataset_creation.md @@ -1,7 +1,7 @@ # Making and using ASE datasets -There are multiple ways to train and evaluate OCP models on data other than OC20 and OC22. Writing an LMDB is the most performant option. However, ASE-based dataset formats are also included as a convenience for people with existing data who simply want to try OCP tools without needing to learn about LMDBs. +There are multiple ways to train and evaluate FAIRChem models on data other than OC20 and OC22. Writing an LMDB is the most performant option. However, ASE-based dataset formats are also included as a convenience for people with existing data who simply want to try fairchem tools without needing to learn about LMDBs. ## Using an ASE Database diff --git a/_sources/core/datasets/oc20.md b/_sources/core/datasets/oc20.md index 66584cf35..dc0aae4ef 100644 --- a/_sources/core/datasets/oc20.md +++ b/_sources/core/datasets/oc20.md @@ -9,7 +9,7 @@ IS2* datasets are stored as LMDB files and are ready to be used upon download. S2EF train+val datasets require an additional preprocessing step. 
-For convenience, a self-contained script can be found [here](https://github.com/Open-Catalyst-Project/ocp/blob/main/src/fairchem/scripts/download_data.py) to download, preprocess, and organize the data directories to be readily usable by the existing [configs](https://github.com/Open-Catalyst-Project/ocp/tree/main/src/fairchem/configs). +For convenience, a self-contained script can be found [here](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/scripts/download_data.py) to download, preprocess, and organize the data directories to be readily usable by the existing [configs](https://github.com/FAIR-Chem/fairchem/tree/main/src/fairchem/core/configs). For IS2*, run the script as: @@ -47,10 +47,10 @@ python scripts/download_data.py --task s2ef --split test -To download and process the dataset in a directory other than your local `ocp/data` folder, add the following command line argument `--data-path`. +To download and process the dataset in a directory other than your local `fairchem/data` folder, add the following command line argument `--data-path`. -Note that the baseline [configs](https://github.com/Open-Catalyst-Project/ocp/tree/main/src/fairchem/configs) -expect the data to be found in `ocp/data`, make sure you symlink your directory or +Note that the baseline [configs](https://github.com/FAIR-Chem/fairchem/tree/main/src/fairchem/core/configs) +expect the data to be found in `fairchem/data`, make sure you symlink your directory or modify the paths in the configs accordingly. The following sections list dataset download links and sizes for various S2EF diff --git a/_sources/core/fine-tuning/fine-tuning-oxides.md b/_sources/core/fine-tuning/fine-tuning-oxides.md index e7da2fdc9..574989a33 100644 --- a/_sources/core/fine-tuning/fine-tuning-oxides.md +++ b/_sources/core/fine-tuning/fine-tuning-oxides.md @@ -185,7 +185,7 @@ The train set is used for training. The test and val sets are used to check for You choose the splits you want, 80:10:10 is common. We take a simple approach to split the database here. We make an array of integers that correspond to the ids, randomly shuffle them, and then get each row in the randomized order and write them to a new db. -We provide some helper functions in `ocpmodels.common.tutorial_utils` to streamline this process. +We provide some helper functions in `fairchem.core.common.tutorial_utils` to streamline this process. ```{code-cell} ipython3 from fairchem.core.common.tutorial_utils import train_test_val_split @@ -235,7 +235,7 @@ yml ## Running the training job -`ocp` provides a `main.py` file that is used for training. Here we construct the Python command you need to run, and run it. `main.py` is not executable, so we have to run it with python, and you need the absolute path to it, which we get from the `ocp_main()` that is defined in the ocpmodels.common.tutorial_utils. +`fairchem` provides a `main.py` file that is used for training. Here we construct the Python command you need to run, and run it. `main.py` is not executable, so we have to run it with python, and you need the absolute path to it, which we get from the `fairchem_main()` that is defined in the fairchem.core.common.tutorial_utils. you must set a `mode` and provide a `config-yml`. We provide a checkpoint for a starting point, if you don't do this, it will start from scratch. 
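For readers following along, here is a minimal sketch of the shuffle-and-rewrite split described above, done by hand with `ase.db`. This is only an illustration: the `train_test_val_split` helper in `fairchem.core.common.tutorial_utils` does the same job for you, and the `oxides.db` source filename below is an assumed placeholder.

```python
import numpy as np
from ase.db import connect

src = connect('oxides.db')                      # assumed source database
ids = [row.id for row in src.select()]
rng = np.random.default_rng(seed=0)
rng.shuffle(ids)                                # randomize the row order

n = len(ids)
splits = {'train.db': ids[:int(0.8 * n)],       # 80:10:10 split
          'test.db':  ids[int(0.8 * n):int(0.9 * n)],
          'val.db':   ids[int(0.9 * n):]}

for fname, subset in splits.items():
    dst = connect(fname)
    for i in subset:
        # toatoms() carries the stored energy/forces along as single-point results
        dst.write(src.get(id=i).toatoms())
```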
@@ -261,10 +261,10 @@ This can take up to 30 minutes for 80 epochs, so we only do a few here to see wh :tags: [hide-output] import time -from fairchem.core.common.tutorial_utils import ocp_main +from fairchem.core.common.tutorial_utils import fairchem_main t0 = time.time() -! python {ocp_main()} --mode train --config-yml {yml} --checkpoint {checkpoint_path} --run-dir fine-tuning --identifier ft-oxides --amp > train.txt 2>&1 +! python {fairchem_main()} --mode train --config-yml {yml} --checkpoint {checkpoint_path} --run-dir fine-tuning --identifier ft-oxides --amp > train.txt 2>&1 print(f'Elapsed time = {time.time() - t0:1.1f} seconds') ``` diff --git a/_sources/core/gotchas.md b/_sources/core/gotchas.md index da05ec6ac..f548bb9ce 100644 --- a/_sources/core/gotchas.md +++ b/_sources/core/gotchas.md @@ -11,7 +11,7 @@ kernelspec: name: python3 --- -Common gotchas with OCP +Common gotchas with fairchem --------------------------------- # OutOfMemoryError @@ -79,7 +79,7 @@ add_adsorbate(slab, 'O', height=1.2, position='fcc') from fairchem.core.models.model_registry import model_name_to_local_file # OC20 model - trained on adsorption energies -checkpoint_path = model_name_to_local_file('GemNet-OC All', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-OC-S2EF-OC20-All', local_cache='/tmp/ocp_checkpoints/') with contextlib.redirect_stdout(StringIO()) as _: calc = OCPCalculator(checkpoint_path=checkpoint_path, cpu=False) @@ -92,7 +92,7 @@ slab.get_potential_energy() ```{code-cell} ipython3 # An OC22 checkpoint - trained on total energy -checkpoint_path = model_name_to_local_file('GemNet-OCOC22', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/ocp_checkpoints/') with contextlib.redirect_stdout(StringIO()) as _: calc = OCPCalculator(checkpoint_path=checkpoint_path, cpu=False) @@ -105,7 +105,7 @@ slab.get_potential_energy() ```{code-cell} ipython3 # This eSCN model is trained on adsorption energies -checkpoint_path = model_name_to_local_file('eSCN-L4-M2-Lay12 2M', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('eSCN-L4-M2-Lay12-S2EF-OC20-2M', local_cache='/tmp/ocp_checkpoints/') with contextlib.redirect_stdout(StringIO()) as _: calc = OCPCalculator(checkpoint_path=checkpoint_path, cpu=False) @@ -128,12 +128,12 @@ WARNING:root:Unrecognized arguments: ['symmetric_edge_symmetrization'] You can ignore this warning, it is not important for predictions. -## Unable to identify OCP trainer +## Unable to identify ocp trainer The trainer is not specified in some checkpoints, and defaults to `forces` which means energy and forces are calculated. This is the default for the ASE OCP calculator, and this warning just alerts you it is setting that. ``` -WARNING:root:Unable to identify OCP trainer, defaulting to `forces`. Specify the `trainer` argument into OCPCalculator if otherwise. +WARNING:root:Unable to identify ocp trainer, defaulting to `forces`. Specify the `trainer` argument into OCPCalculator if otherwise. ``` +++ @@ -224,7 +224,7 @@ atoms.get_potential_energy() # Stochastic simulation results Some models are not deterministic (SCN/eSCN/EqV2), i.e. you can get slightly different answers each time you run it. -An example is shown below. See [Issue 563](https://github.com/Open-Catalyst-Project/ocp/issues/563) for more discussion. +An example is shown below. See [Issue 563](https://github.com/FAIR-Chem/fairchem/issues/563) for more discussion. 
This happens because a random selection of is made to sample edges, and a different selection is made each time you run it. ```{code-cell} ipython3 @@ -254,7 +254,7 @@ for result in results: # The forces don't sum to zero -In DFT, the forces on all the atoms should sum to zero; otherwise, there is a net translational or rotational force present. This is not enforced in OCP models. Instead, individual forces are predicted, with no constraint that they sum to zero. If the force predictions are very accurate, then they sum close to zero. You can further improve this if you subtract the mean force from each atom. +In DFT, the forces on all the atoms should sum to zero; otherwise, there is a net translational or rotational force present. This is not enforced in fairchem models. Instead, individual forces are predicted, with no constraint that they sum to zero. If the force predictions are very accurate, then they sum close to zero. You can further improve this if you subtract the mean force from each atom. ```{code-cell} ipython3 from fairchem.core.models.model_registry import model_name_to_local_file diff --git a/_sources/core/inference.md b/_sources/core/inference.md index abc1b627c..da834e2b0 100644 --- a/_sources/core/inference.md +++ b/_sources/core/inference.md @@ -64,7 +64,7 @@ print(available_pretrained_models) ```{code-cell} ipython3 from fairchem.core.models.model_registry import model_name_to_local_file -checkpoint_path = model_name_to_local_file('GemNet-dTOC22', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-dT-S2EFS-OC22', local_cache='/tmp/ocp_checkpoints/') checkpoint_path ``` @@ -101,10 +101,10 @@ It is a good idea to redirect the output to a file. If the output gets too large ```{code-cell} ipython3 %%capture inference import time -from fairchem.core.common.tutorial_utils import ocp_main +from fairchem.core.common.tutorial_utils import fairchem_main t0 = time.time() -! python {ocp_main()} --mode predict --config-yml {yml} --checkpoint {checkpoint_path} --amp +! python {fairchem_main()} --mode predict --config-yml {yml} --checkpoint {checkpoint_path} --amp print(f'Elapsed time = {time.time() - t0:1.1f} seconds') ``` @@ -197,7 +197,7 @@ The results should be the same. It is worth noting the default precision of predictions is float16 with main.py, but with the ASE calculator the default precision is float32. Supposedly you can specify `--task.prediction_dtype=float32` at the command line to or specify it in the config.yml like we do above, but as of the tutorial this does not resolve the issue. -As noted above (see also [Issue 542](https://github.com/Open-Catalyst-Project/ocp/issues/542)), the ASE calculator and main.py use different precisions by default, which can lead to small differences. +As noted above (see also [Issue 542](https://github.com/FAIR-Chem/fairchem/issues/542)), the ASE calculator and main.py use different precisions by default, which can lead to small differences. 
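Related to the zero-net-force point above, here is a small illustrative snippet (not from the original docs) that removes the residual net force by subtracting the mean force from every atom; `atoms` is assumed to already have an `OCPCalculator` attached.

```python
import numpy as np

forces = atoms.get_forces()                        # (n_atoms, 3) model predictions
net_before = np.linalg.norm(forces.sum(axis=0))
forces_centered = forces - forces.mean(axis=0)     # enforce zero net force
net_after = np.linalg.norm(forces_centered.sum(axis=0))
print(f'net |F| before: {net_before:.4f} eV/A, after: {net_after:.2e} eV/A')
```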
```{code-cell} ipython3 np.mean(np.abs(results['energy'][sind] - OCP * natoms)) # MAE diff --git a/_sources/core/install.md b/_sources/core/install.md index 8fb350f42..b60c7a7e0 100644 --- a/_sources/core/install.md +++ b/_sources/core/install.md @@ -3,27 +3,27 @@ ## conda or better yet [mamba](https://mamba.readthedocs.io/en/latest/user_guide/mamba.html) - easy We do not have official conda recipes (yet!), so to install with conda or mamba you will need to clone the -[ocp repo](https://github.com/FAIR-Chem/fairchem) and run the following from inside the repo directory to create an environment with all the +[fairchem](https://github.com/FAIR-Chem/fairchem) and run the following from inside the repo directory to create an environment with all the necessary dependencies. -1. Create an *ocp-models* environment +1. Create a *fairchem* environment 1. **GPU** The default environment uses cuda 11.8, if you need a different version you will have to edit *pytorch-cuda* version accordingly. ```bash - conda create -f env.gpu.yml + conda env create -f packages/env.gpu.yml ``` 2. **CPU** ```bash - conda create -f env.cpu.yml + conda env create -f packages/env.cpu.yml ``` -2. Activate the environment and install `ocpmodels` +2. Activate the environment and install `fairchem-core` ```bash - conda activate ocp-models - pip install . + conda activate fair-chem + pip install packages/fairchem-core ``` ## PyPi - flexible @@ -34,21 +34,28 @@ necessary dependencies. similarly by selecting the appropriate versions in the official [PyG docs](https://pytorch-geometric.readthedocs.io/en/latest/notes/installation.html) -3. Install `ocpmodels` +3. Install `fairchem-core` 1. From test-PyPi (until we have our official release on PyPi soon!) ```bash - pip install -i https://test.pypi.org/simple/ ocp-models + pip install -i https://test.pypi.org/simple/fairchem-core ``` 2. Or by cloning the repo and then using pip ```bash - pip install . + pip install packages/fairchem-core ``` +## Additional packages + +`fairchem` is a namespace package, meaning all packages are installed seperately. If you need +to install other packages you can do so by: +```bash +pip install -e pip install packages/fairchem-{package-to-install} +``` ## Dev install -If you plan to make contributions you will need to clone the repo and install `ocp-models` in editable mode with dev +If you plan to make contributions you will need to clone the repo and install `fairchem-core` in editable mode with dev dependencies, ```bash -pip install -e .[dev] +pip install -e pip install packages/fairchem-core[dev] ``` diff --git a/_sources/core/lmdb_dataset_creation.md b/_sources/core/lmdb_dataset_creation.md index fa5913ae9..623d5cc91 100644 --- a/_sources/core/lmdb_dataset_creation.md +++ b/_sources/core/lmdb_dataset_creation.md @@ -16,8 +16,8 @@ kernelspec: Storing your data in an LMDB ensures very fast random read speeds for the fastest supported throughput. This was the recommended option for the majority of fairchem use cases, but has since been deprecated for [ASE LMDB files](ase_dataset_creation) -This notebook provides an overview of how to create LMDB datasets to be used with the OCP repo. This tutorial is intended -for those who wish to use OCP to train on their own datasets. Those interested in just using OCP data need not worry +This notebook provides an overview of how to create LMDB datasets to be used with the FAIRChem repo. This tutorial is intended +for those who wish to use FAIRChem to train on their own datasets. 
Those interested in just using FAIRChem data need not worry about these steps as they've been automated as part of this [download script](https://github.com/FAIR-Chem/fairchem/blob/master/src/core/scripts/download_data.py). @@ -128,7 +128,7 @@ for system in system_paths: initial_struc.pos_relaxed = relaxed_struc.pos # Filter data if necessary - # OCP filters adsorption energies > |10| eV + # FAIRChem filters adsorption energies > |10| eV initial_struc.sid = idx # arbitrary unique identifier @@ -195,7 +195,7 @@ for fid, data in tqdm(enumerate(data_objects), total=len(data_objects)): data.tags = torch.LongTensor(tags) # Filter data if necessary - # OCP filters adsorption energies > |10| eV and forces > |50| eV/A + # FAIRChem filters adsorption energies > |10| eV and forces > |50| eV/A # no neighbor edge case check if data.edge_index.shape[1] == 0: @@ -226,7 +226,7 @@ dataset[0] ### Advanced usage -LmdbDataset supports multiple LMDB files because the need to highly parallelize the dataset construction process. With OCP's largest split containing 135M+ frames, the need to parallelize the LMDB generation process for these was necessary. If you find yourself needing to deal with very large datasets we recommend parallelizing this process. +LmdbDataset supports multiple LMDB files because the need to highly parallelize the dataset construction process. With FAIRChem's largest split containing 135M+ frames, the need to parallelize the LMDB generation process for these was necessary. If you find yourself needing to deal with very large datasets we recommend parallelizing this process. +++ diff --git a/_sources/core/model_checkpoints.md b/_sources/core/model_checkpoints.md index f3fa4bc7c..df825dcb5 100644 --- a/_sources/core/model_checkpoints.md +++ b/_sources/core/model_checkpoints.md @@ -106,7 +106,7 @@ OC20 dataset or pretrained models, as well as the original paper for each model: |GemNet-dT | OC22 |[checkpoint](https://dl.fbaipublicfiles.com/opencatalystproject/models/2022_09/oc22/s2ef/gndt_oc22_all_s2ef.pt) \| [config](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/configs/oc22/s2ef/gemnet-dt/gemnet-dT.yml) |0.032 |1.127 | |GemNet-OC | OC22 |[checkpoint](https://dl.fbaipublicfiles.com/opencatalystproject/models/2022_09/oc22/s2ef/gnoc_oc22_all_s2ef.pt) \| [config](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/configs/oc22/s2ef/gemnet-oc/gemnet_oc.yml) |0.030 |0.563 | |GemNet-OC | OC20+OC22 |[checkpoint](https://dl.fbaipublicfiles.com/opencatalystproject/models/2022_09/oc22/s2ef/gnoc_oc22_oc20_all_s2ef.pt) \| [config](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/configs/oc22/s2ef/gemnet-oc/gemnet_oc_oc20_oc22.yml) |0.027 |0.483 | -|GemNet-OC
(trained with `enforce_max_neighbors_strictly=False`, [#467](https://github.com/Open-Catalyst-Project/ocp/pull/467)) | OC20+OC22 |[checkpoint](https://dl.fbaipublicfiles.com/opencatalystproject/models/2023_05/oc22/s2ef/gnoc_oc22_oc20_all_s2ef.pt) \| [config](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/configs/oc22/s2ef/gemnet-oc/gemnet_oc_oc20_oc22_degen_edges.yml) |0.027 |0.458 | +|GemNet-OC
(trained with `enforce_max_neighbors_strictly=False`, [#467](https://github.com/FAIR-Chem/fairchem/pull/467)) | OC20+OC22 |[checkpoint](https://dl.fbaipublicfiles.com/opencatalystproject/models/2023_05/oc22/s2ef/gnoc_oc22_oc20_all_s2ef.pt) \| [config](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/configs/oc22/s2ef/gemnet-oc/gemnet_oc_oc20_oc22_degen_edges.yml) |0.027 |0.458 | |GemNet-OC | OC20->OC22 |[checkpoint](https://dl.fbaipublicfiles.com/opencatalystproject/models/2022_09/oc22/s2ef/gnoc_finetune_all_s2ef.pt) \| [config](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/configs/oc22/s2ef/gemnet-oc/gemnet_oc_finetune.yml) |0.030 |0.417 | |EquiformerV2 ($\lambda_E$=4, $\lambda_F$=100) | OC22 | [checkpoint](https://dl.fbaipublicfiles.com/opencatalystproject/models/2023_10/oc22/s2ef/eq2_121M_e4_f100_oc22_s2ef.pt) \| [config](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/configs/oc22/s2ef/equiformer_v2/equiformer_v2_N@18_L@6_M@2_e4_f100_121M.yml) | 0.023 | 0.447 diff --git a/_sources/core/model_faq.md b/_sources/core/model_faq.md index 8046844be..acde8270b 100644 --- a/_sources/core/model_faq.md +++ b/_sources/core/model_faq.md @@ -4,7 +4,7 @@ If you don't find your question answered here, please feel free to [file a GitHu ## Models -### Are predictions from OCP models deterministic? +### Are predictions from FAIRChem models deterministic? By deterministic, we mean that multiple calls to the same function, given the same inputs (and seed), will produce the same results. @@ -83,7 +83,7 @@ few batches of data prior to training in order to stabilize the variance of activations. See [Sec. 6 in the GemNet paper](https://arxiv.org/abs/2106.08903) for more details on this. -We provide some set of scaling factors as part of the `ocp` codebase that you +We provide some set of scaling factors as part of the `fairchem` codebase that you can reuse by passing the `scale_file` parameter in the YAML config. For example: * GemNet-dT [scaling factors](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/configs/s2ef/all/gemnet/scaling_factors/gemnet-dT.json) and [config](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/configs/s2ef/all/gemnet/gemnet-dT.yml#L32) * GemNet-OC [scaling factors](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/configs/s2ef/all/gemnet/scaling_factors/gemnet-oc.pt) and [config](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/configs/s2ef/all/gemnet/gemnet-oc.yml#L45) @@ -92,7 +92,7 @@ If you change any of the model architecture hyperparameters or the dataset, you should refit these scaling factors: ```bash -python ocpmodels/modules/scaling/fit.py \ +python src/fairchem/core/modules/scaling/fit.py \ --config-yml path/to/my/config.yml \ --checkpoint path/to/save/checkpoint.pt \ --mode train @@ -112,7 +112,7 @@ python main.py \ This is likely a tagging issue -- GemNet-OC computes quadruplet interactions for atoms tagged as 1 and 2 -([see code](https://github.com/FAIR-Chem/fairchem/blob/main/ocpmodels/models/gemnet_oc/gemnet_oc.py#L1020)). +([see code](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/models/gemnet_oc/gemnet_oc.py#L1020)). In OC20 parlance, `tag==1` refers to surface atoms and `tag==2` refers to adsorbate atoms. If all the atoms are tagged as 0 (check `atoms.get_tags()`), no quadruplets are computed, and part of the GemNet-OC forward pass fails. 
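To make the tagging requirement concrete, here is an illustrative sketch (not part of the FAQ) that assigns OC20-style tags to a toy Cu(111) slab with an O adsorbate, picking the surface layer by height. The slab construction mirrors the gotchas example earlier in this diff; the height tolerance is an arbitrary choice.

```python
import numpy as np
from ase.build import fcc111, add_adsorbate

slab = fcc111('Cu', size=(2, 2, 3), vacuum=10.0)
add_adsorbate(slab, 'O', height=1.2, position='fcc')   # adsorbate is appended last

tags = np.zeros(len(slab), dtype=int)                   # tag 0: subsurface atoms
z = slab.positions[:, 2]
tags[np.isclose(z, z[:-1].max(), atol=0.1)] = 1         # tag 1: top (surface) layer
tags[-1] = 2                                            # tag 2: the adsorbate
slab.set_tags(tags)
print(slab.get_tags())
```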
diff --git a/_sources/core/model_training.md b/_sources/core/model_training.md index 94139c113..f6f1663a8 100644 --- a/_sources/core/model_training.md +++ b/_sources/core/model_training.md @@ -203,7 +203,7 @@ dataset: There is some support for specifying arguments from the command line, such that they would override any parameter from the YAML configuration file. The parser for this relies on the [nesting level being correctly specified using a `.` -separator](https://github.com/FAIR-Chem/fairchem/blob/main/ocpmodels/common/utils.py#L357). +separator](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/common/utils.py#L357). For example, to override the training dataset path via a command line argument: diff --git a/_sources/core/ocpapi.md b/_sources/core/ocpapi.md index 7f1c6ef14..78e428fee 100644 --- a/_sources/core/ocpapi.md +++ b/_sources/core/ocpapi.md @@ -20,7 +20,7 @@ Ensure you have Python 3.9.1 or newer, and install `ocpapi` using: ```{code-cell} ipython3 %%sh -pip install ocpapi +pip install -q ocpapi ``` ## Quickstart @@ -67,7 +67,7 @@ results = await find_adsorbate_binding_sites( ) ``` -Users will be prompted to select one or more surfaces that should be relaxed. +Users will be prompted to select one or more surfaces that should be relaxed. Input to this function includes: diff --git a/_sources/legacy_tutorials/OCP_Tutorial.md b/_sources/legacy_tutorials/OCP_Tutorial.md index defb0ca49..e2a4fec04 100644 --- a/_sources/legacy_tutorials/OCP_Tutorial.md +++ b/_sources/legacy_tutorials/OCP_Tutorial.md @@ -857,8 +857,6 @@ model = { "num_atom_emb_layers": 2, "num_global_out_layers": 2, "qint_tags": [1, 2], - - "scale_file": "configs/s2ef/all/gemnet/scaling_factors/gemnet-oc.pt", } # Optimizer @@ -893,21 +891,6 @@ dataset = [ ] ``` -The above config references a scale file `configs/s2ef/all/gemnet/scaling_factors/gemnet-oc.json`. We usually train in the root directory of the ocpmodels install, so this would be local to that directory. We'll link that folder into the current one to make it happy and keep things consistent as if you were running from the root directory! - -```{code-cell} ipython3 ---- -colab: - base_uri: https://localhost:8080/ -id: 5KZvPu4hogkR -outputId: fdbbfa5c-0d7c-449f-8be5-ef2e5d17860d ---- -import ocpmodels -from fairchem.core.common.tutorial_utils import ocp_root - -! ln -s {ocp_root()}/configs ./configs -``` - +++ {"id": "8AsZpLjIQg-W"} ### Create the trainer diff --git a/_sources/tutorials/NRR/NRR_example.md b/_sources/tutorials/NRR/NRR_example.md index fb3b566ee..dfbdae394 100644 --- a/_sources/tutorials/NRR/NRR_example.md +++ b/_sources/tutorials/NRR/NRR_example.md @@ -57,12 +57,12 @@ To do this, we will enumerate adsorbate-slab configurations and run ML relaxatio +++ -Be sure to set the path in `ocdata/configs/paths.py` to point to the correct place or pass the paths as an argument. The database pickles can be found in `ocdata/databases/pkls`. We will show one explicitly here as an example and then run all of them in an automated fashion for brevity. +Be sure to set the path in `fairchem/data/oc/configs/paths.py` to point to the correct place or pass the paths as an argument. The database pickles can be found in `fairchem/data/oc/databases/pkls`. We will show one explicitly here as an example and then run all of them in an automated fashion for brevity. 
```{code-cell} ipython3 -import ocdata +import fairchem.data.oc from pathlib import Path -db = Path(ocdata.__file__).parent / Path('databases/pkls/adsorbates.pkl') +db = Path(fairchem.data.oc.__file__).parent / Path('databases/pkls/adsorbates.pkl') db ``` diff --git a/_sources/tutorials/adsorbml_walkthrough.md b/_sources/tutorials/adsorbml_walkthrough.md index ecad65883..262ac3fca 100644 --- a/_sources/tutorials/adsorbml_walkthrough.md +++ b/_sources/tutorials/adsorbml_walkthrough.md @@ -68,7 +68,7 @@ There are 2 options for how to do this. (1) is really only adequate for small stuff and it is what I will show here, but if you plan to run many relaxations, you should definitely use (2). More details about writing lmdbs has been provided [here](../core/lmdb_dataset_creation.md) - follow the IS2RS/IS2RE instructions. And more information about running relaxations once the lmdb has been written is [here](../core/model_training.md). -You need to provide the calculator with a path to a model checkpoint file. That can be downloaded [here](https://github.com/Open-Catalyst-Project/ocp/blob/main/MODELS.md) +You need to provide the calculator with a path to a model checkpoint file. That can be downloaded [here](../core/model_checkpoints) ```{code-cell} ipython3 from fairchem.core.common.relaxation.ase_utils import OCPCalculator diff --git a/_sources/tutorials/advanced/embeddings.md b/_sources/tutorials/advanced/embeddings.md index af725c266..04a605ae1 100644 --- a/_sources/tutorials/advanced/embeddings.md +++ b/_sources/tutorials/advanced/embeddings.md @@ -39,7 +39,7 @@ In principle other models could be adapted in a similar way. See [embedding-monk import embedding_monkeypatch ``` -The OCP project is still under active development, and it is not yet clear what the best way to access these embeddings are, so this code is not yet part of the main development branch. This code was adapted from a branch at https://github.com/Open-Catalyst-Project/ocp/blob/gnoc-embeddings. +The OCP project is still under active development, and it is not yet clear what the best way to access these embeddings are, so this code is not yet part of the main development branch. This code was adapted from a branch at https://github.com/FAIR-Chem/fairchem/blob/gnoc-embeddings. # A diagnostic example diff --git a/_sources/tutorials/advanced/fine-tuning-in-python.md b/_sources/tutorials/advanced/fine-tuning-in-python.md index 41c0055a5..afc810c20 100644 --- a/_sources/tutorials/advanced/fine-tuning-in-python.md +++ b/_sources/tutorials/advanced/fine-tuning-in-python.md @@ -22,7 +22,7 @@ from fairchem.core.common.utils import SeverityLevelBetween root = logging.getLogger() - + root.setLevel(logging.INFO) log_formatter = logging.Formatter( @@ -83,7 +83,7 @@ yml = generate_yml_config(checkpoint_path, 'config.yml', 'optim.eval_every': 10, 'optim.max_epochs': 1, 'optim.batch_size': 4, - 'logger': 'tensorboard', # don't use wandb unless you already are logged in + 'logger': 'tensorboard', # don't use wandb unless you already are logged in # Train data 'dataset.train.src': 'train.db', 'dataset.train.a2g_args.r_energy': True, @@ -103,7 +103,7 @@ yml ## Setup the training task -This essentially allows several opportunities to define and override the config. You start with the base config.yml, and then via "command-line" arguments you specify changes you want to make. +This essentially allows several opportunities to define and override the config. 
You start with the base config.yml, and then via "command-line" arguments you specify changes you want to make. The code is build around `submitit`, which is often used with Slurm, but also works locally. @@ -114,8 +114,8 @@ We have to mimic the `main.py` setup to get the arguments and config setup. Here ```{code-cell} ipython3 from fairchem.core.common.flags import flags parser = flags.get_parser() -args, args_override = parser.parse_known_args(["--mode=train", - "--config-yml=config.yml", +args, args_override = parser.parse_known_args(["--mode=train", + "--config-yml=config.yml", f"--checkpoint={checkpoint_path}", "--amp"]) args, args_override @@ -132,9 +132,9 @@ config # Run the training task -It is still annoying that if your output is too large the notebook will not be able to be saved. On the other hand, it is annoying to simply capture the output. +It is still annoying that if your output is too large the notebook will not be able to be saved. On the other hand, it is annoying to simply capture the output. -We are able to redirect most logging to a file above, but not all of it. The link below will open the file in a browser, and the subsequent cell captures all residual output. We do not need any of that, so it is ultimately discarded. +We are able to redirect most logging to a file above, but not all of it. The link below will open the file in a browser, and the subsequent cell captures all residual output. We do not need any of that, so it is ultimately discarded. Alternatively, you can open a Terminal and use `tail -f out.txt` to see the progress. @@ -144,7 +144,7 @@ display(FileLink('out.txt')) ``` ```{code-cell} ipython3 -with new_trainer_context(config=config, args=args) as ctx: +with new_trainer_context(config=config) as ctx: config = ctx.config task = ctx.task trainer = ctx.trainer diff --git a/_sources/tutorials/cattsunami_walkthrough.md b/_sources/tutorials/cattsunami_walkthrough.md index 767eab35a..625faa2a1 100644 --- a/_sources/tutorials/cattsunami_walkthrough.md +++ b/_sources/tutorials/cattsunami_walkthrough.md @@ -84,7 +84,7 @@ There are 2 options for how to do this. (1) is really only adequate for small stuff and it is what I will show here, but if you plan to run many relaxations, you should definitely use (2). More details about writing lmdbs has been provided [here](https://github.com/Open-Catalyst-Project/ocp/blob/main/tutorials/lmdb_dataset_creation.ipynb) - follow the IS2RS/IS2RE instructions. And more information about running relaxations once the lmdb has been written is [here](https://github.com/Open-Catalyst-Project/ocp/blob/main/TRAIN.md#initial-structure-to-relaxed-structure-is2rs). -You need to provide the calculator with a path to a model checkpoint file. That can be downloaded [here](https://github.com/Open-Catalyst-Project/ocp/blob/main/MODELS.md) +You need to provide the calculator with a path to a model checkpoint file. That can be downloaded [here](../core/model_checkpoints) ```{code-cell} ipython3 # Relax the reactant systems diff --git a/_sources/tutorials/intro.md b/_sources/tutorials/intro.md index 6f48efffd..51799a4d8 100644 --- a/_sources/tutorials/intro.md +++ b/_sources/tutorials/intro.md @@ -36,25 +36,25 @@ Today an MLP consists of three things: 2. A dataset that provides the atomistic systems and the desired output labels. This label could be energy, forces, or other atomistic properties. 3. A checkpoint that stores the trained model for use in predictions. 
-The [Open Catalyst Project (OCP)](https://github.com/Open-Catalyst-Project) is an umbrella for these machine learned potential models, data sets, and checkpoints from training. +The [FAIRChem (Formerly Open Catalyst Project [OCP])](https://github.com/FAIR-Chem/) is an umbrella for these machine learned potential models, data sets, and checkpoints from training. +++ ### Models -OCP provides several [models](../core/models). Each model represents a different approach to featurization, and a different machine learning architecture. The models can be used for different tasks, and you will find different checkpoints associated with different datasets and tasks. +FAIRChem provides several [models](../core/models). Each model represents a different approach to featurization, and a different machine learning architecture. The models can be used for different tasks, and you will find different checkpoints associated with different datasets and tasks. +++ ### Datasets / Tasks -OCP provides several different datasets like [OC20](../core/datasets/oc20) that correspond to different tasks that range from predicting energy and forces from structures to Bader charges, relaxation energies, and others. +FAIRChem provides several different datasets like [OC20](../core/datasets/oc20) that correspond to different tasks that range from predicting energy and forces from structures to Bader charges, relaxation energies, and others. +++ ### Checkpoints -To use a pre-trained model you need to have [ocp](https://github.com/Open-Catalyst-Project/ocp) installed. Then you need to choose a checkpoint and config file which will be loaded to configure OCP for the predictions you want to make. There are two approaches to running OCP, via scripts in a shell, or using an ASE compatible calculator. +To use a pre-trained model you need to have [fairchem-core](https://github.com/FAIR-Chem/fairchem) installed. Then you need to choose a checkpoint and config file which will be loaded to configure FAIRChem for the predictions you want to make. There are two approaches to running FAIRChem, via scripts in a shell, or using an ASE compatible calculator. We will focus on the ASE compatible calculator here. To facilitate using the checkpoints, there is a set of [utilities](./ocp-tutorial) for this tutorial. You can list the checkpoints that are readily available here: @@ -80,11 +80,11 @@ This tutorial will start by using OCP in a Jupyter notebook to setup some simple ## About the compute environment -`ocpmodels.common.tutorial_utils` provides `describe_ocp` to output information that might be helpful in debugging. +`ocpmodels.common.tutorial_utils` provides `describe_fairchem` to output information that might be helpful in debugging. ```{code-cell} ipython3 -from fairchem.core.common.tutorial_utils import describe_ocp -describe_ocp() +from fairchem.core.common.tutorial_utils import describe_fairchem +describe_fairchem() ``` Let's get started! Click here to open the [next notebook](./OCP-introduction). 
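As a quick, hedged illustration of the checkpoint workflow described above: the registry helpers below are the ones used in the inference and gotchas pages of this documentation, and the model name is taken from the checkpoints table; adjust both to your own choice.

```python
from fairchem.core.models.model_registry import (
    available_pretrained_models,
    model_name_to_local_file,
)

print(available_pretrained_models)              # list the names you can pass below

checkpoint_path = model_name_to_local_file(
    'GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/ocp_checkpoints/'
)
print(checkpoint_path)
```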
diff --git a/_static/pygments.css b/_static/pygments.css index 997797f27..012e6a00a 100644 --- a/_static/pygments.css +++ b/_static/pygments.css @@ -3,77 +3,77 @@ html[data-theme="light"] .highlight td.linenos .normal { color: inherit; backgro html[data-theme="light"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } html[data-theme="light"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } html[data-theme="light"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } -html[data-theme="light"] .highlight .hll { background-color: #7971292e } -html[data-theme="light"] .highlight { background: #fefefe; color: #545454 } -html[data-theme="light"] .highlight .c { color: #797129 } /* Comment */ -html[data-theme="light"] .highlight .err { color: #d91e18 } /* Error */ -html[data-theme="light"] .highlight .k { color: #7928a1 } /* Keyword */ -html[data-theme="light"] .highlight .l { color: #797129 } /* Literal */ -html[data-theme="light"] .highlight .n { color: #545454 } /* Name */ -html[data-theme="light"] .highlight .o { color: #008000 } /* Operator */ -html[data-theme="light"] .highlight .p { color: #545454 } /* Punctuation */ -html[data-theme="light"] .highlight .ch { color: #797129 } /* Comment.Hashbang */ -html[data-theme="light"] .highlight .cm { color: #797129 } /* Comment.Multiline */ -html[data-theme="light"] .highlight .cp { color: #797129 } /* Comment.Preproc */ -html[data-theme="light"] .highlight .cpf { color: #797129 } /* Comment.PreprocFile */ -html[data-theme="light"] .highlight .c1 { color: #797129 } /* Comment.Single */ -html[data-theme="light"] .highlight .cs { color: #797129 } /* Comment.Special */ -html[data-theme="light"] .highlight .gd { color: #007faa } /* Generic.Deleted */ +html[data-theme="light"] .highlight .hll { background-color: #fae4c2 } +html[data-theme="light"] .highlight { background: #fefefe; color: #080808 } +html[data-theme="light"] .highlight .c { color: #515151 } /* Comment */ +html[data-theme="light"] .highlight .err { color: #a12236 } /* Error */ +html[data-theme="light"] .highlight .k { color: #6730c5 } /* Keyword */ +html[data-theme="light"] .highlight .l { color: #7f4707 } /* Literal */ +html[data-theme="light"] .highlight .n { color: #080808 } /* Name */ +html[data-theme="light"] .highlight .o { color: #00622f } /* Operator */ +html[data-theme="light"] .highlight .p { color: #080808 } /* Punctuation */ +html[data-theme="light"] .highlight .ch { color: #515151 } /* Comment.Hashbang */ +html[data-theme="light"] .highlight .cm { color: #515151 } /* Comment.Multiline */ +html[data-theme="light"] .highlight .cp { color: #515151 } /* Comment.Preproc */ +html[data-theme="light"] .highlight .cpf { color: #515151 } /* Comment.PreprocFile */ +html[data-theme="light"] .highlight .c1 { color: #515151 } /* Comment.Single */ +html[data-theme="light"] .highlight .cs { color: #515151 } /* Comment.Special */ +html[data-theme="light"] .highlight .gd { color: #005b82 } /* Generic.Deleted */ html[data-theme="light"] .highlight .ge { font-style: italic } /* Generic.Emph */ -html[data-theme="light"] .highlight .gh { color: #007faa } /* Generic.Heading */ +html[data-theme="light"] .highlight .gh { color: #005b82 } /* Generic.Heading */ html[data-theme="light"] .highlight .gs { font-weight: bold } /* Generic.Strong */ -html[data-theme="light"] .highlight .gu { color: #007faa } /* 
Generic.Subheading */ -html[data-theme="light"] .highlight .kc { color: #7928a1 } /* Keyword.Constant */ -html[data-theme="light"] .highlight .kd { color: #7928a1 } /* Keyword.Declaration */ -html[data-theme="light"] .highlight .kn { color: #7928a1 } /* Keyword.Namespace */ -html[data-theme="light"] .highlight .kp { color: #7928a1 } /* Keyword.Pseudo */ -html[data-theme="light"] .highlight .kr { color: #7928a1 } /* Keyword.Reserved */ -html[data-theme="light"] .highlight .kt { color: #797129 } /* Keyword.Type */ -html[data-theme="light"] .highlight .ld { color: #797129 } /* Literal.Date */ -html[data-theme="light"] .highlight .m { color: #797129 } /* Literal.Number */ -html[data-theme="light"] .highlight .s { color: #008000 } /* Literal.String */ -html[data-theme="light"] .highlight .na { color: #797129 } /* Name.Attribute */ -html[data-theme="light"] .highlight .nb { color: #797129 } /* Name.Builtin */ -html[data-theme="light"] .highlight .nc { color: #007faa } /* Name.Class */ -html[data-theme="light"] .highlight .no { color: #007faa } /* Name.Constant */ -html[data-theme="light"] .highlight .nd { color: #797129 } /* Name.Decorator */ -html[data-theme="light"] .highlight .ni { color: #008000 } /* Name.Entity */ -html[data-theme="light"] .highlight .ne { color: #7928a1 } /* Name.Exception */ -html[data-theme="light"] .highlight .nf { color: #007faa } /* Name.Function */ -html[data-theme="light"] .highlight .nl { color: #797129 } /* Name.Label */ -html[data-theme="light"] .highlight .nn { color: #545454 } /* Name.Namespace */ -html[data-theme="light"] .highlight .nx { color: #545454 } /* Name.Other */ -html[data-theme="light"] .highlight .py { color: #007faa } /* Name.Property */ -html[data-theme="light"] .highlight .nt { color: #007faa } /* Name.Tag */ -html[data-theme="light"] .highlight .nv { color: #d91e18 } /* Name.Variable */ -html[data-theme="light"] .highlight .ow { color: #7928a1 } /* Operator.Word */ -html[data-theme="light"] .highlight .pm { color: #545454 } /* Punctuation.Marker */ -html[data-theme="light"] .highlight .w { color: #545454 } /* Text.Whitespace */ -html[data-theme="light"] .highlight .mb { color: #797129 } /* Literal.Number.Bin */ -html[data-theme="light"] .highlight .mf { color: #797129 } /* Literal.Number.Float */ -html[data-theme="light"] .highlight .mh { color: #797129 } /* Literal.Number.Hex */ -html[data-theme="light"] .highlight .mi { color: #797129 } /* Literal.Number.Integer */ -html[data-theme="light"] .highlight .mo { color: #797129 } /* Literal.Number.Oct */ -html[data-theme="light"] .highlight .sa { color: #008000 } /* Literal.String.Affix */ -html[data-theme="light"] .highlight .sb { color: #008000 } /* Literal.String.Backtick */ -html[data-theme="light"] .highlight .sc { color: #008000 } /* Literal.String.Char */ -html[data-theme="light"] .highlight .dl { color: #008000 } /* Literal.String.Delimiter */ -html[data-theme="light"] .highlight .sd { color: #008000 } /* Literal.String.Doc */ -html[data-theme="light"] .highlight .s2 { color: #008000 } /* Literal.String.Double */ -html[data-theme="light"] .highlight .se { color: #008000 } /* Literal.String.Escape */ -html[data-theme="light"] .highlight .sh { color: #008000 } /* Literal.String.Heredoc */ -html[data-theme="light"] .highlight .si { color: #008000 } /* Literal.String.Interpol */ -html[data-theme="light"] .highlight .sx { color: #008000 } /* Literal.String.Other */ -html[data-theme="light"] .highlight .sr { color: #d91e18 } /* Literal.String.Regex */ -html[data-theme="light"] .highlight .s1 { 
color: #008000 } /* Literal.String.Single */ -html[data-theme="light"] .highlight .ss { color: #007faa } /* Literal.String.Symbol */ -html[data-theme="light"] .highlight .bp { color: #797129 } /* Name.Builtin.Pseudo */ -html[data-theme="light"] .highlight .fm { color: #007faa } /* Name.Function.Magic */ -html[data-theme="light"] .highlight .vc { color: #d91e18 } /* Name.Variable.Class */ -html[data-theme="light"] .highlight .vg { color: #d91e18 } /* Name.Variable.Global */ -html[data-theme="light"] .highlight .vi { color: #d91e18 } /* Name.Variable.Instance */ -html[data-theme="light"] .highlight .vm { color: #797129 } /* Name.Variable.Magic */ -html[data-theme="light"] .highlight .il { color: #797129 } /* Literal.Number.Integer.Long */ +html[data-theme="light"] .highlight .gu { color: #005b82 } /* Generic.Subheading */ +html[data-theme="light"] .highlight .kc { color: #6730c5 } /* Keyword.Constant */ +html[data-theme="light"] .highlight .kd { color: #6730c5 } /* Keyword.Declaration */ +html[data-theme="light"] .highlight .kn { color: #6730c5 } /* Keyword.Namespace */ +html[data-theme="light"] .highlight .kp { color: #6730c5 } /* Keyword.Pseudo */ +html[data-theme="light"] .highlight .kr { color: #6730c5 } /* Keyword.Reserved */ +html[data-theme="light"] .highlight .kt { color: #7f4707 } /* Keyword.Type */ +html[data-theme="light"] .highlight .ld { color: #7f4707 } /* Literal.Date */ +html[data-theme="light"] .highlight .m { color: #7f4707 } /* Literal.Number */ +html[data-theme="light"] .highlight .s { color: #00622f } /* Literal.String */ +html[data-theme="light"] .highlight .na { color: #912583 } /* Name.Attribute */ +html[data-theme="light"] .highlight .nb { color: #7f4707 } /* Name.Builtin */ +html[data-theme="light"] .highlight .nc { color: #005b82 } /* Name.Class */ +html[data-theme="light"] .highlight .no { color: #005b82 } /* Name.Constant */ +html[data-theme="light"] .highlight .nd { color: #7f4707 } /* Name.Decorator */ +html[data-theme="light"] .highlight .ni { color: #00622f } /* Name.Entity */ +html[data-theme="light"] .highlight .ne { color: #6730c5 } /* Name.Exception */ +html[data-theme="light"] .highlight .nf { color: #005b82 } /* Name.Function */ +html[data-theme="light"] .highlight .nl { color: #7f4707 } /* Name.Label */ +html[data-theme="light"] .highlight .nn { color: #080808 } /* Name.Namespace */ +html[data-theme="light"] .highlight .nx { color: #080808 } /* Name.Other */ +html[data-theme="light"] .highlight .py { color: #005b82 } /* Name.Property */ +html[data-theme="light"] .highlight .nt { color: #005b82 } /* Name.Tag */ +html[data-theme="light"] .highlight .nv { color: #a12236 } /* Name.Variable */ +html[data-theme="light"] .highlight .ow { color: #6730c5 } /* Operator.Word */ +html[data-theme="light"] .highlight .pm { color: #080808 } /* Punctuation.Marker */ +html[data-theme="light"] .highlight .w { color: #080808 } /* Text.Whitespace */ +html[data-theme="light"] .highlight .mb { color: #7f4707 } /* Literal.Number.Bin */ +html[data-theme="light"] .highlight .mf { color: #7f4707 } /* Literal.Number.Float */ +html[data-theme="light"] .highlight .mh { color: #7f4707 } /* Literal.Number.Hex */ +html[data-theme="light"] .highlight .mi { color: #7f4707 } /* Literal.Number.Integer */ +html[data-theme="light"] .highlight .mo { color: #7f4707 } /* Literal.Number.Oct */ +html[data-theme="light"] .highlight .sa { color: #00622f } /* Literal.String.Affix */ +html[data-theme="light"] .highlight .sb { color: #00622f } /* Literal.String.Backtick */ +html[data-theme="light"] 
.highlight .sc { color: #00622f } /* Literal.String.Char */ +html[data-theme="light"] .highlight .dl { color: #00622f } /* Literal.String.Delimiter */ +html[data-theme="light"] .highlight .sd { color: #00622f } /* Literal.String.Doc */ +html[data-theme="light"] .highlight .s2 { color: #00622f } /* Literal.String.Double */ +html[data-theme="light"] .highlight .se { color: #00622f } /* Literal.String.Escape */ +html[data-theme="light"] .highlight .sh { color: #00622f } /* Literal.String.Heredoc */ +html[data-theme="light"] .highlight .si { color: #00622f } /* Literal.String.Interpol */ +html[data-theme="light"] .highlight .sx { color: #00622f } /* Literal.String.Other */ +html[data-theme="light"] .highlight .sr { color: #a12236 } /* Literal.String.Regex */ +html[data-theme="light"] .highlight .s1 { color: #00622f } /* Literal.String.Single */ +html[data-theme="light"] .highlight .ss { color: #005b82 } /* Literal.String.Symbol */ +html[data-theme="light"] .highlight .bp { color: #7f4707 } /* Name.Builtin.Pseudo */ +html[data-theme="light"] .highlight .fm { color: #005b82 } /* Name.Function.Magic */ +html[data-theme="light"] .highlight .vc { color: #a12236 } /* Name.Variable.Class */ +html[data-theme="light"] .highlight .vg { color: #a12236 } /* Name.Variable.Global */ +html[data-theme="light"] .highlight .vi { color: #a12236 } /* Name.Variable.Instance */ +html[data-theme="light"] .highlight .vm { color: #7f4707 } /* Name.Variable.Magic */ +html[data-theme="light"] .highlight .il { color: #7f4707 } /* Literal.Number.Integer.Long */ html[data-theme="dark"] .highlight pre { line-height: 125%; } html[data-theme="dark"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } html[data-theme="dark"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } diff --git a/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html b/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html new file mode 100644 index 000000000..c13ccaa43 --- /dev/null +++ b/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html @@ -0,0 +1,852 @@ + + + + + + + + + + + adsorbml.2023_neurips_challenge.challenge_eval — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

adsorbml.2023_neurips_challenge.challenge_eval#

Module Contents#

Functions#

is_successful(best_pred_energy, best_dft_energy[, ...])
    Computes the success rate given the best predicted energy and the best ground truth DFT energy.

compute_valid_ml_success(ml_data, dft_data)
    Computes validated ML success rates.

get_dft_data(targets)
    Organizes the released target mapping for evaluation lookup.

process_ml_data(results_file, model, metadata, ...)
    For ML systems in which no configurations made it through the physical constraint checks, set energies to an arbitrarily high value.

parse_args()

main()
    This script takes in your prediction file (npz format) and the ML model name used for ML relaxations.
+
+
adsorbml.2023_neurips_challenge.challenge_eval.is_successful(best_pred_energy, best_dft_energy, SUCCESS_THRESHOLD=0.1)#

    Computes the success rate given the best predicted energy and the best ground truth DFT energy.

    success_parity: The standard definition for success, where ML needs to be within the SUCCESS_THRESHOLD, or lower, of the DFT energy.

    Returns: Bool

adsorbml.2023_neurips_challenge.challenge_eval.compute_valid_ml_success(ml_data, dft_data)#

    Computes validated ML success rates. Here, results are generated only from ML. DFT single-points are used to validate whether the ML energy is within 0.1 eV of the DFT energy of the predicted structure. If valid, the ML energy is compared to the ground truth DFT energy; otherwise it is discarded.

    Return validated ML success rates.

adsorbml.2023_neurips_challenge.challenge_eval.get_dft_data(targets)#

    Organizes the released target mapping for evaluation lookup.

    Returns: Dict:
    {
        'system_id 1': {'config_id 1': dft_ads_energy, 'config_id 2': dft_ads_energy},
        'system_id 2': {'config_id 1': dft_ads_energy, 'config_id 2': dft_ads_energy},
        ...
    }

adsorbml.2023_neurips_challenge.challenge_eval.process_ml_data(results_file, model, metadata, ml_dft_targets, dft_data)#

    For ML systems in which no configurations made it through the physical constraint checks, set energies to an arbitrarily high value to ensure a failure case in evaluation.

    Returns: Dict:
    {
        'system_id 1': {'config_id 1': {'ml_energy': predicted energy, 'ml+dft_energy': dft energy of ML structure}, ...},
        'system_id 2': {'config_id 1': {'ml_energy': predicted energy, 'ml+dft_energy': dft energy of ML structure}, ...},
        ...
    }

adsorbml.2023_neurips_challenge.challenge_eval.parse_args()#

adsorbml.2023_neurips_challenge.challenge_eval.main()#

    This script takes in your prediction file (npz format) and the ML model name used for ML relaxations. Then, using a mapping file, the DFT ground truth energy, and the ML-relaxed DFT energy, it returns the success rate of your predictions.
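A minimal sketch of the success_parity criterion described above, for orientation only; the packaged challenge_eval script remains the authoritative implementation.

```python
SUCCESS_THRESHOLD = 0.1  # eV

def is_successful_sketch(best_pred_energy, best_dft_energy, threshold=SUCCESS_THRESHOLD):
    # success_parity: the ML energy must be within the threshold of, or lower than, the DFT energy
    return bool(best_pred_energy - best_dft_energy <= threshold)
```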

+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/adsorbml/scripts/dense_eval/index.html b/autoapi/adsorbml/scripts/dense_eval/index.html new file mode 100644 index 000000000..6fee19199 --- /dev/null +++ b/autoapi/adsorbml/scripts/dense_eval/index.html @@ -0,0 +1,953 @@ + + + + + + + + + + + adsorbml.scripts.dense_eval — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+

adsorbml.scripts.dense_eval#

+

AdsorbML evaluation script. This script expects the results-file to be organized in a very specific structure in order to evaluate successfully.

Results are to be saved out in a dictionary pickle file, where keys are the system_id and the values are energies and compute information for a specified config_id. For each config_id that successfully passes the physical constraints defined in the manuscript, the following information must be provided:

- ml_energy: The ML predicted adsorption energy on that particular config_id.
- ml+dft_energy: The DFT adsorption energy (SP or RX) as evaluated on the predicted ML config_id structure. Do not use raw DFT energies; ensure these are referenced correctly. None if not available.
- scf_steps: Total number of SCF steps involved in determining the DFT adsorption energy on the predicted ML config_id. For relaxation methods (ML+RX), sum all SCF steps across all frames. 0 if not available.
- ionic_steps: Total number of ionic steps in determining the DFT adsorption energy on the predicted ML config_id. This will be 1 for single-point methods (ML+SP). 0 if not available.

NOTE - It is possible that, due to the required filtering of physical constraints, no configurations are valid for a particular system_id. In this case the system or config id can be excluded entirely from the results file and will be treated as a failure point at evaluation time.

e.g.

{
    "6_1134_23": {
        "rand11": {
            "ml_energy": -1.234,
            "ml+dft_energy": -1.456,
            "scf_steps": 33,
            "ionic_steps": 1,
        },
        "rand5": {
            "ml_energy": -2.489,
            "ml+dft_energy": -2.109,
            "scf_steps": 16,
            "ionic_steps": 1,
        },
        ...
    },
    "7_6566_62": {
        "rand79": {
            "ml_energy": -1.234,
            "ml+dft_energy": -1.456,
            "scf_steps": 33,
            "ionic_steps": 1,
        },
        ...
    },
    ...
}
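A short, hedged sketch of writing a results file with the structure shown above; the system/config ids and numbers are just the example values, and the output filename is a placeholder.

```python
import pickle

results = {
    "6_1134_23": {
        "rand11": {"ml_energy": -1.234, "ml+dft_energy": -1.456,
                   "scf_steps": 33, "ionic_steps": 1},
        "rand5": {"ml_energy": -2.489, "ml+dft_energy": -2.109,
                  "scf_steps": 16, "ionic_steps": 1},
    },
}

with open("my_model_results.pkl", "wb") as f:   # placeholder output name
    pickle.dump(results, f)
```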


Module Contents#

Functions#

is_successful(best_ml_dft_energy, best_dft_energy)
    Computes the success rate given the best ML+DFT energy and the best ground truth DFT energy.

compute_hybrid_success(ml_data, dft_data, k)
    Computes AdsorbML success rates at varying top-k values.

compute_valid_ml_success(ml_data, dft_data)
    Computes validated ML success rates.

get_dft_data(targets)
    Organizes the released target mapping for evaluation lookup.

get_dft_compute(counts)
    Calculates the total DFT compute associated with establishing a ground truth.

filter_ml_data(ml_data, dft_data)
    For ML systems in which no configurations made it through the physical constraint checks.

Attributes#

SUCCESS_THRESHOLD

parser
+
+adsorbml.scripts.dense_eval.SUCCESS_THRESHOLD = 0.1#
+
+ +
+
+adsorbml.scripts.dense_eval.is_successful(best_ml_dft_energy, best_dft_energy)#
+

Computes the success rate given the best ML+DFT energy and the best ground +truth DFT energy.

+

success_parity: The standard definition for success, where ML needs to be +within the SUCCESS_THRESHOLD, or lower, of the DFT energy.

+

success_much_better: A system in which the ML energy is predicted to be +much lower (less than the SUCCESS_THRESHOLD) of the DFT energy.

+
+ +
+
+adsorbml.scripts.dense_eval.compute_hybrid_success(ml_data, dft_data, k)#
+

Computes AdsorbML success rates at varying top-k values. Here, results are generated for the hybrid method, where the top-k ML energies are used to run DFT on the corresponding ML structures. The resulting energies are then compared to the ground truth DFT energies.

+

Return success rates and DFT compute usage at varying k.

+
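A hedged sketch of the top-k idea, assuming the data layouts described earlier (function name and dict shapes are illustrative, not the exact implementation):

def compute_hybrid_success_sketch(ml_data, dft_data, k):
    # dft_data: {system_id: {config_id: dft_ads_energy}}
    # ml_data:  {system_id: {config_id: {"ml_energy": ..., "ml+dft_energy": ...}}}
    SUCCESS_THRESHOLD = 0.1
    successes = 0
    for system_id, dft_configs in dft_data.items():
        best_dft = min(dft_configs.values())
        ml_configs = ml_data.get(system_id, {})
        # take the k configurations with the lowest ML energy
        top_k = sorted(ml_configs.values(), key=lambda c: c["ml_energy"])[:k]
        candidates = [c["ml+dft_energy"] for c in top_k if c["ml+dft_energy"] is not None]
        best_ml_dft = min(candidates, default=float("inf"))
        if best_ml_dft - best_dft <= SUCCESS_THRESHOLD:
            successes += 1
    return successes / len(dft_data)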
+ +
+
+adsorbml.scripts.dense_eval.compute_valid_ml_success(ml_data, dft_data)#
+

Computes validated ML success rates. Here, results are generated only from ML. DFT single-points are used to validate whether the ML energy is within 0.1 eV of the DFT energy of the predicted structure. If valid, the ML energy is compared to the ground truth DFT energy; otherwise it is discarded.

+

Return validated ML success rates.

+
+ +
+
+adsorbml.scripts.dense_eval.get_dft_data(targets)#
+

Organizes the released target mapping for evaluation lookup.

+
+
oc20dense_targets.pkl:

['system_id 1': [('config_id 1', dft_adsorption_energy), ('config_id 2', dft_adsorption_energy)], 'system_id 2': ...]

+
+
Returns: Dict:

{
    'system_id 1': {'config_id 1': dft_ads_energy, 'config_id 2': dft_ads_energy},
    'system_id 2': {'config_id 1': dft_ads_energy, 'config_id 2': dft_ads_energy},
    ...
}

+
+
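A hedged sketch of this reorganization, assuming the released pickle loads as a mapping from system_id to a list of (config_id, dft_adsorption_energy) pairs as described above (function name and default path are illustrative):

import pickle

def get_dft_data_sketch(targets_path="oc20dense_targets.pkl"):
    with open(targets_path, "rb") as f:
        targets = pickle.load(f)
    # flip each list of (config_id, energy) pairs into a per-system lookup dict
    return {
        system_id: dict(config_pairs)
        for system_id, config_pairs in targets.items()
    }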
+
+ +
+
+adsorbml.scripts.dense_eval.get_dft_compute(counts)#
+

Calculates the total DFT compute associated with establishing a ground +truth using the released DFT timings: oc20dense_compute.pkl.

+

Compute is measured in the total number of self-consistent steps (SC). The +total number of ionic steps is also included for reference.

+
+ +
+
+adsorbml.scripts.dense_eval.filter_ml_data(ml_data, dft_data)#
+

For ML systems in which no configurations made it through the physical +constraint checks, set energies to an arbitrarily high value to ensure +a failure case in evaluation.

+
+ +
+
+adsorbml.scripts.dense_eval.parser#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/adsorbml/scripts/process_mlrs/index.html b/autoapi/adsorbml/scripts/process_mlrs/index.html new file mode 100644 index 000000000..bd8de618a --- /dev/null +++ b/autoapi/adsorbml/scripts/process_mlrs/index.html @@ -0,0 +1,842 @@ + + + + + + + + + + + adsorbml.scripts.process_mlrs — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

adsorbml.scripts.process_mlrs

+ +
+ +
+
+ + + + +
+ +
+

adsorbml.scripts.process_mlrs#

+

This script processes ML relaxations and sets them up for the next step.
• Reads the final energy and structure for each relaxation
• Filters out anomalies
• Groups together all configurations for one adsorbate-surface system
• Sorts configs by lowest energy first

+

The following files are saved out:
• cache_sorted_byE.pkl: dict going from the system ID (bulk, surface, adsorbate) to a list of configs and their relaxed structures, sorted by lowest energy first. This is later used by write_top_k_vasp.py.
• anomalies_by_sid.pkl: dict going from integer sid to a boolean representing whether it was an anomaly. Anomalies are already excluded from cache_sorted_byE.pkl; this file is only used for extra analyses.
• errors_by_sid.pkl: any errors that occurred
+
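A hedged sketch of consuming these saved files in a follow-up analysis (paths are placeholders, and the per-system list layout is assumed from the description above):

import pickle

with open("cache_sorted_byE.pkl", "rb") as f:
    cache_sorted_by_energy = pickle.load(f)  # system ID -> configs sorted by energy

with open("anomalies_by_sid.pkl", "rb") as f:
    anomalies_by_sid = pickle.load(f)  # integer sid -> bool

# lowest-energy (first) entry for each system; anomalies are already excluded
best_per_system = {sid: configs[0] for sid, configs in cache_sorted_by_energy.items()}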
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + +

parse_args()

min_diff(atoms_init, atoms_final)

process_mlrs(arg)

+
+
+

Attributes#

+ + + + + + + + + + + + +

SURFACE_CHANGE_CUTOFF_MULTIPLIER

DESORPTION_CUTOFF_MULTIPLIER

args

+
+
+adsorbml.scripts.process_mlrs.SURFACE_CHANGE_CUTOFF_MULTIPLIER = 1.5#
+
+ +
+
+adsorbml.scripts.process_mlrs.DESORPTION_CUTOFF_MULTIPLIER = 1.5#
+
+ +
+
+adsorbml.scripts.process_mlrs.parse_args()#
+
+ +
+
+adsorbml.scripts.process_mlrs.min_diff(atoms_init, atoms_final)#
+
+ +
+
+adsorbml.scripts.process_mlrs.process_mlrs(arg)#
+
+ +
+
+adsorbml.scripts.process_mlrs.args#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/adsorbml/scripts/utils/index.html b/autoapi/adsorbml/scripts/utils/index.html new file mode 100644 index 000000000..a559a76e3 --- /dev/null +++ b/autoapi/adsorbml/scripts/utils/index.html @@ -0,0 +1,800 @@ + + + + + + + + + + + adsorbml.scripts.utils — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

adsorbml.scripts.utils

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

adsorbml.scripts.utils#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

converged_oszicar(path[, nelm, ediff, idx])

--- FOR VASP USERS ---

count_scf(path)

--- FOR VASP USERS ---

+
+
+adsorbml.scripts.utils.converged_oszicar(path, nelm=60, ediff=0.0001, idx=0)#
+

— FOR VASP USERS —

+

Given a folder containing DFT outputs, ensures the system has converged +electronically.

+
+
Parameters:
+
    +
  • path – Path to DFT outputs.

  • +
  • nelm – Maximum number of electronic steps used.

  • +
  • ediff – Energy difference condition for terminating the electronic loop.

  • +
  • idx – Frame to check for electronic convergence. 0 for SP, -1 for RX.

  • +
+
+
+
+ +
+
+adsorbml.scripts.utils.count_scf(path)#
+

— FOR VASP USERS —

+

Given a folder containing DFT outputs, computes the total number of ionic and SCF steps.

+
+
Parameters:
+

path – Path to DFT outputs.

+
+
Returns:

scf_steps (int): Total number of electronic steps performed.
ionic_steps (int): Total number of ionic steps performed.

+
+
+
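A hedged usage sketch of these two VASP helpers, assuming count_scf returns (scf_steps, ionic_steps) as documented above (the directory path is a placeholder):

from adsorbml.scripts.utils import converged_oszicar, count_scf

dft_dir = "path/to/vasp_outputs"  # placeholder
if converged_oszicar(dft_dir, nelm=60, ediff=1e-4, idx=-1):
    scf_steps, ionic_steps = count_scf(dft_dir)
    print(scf_steps, ionic_steps)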
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/adsorbml/scripts/write_top_k_vasp/index.html b/autoapi/adsorbml/scripts/write_top_k_vasp/index.html new file mode 100644 index 000000000..a9c4412ed --- /dev/null +++ b/autoapi/adsorbml/scripts/write_top_k_vasp/index.html @@ -0,0 +1,754 @@ + + + + + + + + + + + adsorbml.scripts.write_top_k_vasp — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

adsorbml.scripts.write_top_k_vasp

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

adsorbml.scripts.write_top_k_vasp#

+
+

Module Contents#

+
+
+adsorbml.scripts.write_top_k_vasp.VASP_FLAGS#
+
+ +
+
+adsorbml.scripts.write_top_k_vasp.parser#
+
+ +
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/_cli/index.html b/autoapi/core/_cli/index.html new file mode 100644 index 000000000..34ce8d05c --- /dev/null +++ b/autoapi/core/_cli/index.html @@ -0,0 +1,834 @@ + + + + + + + + + + + core._cli — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core._cli

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core._cli#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

Runner

Derived callable classes are requeued after timeout with their current

+
+
+

Functions#

+ + + + + + +

main()

Run the main fairchem program.

+
+
+class core._cli.Runner(distributed: bool = False)#
+

Bases: submitit.helpers.Checkpointable

+

Derived callable classes are requeued after timeout with their current +state dumped at checkpoint.

+

__call__ method must be implemented to make your class a callable.

+
+

Note

+

The following implementation of the checkpoint method resubmits the full current state of the callable (self) with the initial argument. You may want to replace the method to curate the state (e.g., dump a neural network to a standard format and remove it from the state so as not to pickle it) and change or remove the initial parameters.

+
+
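A hedged sketch of submitting a Runner through submitit (the log folder, executor parameters, and config dict are placeholders, not values from this repository):

import submitit
from fairchem.core._cli import Runner

config = {"mode": "train"}  # placeholder config dict passed to Runner.__call__
executor = submitit.AutoExecutor(folder="submitit_logs")
executor.update_parameters(timeout_min=60, slurm_partition="debug")
job = executor.submit(Runner(distributed=False), config)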
+
+__call__(config: dict) None#
+
+ +
+
+checkpoint(*args, **kwargs)#
+

Resubmits the same callable with the same arguments

+
+ +
+ +
+
+core._cli.main()#
+

Run the main fairchem program.

+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/data_parallel/index.html b/autoapi/core/common/data_parallel/index.html new file mode 100644 index 000000000..eba0c5847 --- /dev/null +++ b/autoapi/core/common/data_parallel/index.html @@ -0,0 +1,987 @@ + + + + + + + + + + + core.common.data_parallel — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.common.data_parallel#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + +

OCPCollater

_HasMetadata

Base class for protocol classes.

StatefulDistributedSampler

More fine-grained state DataSampler that uses training iteration and epoch

BalancedBatchSampler

Base class for all Samplers.

+
+
+

Functions#

+ + + + + + +

balanced_partition(sizes, num_parts)

Greedily partition the given set by always inserting

+
+
+class core.common.data_parallel.OCPCollater(otf_graph: bool = False)#
+
+
+__call__(data_list: list[torch_geometric.data.Data]) torch_geometric.data.Batch#
+
+ +
+ +
+
+core.common.data_parallel.balanced_partition(sizes: numpy.typing.NDArray[numpy.int_], num_parts: int)#
+

Greedily partition the given set by always inserting +the largest element into the smallest partition.

+
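A hedged sketch of the greedy idea described above (not the library implementation; the function name is hypothetical):

import numpy as np

def greedy_partition_sketch(sizes, num_parts):
    parts = [[] for _ in range(num_parts)]
    totals = [0] * num_parts
    # visit elements from largest to smallest, always filling the lightest partition
    for idx in np.argsort(sizes)[::-1]:
        smallest = int(np.argmin(totals))
        parts[smallest].append(int(idx))
        totals[smallest] += sizes[idx]
    return parts

print(greedy_partition_sketch(np.array([8, 5, 3, 3, 2, 1]), num_parts=2))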
+ +
+
+class core.common.data_parallel._HasMetadata#
+

Bases: Protocol

+

Base class for protocol classes.

+

Protocol classes are defined as:

+
class Proto(Protocol):
+    def meth(self) -> int:
+        ...
+
+
+

Such classes are primarily used with static type checkers that recognize +structural subtyping (static duck-typing).

+

For example:

+
class C:
+    def meth(self) -> int:
+        return 0
+
+def func(x: Proto) -> int:
+    return x.meth()
+
+func(C())  # Passes static type check
+
+
+

See PEP 544 for details. Protocol classes decorated with +@typing.runtime_checkable act as simple-minded runtime protocols that check +only the presence of given attributes, ignoring their type signatures. +Protocol classes can be generic, they are defined as:

+
class GenProto(Protocol[T]):
+    def meth(self) -> T:
+        ...
+
+
+
+
+property metadata_path: pathlib.Path#
+
+ +
+ +
+
+class core.common.data_parallel.StatefulDistributedSampler(dataset, batch_size, **kwargs)#
+

Bases: torch.utils.data.DistributedSampler

+

More fine-grained, stateful DataSampler that uses both the training iteration and the epoch for shuffling data. PyTorch's DistributedSampler only uses the epoch for shuffling and starts sampling from the beginning of the dataset. When training on very large data we train for one epoch only, so when training resumes we want the data sampler to resume from the saved training iteration.

+
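A hedged sketch of resuming mid-epoch with this sampler (dataset, batch size, and start iteration are placeholders; extra kwargs are assumed to be forwarded to DistributedSampler):

from torch.utils.data import DataLoader
from fairchem.core.common.data_parallel import StatefulDistributedSampler

sampler = StatefulDistributedSampler(dataset, batch_size=16, num_replicas=1, rank=0)
sampler.set_epoch_and_start_iteration(epoch=0, start_iter=5000)  # resume from iteration 5000
loader = DataLoader(dataset, batch_size=16, sampler=sampler)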
+
+__iter__()#
+
+ +
+
+set_epoch_and_start_iteration(epoch, start_iter)#
+
+ +
+ +
+
+class core.common.data_parallel.BalancedBatchSampler(dataset, batch_size: int, num_replicas: int, rank: int, device: torch.device, mode: str | bool = 'atoms', shuffle: bool = True, drop_last: bool = False, force_balancing: bool = False, throw_on_error: bool = False)#
+

Bases: torch.utils.data.Sampler

+

Base class for all Samplers.

+

Every Sampler subclass has to provide an __iter__() method, providing a +way to iterate over indices or lists of indices (batches) of dataset elements, and a __len__() method +that returns the length of the returned iterators.

+
+
Parameters:
+

data_source (Dataset) – This argument is not used and will be removed in 2.2.0. +You may still have custom implementation that utilizes it.

+
+
+

Example

+
>>> # xdoctest: +SKIP
+>>> class AccedingSequenceLengthSampler(Sampler[int]):
+>>>     def __init__(self, data: List[str]) -> None:
+>>>         self.data = data
+>>>
+>>>     def __len__(self) -> int:
+>>>         return len(self.data)
+>>>
+>>>     def __iter__(self) -> Iterator[int]:
+>>>         sizes = torch.tensor([len(x) for x in self.data])
+>>>         yield from torch.argsort(sizes).tolist()
+>>>
+>>> class AccedingSequenceLengthBatchSampler(Sampler[List[int]]):
+>>>     def __init__(self, data: List[str], batch_size: int) -> None:
+>>>         self.data = data
+>>>         self.batch_size = batch_size
+>>>
+>>>     def __len__(self) -> int:
+>>>         return (len(self.data) + self.batch_size - 1) // self.batch_size
+>>>
+>>>     def __iter__(self) -> Iterator[List[int]]:
+>>>         sizes = torch.tensor([len(x) for x in self.data])
+>>>         for batch in torch.chunk(torch.argsort(sizes), len(self)):
+>>>             yield batch.tolist()
+
+
+
+

Note

+

The __len__() method isn’t strictly required by +DataLoader, but is expected in any +calculation involving the length of a DataLoader.

+
+
+
+_load_dataset(dataset, mode: Literal[atoms, neighbors])#
+
+ +
+
+__len__() int#
+
+ +
+
+set_epoch_and_start_iteration(epoch: int, start_iteration: int) None#
+
+ +
+
+__iter__()#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/distutils/index.html b/autoapi/core/common/distutils/index.html new file mode 100644 index 000000000..8ba7edb15 --- /dev/null +++ b/autoapi/core/common/distutils/index.html @@ -0,0 +1,914 @@ + + + + + + + + + + + core.common.distutils — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.common.distutils#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

os_environ_get_or_throw(→ str)

setup(→ None)

cleanup(→ None)

initialized(→ bool)

get_rank(→ int)

get_world_size(→ int)

is_master(→ bool)

synchronize(→ None)

broadcast(→ None)

all_reduce(→ torch.Tensor)

all_gather(→ list[torch.Tensor])

gather_objects(→ list[T])

Gather a list of pickleable objects into rank 0

+
+
+

Attributes#

+ + + + + + +

T

+
+
+core.common.distutils.T#
+
+ +
+
+core.common.distutils.os_environ_get_or_throw(x: str) str#
+
+ +
+
+core.common.distutils.setup(config) None#
+
+ +
+
+core.common.distutils.cleanup() None#
+
+ +
+
+core.common.distutils.initialized() bool#
+
+ +
+
+core.common.distutils.get_rank() int#
+
+ +
+
+core.common.distutils.get_world_size() int#
+
+ +
+
+core.common.distutils.is_master() bool#
+
+ +
+
+core.common.distutils.synchronize() None#
+
+ +
+
+core.common.distutils.broadcast(tensor: torch.Tensor, src, group=dist.group.WORLD, async_op: bool = False) None#
+
+ +
+
+core.common.distutils.all_reduce(data, group=dist.group.WORLD, average: bool = False, device=None) torch.Tensor#
+
+ +
+
+core.common.distutils.all_gather(data, group=dist.group.WORLD, device=None) list[torch.Tensor]#
+
+ +
+
+core.common.distutils.gather_objects(data: T, group: torch.distributed.ProcessGroup = dist.group.WORLD) list[T]#
+

Gather a list of pickleable objects into rank 0

+
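A hedged sketch of gathering per-rank results, only meaningful inside an initialized distributed run (the payload dict is a placeholder):

from fairchem.core.common import distutils

local_result = {"rank": distutils.get_rank(), "num_samples": 128}  # placeholder payload
all_results = distutils.gather_objects(local_result)
if distutils.is_master():
    print(len(all_results))  # expected to equal distutils.get_world_size()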
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/flags/index.html b/autoapi/core/common/flags/index.html new file mode 100644 index 000000000..6ec84e86f --- /dev/null +++ b/autoapi/core/common/flags/index.html @@ -0,0 +1,821 @@ + + + + + + + + + + + core.common.flags — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.common.flags

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.common.flags#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

Flags

+
+
+

Attributes#

+ + + + + + +

flags

+
+
+class core.common.flags.Flags#
+
+
+get_parser() argparse.ArgumentParser#
+
+ +
+
+add_core_args() None#
+
+ +
+ +
+
+core.common.flags.flags#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/gp_utils/index.html b/autoapi/core/common/gp_utils/index.html new file mode 100644 index 000000000..be8a8a8c1 --- /dev/null +++ b/autoapi/core/common/gp_utils/index.html @@ -0,0 +1,1484 @@ + + + + + + + + + + + core.common.gp_utils — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.common.gp_utils#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + +

CopyToModelParallelRegion

Base class to create custom autograd.Function.

ReduceFromModelParallelRegion

Base class to create custom autograd.Function.

ScatterToModelParallelRegion

Base class to create custom autograd.Function.

GatherFromModelParallelRegion

Base class to create custom autograd.Function.

+
+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ensure_div(→ None)

divide_and_check_no_remainder(→ int)

setup_gp(→ None)

cleanup_gp(→ None)

initialized(→ bool)

get_dp_group()

get_gp_group()

get_dp_rank(→ int)

get_gp_rank(→ int)

get_dp_world_size(→ int)

get_gp_world_size(→ int)

pad_tensor(→ torch.Tensor)

trim_tensor(tensor[, sizes, dim])

_split_tensor(tensor, num_parts[, dim, contiguous_chunks])

_reduce(→ torch.Tensor)

_split(→ torch.Tensor)

_gather(→ torch.Tensor)

_gather_with_padding(→ torch.Tensor)

copy_to_model_parallel_region(→ torch.Tensor)

reduce_from_model_parallel_region(→ torch.Tensor)

scatter_to_model_parallel_region(→ torch.Tensor)

gather_from_model_parallel_region(→ torch.Tensor)

+
+
+

Attributes#

+ + + + + + + + + +

_GRAPH_PARALLEL_GROUP

_DATA_PARALLEL_GROUP

+
+
+core.common.gp_utils._GRAPH_PARALLEL_GROUP#
+
+ +
+
+core.common.gp_utils._DATA_PARALLEL_GROUP#
+
+ +
+
+core.common.gp_utils.ensure_div(a: int, b: int) None#
+
+ +
+
+core.common.gp_utils.divide_and_check_no_remainder(a: int, b: int) int#
+
+ +
+
+core.common.gp_utils.setup_gp(config) None#
+
+ +
+
+core.common.gp_utils.cleanup_gp() None#
+
+ +
+
+core.common.gp_utils.initialized() bool#
+
+ +
+
+core.common.gp_utils.get_dp_group()#
+
+ +
+
+core.common.gp_utils.get_gp_group()#
+
+ +
+
+core.common.gp_utils.get_dp_rank() int#
+
+ +
+
+core.common.gp_utils.get_gp_rank() int#
+
+ +
+
+core.common.gp_utils.get_dp_world_size() int#
+
+ +
+
+core.common.gp_utils.get_gp_world_size() int#
+
+ +
+
+core.common.gp_utils.pad_tensor(tensor: torch.Tensor, dim: int = -1, target_size: int | None = None) torch.Tensor#
+
+ +
+
+core.common.gp_utils.trim_tensor(tensor: torch.Tensor, sizes: torch.Tensor | None = None, dim: int = 0)#
+
+ +
+
+core.common.gp_utils._split_tensor(tensor: torch.Tensor, num_parts: int, dim: int = -1, contiguous_chunks: bool = False)#
+
+ +
+
+core.common.gp_utils._reduce(ctx: Any, input: torch.Tensor) torch.Tensor#
+
+ +
+
+core.common.gp_utils._split(input: torch.Tensor, dim: int = -1) torch.Tensor#
+
+ +
+
+core.common.gp_utils._gather(input: torch.Tensor, dim: int = -1) torch.Tensor#
+
+ +
+
+core.common.gp_utils._gather_with_padding(input: torch.Tensor, dim: int = -1) torch.Tensor#
+
+ +
+
+class core.common.gp_utils.CopyToModelParallelRegion(*args, **kwargs)#
+

Bases: torch.autograd.Function

+

Base class to create custom autograd.Function.

+

To create a custom autograd.Function, subclass this class and implement +the forward() and backward() static methods. Then, to use your custom +op in the forward pass, call the class method apply. Do not call +forward() directly.

+

To ensure correctness and best performance, make sure you are calling the +correct methods on ctx and validating your backward function using +torch.autograd.gradcheck().

+

See extending-autograd for more details on how to use this class.

+

Examples:

+
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
+>>> class Exp(Function):
+>>>     @staticmethod
+>>>     def forward(ctx, i):
+>>>         result = i.exp()
+>>>         ctx.save_for_backward(result)
+>>>         return result
+>>>
+>>>     @staticmethod
+>>>     def backward(ctx, grad_output):
+>>>         result, = ctx.saved_tensors
+>>>         return grad_output * result
+>>>
+>>> # Use it by calling the apply method:
+>>> # xdoctest: +SKIP
+>>> output = Exp.apply(input)
+
+
+
+
+static forward(ctx, input: torch.Tensor) torch.Tensor#
+

Define the forward of the custom autograd Function.

+

This function is to be overridden by all subclasses. +There are two ways to define forward:

+

Usage 1 (Combined forward and ctx):

+
@staticmethod
+def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
+    pass
+
+
+
    +
  • It must accept a context ctx as the first argument, followed by any +number of arguments (tensors or other types).

  • +
  • See combining-forward-context for more details

  • +
+

Usage 2 (Separate forward and ctx):

+
@staticmethod
+def forward(*args: Any, **kwargs: Any) -> Any:
+    pass
+
+@staticmethod
+def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
+    pass
+
+
+
    +
  • The forward no longer accepts a ctx argument.

  • +
  • Instead, you must also override the torch.autograd.Function.setup_context() +staticmethod to handle setting up the ctx object. +output is the output of the forward, inputs are a Tuple of inputs +to the forward.

  • +
  • See extending-autograd for more details

  • +
+

The context can be used to store arbitrary data that can then be retrieved during the backward pass. Tensors should not be stored directly on ctx (though this is not currently enforced for backward compatibility). Instead, tensors should be saved either with ctx.save_for_backward() if they are intended to be used in backward (equivalently, vjp) or ctx.save_for_forward() if they are intended to be used in jvp.

+
+ +
+
+static backward(ctx, grad_output: torch.Tensor) torch.Tensor#
+

Define a formula for differentiating the operation with backward mode automatic differentiation.

+

This function is to be overridden by all subclasses. +(Defining this function is equivalent to defining the vjp function.)

+

It must accept a context ctx as the first argument, followed by +as many outputs as the forward() returned (None will be passed in +for non tensor outputs of the forward function), +and it should return as many tensors, as there were inputs to +forward(). Each argument is the gradient w.r.t the given output, +and each returned value should be the gradient w.r.t. the +corresponding input. If an input is not a Tensor or is a Tensor not +requiring grads, you can just pass None as a gradient for that input.

+

The context can be used to retrieve tensors saved during the forward +pass. It also has an attribute ctx.needs_input_grad as a tuple +of booleans representing whether each input needs gradient. E.g., +backward() will have ctx.needs_input_grad[0] = True if the +first input to forward() needs gradient computed w.r.t. the +output.

+
+ +
+ +
+
+class core.common.gp_utils.ReduceFromModelParallelRegion(*args, **kwargs)#
+

Bases: torch.autograd.Function

+

Base class to create custom autograd.Function.

+

To create a custom autograd.Function, subclass this class and implement +the forward() and backward() static methods. Then, to use your custom +op in the forward pass, call the class method apply. Do not call +forward() directly.

+

To ensure correctness and best performance, make sure you are calling the +correct methods on ctx and validating your backward function using +torch.autograd.gradcheck().

+

See extending-autograd for more details on how to use this class.

+

Examples:

+
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
+>>> class Exp(Function):
+>>>     @staticmethod
+>>>     def forward(ctx, i):
+>>>         result = i.exp()
+>>>         ctx.save_for_backward(result)
+>>>         return result
+>>>
+>>>     @staticmethod
+>>>     def backward(ctx, grad_output):
+>>>         result, = ctx.saved_tensors
+>>>         return grad_output * result
+>>>
+>>> # Use it by calling the apply method:
+>>> # xdoctest: +SKIP
+>>> output = Exp.apply(input)
+
+
+
+
+static forward(ctx, input: torch.Tensor) torch.Tensor#
+

Define the forward of the custom autograd Function.

+

This function is to be overridden by all subclasses. +There are two ways to define forward:

+

Usage 1 (Combined forward and ctx):

+
@staticmethod
+def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
+    pass
+
+
+
    +
  • It must accept a context ctx as the first argument, followed by any +number of arguments (tensors or other types).

  • +
  • See combining-forward-context for more details

  • +
+

Usage 2 (Separate forward and ctx):

+
@staticmethod
+def forward(*args: Any, **kwargs: Any) -> Any:
+    pass
+
+@staticmethod
+def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
+    pass
+
+
+
    +
  • The forward no longer accepts a ctx argument.

  • +
  • Instead, you must also override the torch.autograd.Function.setup_context() +staticmethod to handle setting up the ctx object. +output is the output of the forward, inputs are a Tuple of inputs +to the forward.

  • +
  • See extending-autograd for more details

  • +
+

The context can be used to store arbitrary data that can then be retrieved during the backward pass. Tensors should not be stored directly on ctx (though this is not currently enforced for backward compatibility). Instead, tensors should be saved either with ctx.save_for_backward() if they are intended to be used in backward (equivalently, vjp) or ctx.save_for_forward() if they are intended to be used in jvp.

+
+ +
+
+static backward(ctx, grad_output: torch.Tensor) torch.Tensor#
+

Define a formula for differentiating the operation with backward mode automatic differentiation.

+

This function is to be overridden by all subclasses. +(Defining this function is equivalent to defining the vjp function.)

+

It must accept a context ctx as the first argument, followed by +as many outputs as the forward() returned (None will be passed in +for non tensor outputs of the forward function), +and it should return as many tensors, as there were inputs to +forward(). Each argument is the gradient w.r.t the given output, +and each returned value should be the gradient w.r.t. the +corresponding input. If an input is not a Tensor or is a Tensor not +requiring grads, you can just pass None as a gradient for that input.

+

The context can be used to retrieve tensors saved during the forward +pass. It also has an attribute ctx.needs_input_grad as a tuple +of booleans representing whether each input needs gradient. E.g., +backward() will have ctx.needs_input_grad[0] = True if the +first input to forward() needs gradient computed w.r.t. the +output.

+
+ +
+ +
+
+class core.common.gp_utils.ScatterToModelParallelRegion(*args, **kwargs)#
+

Bases: torch.autograd.Function

+

Base class to create custom autograd.Function.

+

To create a custom autograd.Function, subclass this class and implement +the forward() and backward() static methods. Then, to use your custom +op in the forward pass, call the class method apply. Do not call +forward() directly.

+

To ensure correctness and best performance, make sure you are calling the +correct methods on ctx and validating your backward function using +torch.autograd.gradcheck().

+

See extending-autograd for more details on how to use this class.

+

Examples:

+
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
+>>> class Exp(Function):
+>>>     @staticmethod
+>>>     def forward(ctx, i):
+>>>         result = i.exp()
+>>>         ctx.save_for_backward(result)
+>>>         return result
+>>>
+>>>     @staticmethod
+>>>     def backward(ctx, grad_output):
+>>>         result, = ctx.saved_tensors
+>>>         return grad_output * result
+>>>
+>>> # Use it by calling the apply method:
+>>> # xdoctest: +SKIP
+>>> output = Exp.apply(input)
+
+
+
+
+static forward(ctx, input: torch.Tensor, dim: int = -1) torch.Tensor#
+

Define the forward of the custom autograd Function.

+

This function is to be overridden by all subclasses. +There are two ways to define forward:

+

Usage 1 (Combined forward and ctx):

+
@staticmethod
+def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
+    pass
+
+
+
    +
  • It must accept a context ctx as the first argument, followed by any +number of arguments (tensors or other types).

  • +
  • See combining-forward-context for more details

  • +
+

Usage 2 (Separate forward and ctx):

+
@staticmethod
+def forward(*args: Any, **kwargs: Any) -> Any:
+    pass
+
+@staticmethod
+def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
+    pass
+
+
+
    +
  • The forward no longer accepts a ctx argument.

  • +
  • Instead, you must also override the torch.autograd.Function.setup_context() +staticmethod to handle setting up the ctx object. +output is the output of the forward, inputs are a Tuple of inputs +to the forward.

  • +
  • See extending-autograd for more details

  • +
+

The context can be used to store arbitrary data that can then be retrieved during the backward pass. Tensors should not be stored directly on ctx (though this is not currently enforced for backward compatibility). Instead, tensors should be saved either with ctx.save_for_backward() if they are intended to be used in backward (equivalently, vjp) or ctx.save_for_forward() if they are intended to be used in jvp.

+
+ +
+
+static backward(ctx, grad_output: torch.Tensor)#
+

Define a formula for differentiating the operation with backward mode automatic differentiation.

+

This function is to be overridden by all subclasses. +(Defining this function is equivalent to defining the vjp function.)

+

It must accept a context ctx as the first argument, followed by +as many outputs as the forward() returned (None will be passed in +for non tensor outputs of the forward function), +and it should return as many tensors, as there were inputs to +forward(). Each argument is the gradient w.r.t the given output, +and each returned value should be the gradient w.r.t. the +corresponding input. If an input is not a Tensor or is a Tensor not +requiring grads, you can just pass None as a gradient for that input.

+

The context can be used to retrieve tensors saved during the forward +pass. It also has an attribute ctx.needs_input_grad as a tuple +of booleans representing whether each input needs gradient. E.g., +backward() will have ctx.needs_input_grad[0] = True if the +first input to forward() needs gradient computed w.r.t. the +output.

+
+ +
+ +
+
+class core.common.gp_utils.GatherFromModelParallelRegion(*args, **kwargs)#
+

Bases: torch.autograd.Function

+

Base class to create custom autograd.Function.

+

To create a custom autograd.Function, subclass this class and implement +the forward() and backward() static methods. Then, to use your custom +op in the forward pass, call the class method apply. Do not call +forward() directly.

+

To ensure correctness and best performance, make sure you are calling the +correct methods on ctx and validating your backward function using +torch.autograd.gradcheck().

+

See extending-autograd for more details on how to use this class.

+

Examples:

+
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
+>>> class Exp(Function):
+>>>     @staticmethod
+>>>     def forward(ctx, i):
+>>>         result = i.exp()
+>>>         ctx.save_for_backward(result)
+>>>         return result
+>>>
+>>>     @staticmethod
+>>>     def backward(ctx, grad_output):
+>>>         result, = ctx.saved_tensors
+>>>         return grad_output * result
+>>>
+>>> # Use it by calling the apply method:
+>>> # xdoctest: +SKIP
+>>> output = Exp.apply(input)
+
+
+
+
+static forward(ctx, input: torch.Tensor, dim: int = -1) torch.Tensor#
+

Define the forward of the custom autograd Function.

+

This function is to be overridden by all subclasses. +There are two ways to define forward:

+

Usage 1 (Combined forward and ctx):

+
@staticmethod
+def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
+    pass
+
+
+
    +
  • It must accept a context ctx as the first argument, followed by any +number of arguments (tensors or other types).

  • +
  • See combining-forward-context for more details

  • +
+

Usage 2 (Separate forward and ctx):

+
@staticmethod
+def forward(*args: Any, **kwargs: Any) -> Any:
+    pass
+
+@staticmethod
+def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
+    pass
+
+
+
    +
  • The forward no longer accepts a ctx argument.

  • +
  • Instead, you must also override the torch.autograd.Function.setup_context() +staticmethod to handle setting up the ctx object. +output is the output of the forward, inputs are a Tuple of inputs +to the forward.

  • +
  • See extending-autograd for more details

  • +
+

The context can be used to store arbitrary data that can then be retrieved during the backward pass. Tensors should not be stored directly on ctx (though this is not currently enforced for backward compatibility). Instead, tensors should be saved either with ctx.save_for_backward() if they are intended to be used in backward (equivalently, vjp) or ctx.save_for_forward() if they are intended to be used in jvp.

+
+ +
+
+static backward(ctx, grad_output: torch.Tensor)#
+

Define a formula for differentiating the operation with backward mode automatic differentiation.

+

This function is to be overridden by all subclasses. +(Defining this function is equivalent to defining the vjp function.)

+

It must accept a context ctx as the first argument, followed by +as many outputs as the forward() returned (None will be passed in +for non tensor outputs of the forward function), +and it should return as many tensors, as there were inputs to +forward(). Each argument is the gradient w.r.t the given output, +and each returned value should be the gradient w.r.t. the +corresponding input. If an input is not a Tensor or is a Tensor not +requiring grads, you can just pass None as a gradient for that input.

+

The context can be used to retrieve tensors saved during the forward +pass. It also has an attribute ctx.needs_input_grad as a tuple +of booleans representing whether each input needs gradient. E.g., +backward() will have ctx.needs_input_grad[0] = True if the +first input to forward() needs gradient computed w.r.t. the +output.

+
+ +
+ +
+
+core.common.gp_utils.copy_to_model_parallel_region(input: torch.Tensor) torch.Tensor#
+
+ +
+
+core.common.gp_utils.reduce_from_model_parallel_region(input: torch.Tensor) torch.Tensor#
+
+ +
+
+core.common.gp_utils.scatter_to_model_parallel_region(input: torch.Tensor, dim: int = -1) torch.Tensor#
+
+ +
+
+core.common.gp_utils.gather_from_model_parallel_region(input: torch.Tensor, dim: int = -1) torch.Tensor#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/hpo_utils/index.html b/autoapi/core/common/hpo_utils/index.html new file mode 100644 index 000000000..56a23ebf4 --- /dev/null +++ b/autoapi/core/common/hpo_utils/index.html @@ -0,0 +1,807 @@ + + + + + + + + + + + core.common.hpo_utils — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.common.hpo_utils

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.common.hpo_utils#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

tune_reporter(→ None)

Wrapper function for tune.report()

label_metric_dict(metric_dict, split)

+
+
+core.common.hpo_utils.tune_reporter(iters, train_metrics, val_metrics, test_metrics=None, metric_to_opt: str = 'val_loss', min_max: str = 'min') None#
+

Wrapper function for tune.report()

+
+
Parameters:
+
    +
  • iters (dict) – dict with training iteration info (e.g. steps, epochs)

  • +
  • train_metrics (dict) – train metrics dict

  • +
  • val_metrics (dict) – val metrics dict

  • +
  • test_metrics (dict, optional) – test metrics dict, default is None

  • +
  • metric_to_opt (str, optional) – str for val metric to optimize, default is val_loss

  • +
  • min_max (str, optional) – either “min” or “max”, determines whether metric_to_opt is to be minimized or maximized, default is min

  • +
+
+
+
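A hedged usage sketch matching the documented signature (the metric values are placeholders):

from fairchem.core.common.hpo_utils import tune_reporter

tune_reporter(
    iters={"steps": 1000, "epochs": 2},
    train_metrics={"loss": 0.41, "energy_mae": 0.35},
    val_metrics={"loss": 0.52, "energy_mae": 0.44},
    metric_to_opt="val_loss",
    min_max="min",
)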
+ +
+
+core.common.hpo_utils.label_metric_dict(metric_dict, split)#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/index.html b/autoapi/core/common/index.html new file mode 100644 index 000000000..128ff317f --- /dev/null +++ b/autoapi/core/common/index.html @@ -0,0 +1,792 @@ + + + + + + + + + + + core.common — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.common

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.common#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Subpackages#

+ +
+
+

Submodules#

+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/logger/index.html b/autoapi/core/common/logger/index.html new file mode 100644 index 000000000..2e097f72f --- /dev/null +++ b/autoapi/core/common/logger/index.html @@ -0,0 +1,915 @@ + + + + + + + + + + + core.common.logger — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.common.logger#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + +

Logger

Generic class to interface with various logging modules, e.g. wandb,

WandBLogger

Generic class to interface with various logging modules, e.g. wandb,

TensorboardLogger

Generic class to interface with various logging modules, e.g. wandb,

+
+
+class core.common.logger.Logger(config)#
+

Bases: abc.ABC

+

Generic class to interface with various logging modules, e.g. wandb, +tensorboard, etc.

+
+
+abstract watch(model)#
+

Monitor parameters and gradients.

+
+ +
+
+log(update_dict, step: int, split: str = '')#
+

Log some values.

+
+ +
+
+abstract log_plots(plots) None#
+
+ +
+
+abstract mark_preempting() None#
+
+ +
+ +
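A hedged sketch of a minimal Logger subclass (the class name is hypothetical and this is not one of the shipped loggers):

from fairchem.core.common.logger import Logger

class PrintLogger(Logger):
    def watch(self, model):
        return False  # parameter/gradient monitoring not implemented in this sketch

    def log(self, update_dict, step, split=""):
        print(f"[{split}] step {step}: {update_dict}")

    def log_plots(self, plots):
        pass

    def mark_preempting(self):
        pass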
+
+class core.common.logger.WandBLogger(config)#
+

Bases: Logger

+

Generic class to interface with various logging modules, e.g. wandb, +tensorboard, etc.

+
+
+watch(model) None#
+

Monitor parameters and gradients.

+
+ +
+
+log(update_dict, step: int, split: str = '') None#
+

Log some values.

+
+ +
+
+log_plots(plots, caption: str = '') None#
+
+ +
+
+mark_preempting() None#
+
+ +
+ +
+
+class core.common.logger.TensorboardLogger(config)#
+

Bases: Logger

+

Generic class to interface with various logging modules, e.g. wandb, +tensorboard, etc.

+
+
+watch(model) bool#
+

Monitor parameters and gradients.

+
+ +
+
+log(update_dict, step: int, split: str = '')#
+

Log some values.

+
+ +
+
+mark_preempting() None#
+
+ +
+
+log_plots(plots) None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/registry/index.html b/autoapi/core/common/registry/index.html new file mode 100644 index 000000000..b9cddab14 --- /dev/null +++ b/autoapi/core/common/registry/index.html @@ -0,0 +1,1084 @@ + + + + + + + + + + + core.common.registry — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.common.registry#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+

# Copyright (c) Meta, Inc. and its affiliates. +# Borrowed from facebookresearch/pythia.

+

Registry is the central source of truth. Inspired by Redux's concept of a global store, Registry maintains mappings of various information to unique keys. Special functions in the registry can be used as decorators to register different kinds of classes.

+

Import the global registry object using

+

from fairchem.core.common.registry import registry

+

Various decorators for registering different kinds of classes with unique keys:

+
    +
  • Register a model: @registry.register_model

  • +
+
+

Module Contents#

+
+

Classes#

+ + + + + + +

Registry

Class for registry object which acts as central source of truth.

+
+
+

Functions#

+ + + + + + +

_get_absolute_mapping(name)

+
+
+

Attributes#

+ + + + + + + + + + + + +

R

NestedDict

registry

+
+
+core.common.registry.R#
+
+ +
+
+core.common.registry.NestedDict#
+
+ +
+
+core.common.registry._get_absolute_mapping(name: str)#
+
+ +
+
+class core.common.registry.Registry#
+

Class for registry object which acts as central source of truth.

+
+
+mapping: ClassVar[NestedDict]#
+
+ +
+
+classmethod register_task(name: str)#
+

Register a new task to registry with key ‘name’

Parameters:

name – Key with which the task will be registered.

Usage:

from fairchem.core.common.registry import registry
+from fairchem.core.tasks import BaseTask
+
+@registry.register_task("train")
+class TrainTask(BaseTask):
+    ...

+
+

+
+
+
+
+ +
+
+classmethod register_dataset(name: str)#
+

Register a dataset to registry with key ‘name’

+
+
Parameters:
+

name – Key with which the dataset will be registered.

+
+
+

Usage:

+
from fairchem.core.common.registry import registry
+from fairchem.core.datasets import BaseDataset
+
+@registry.register_dataset("qm9")
+class QM9(BaseDataset):
+    ...
+
+
+
+ +
+
+classmethod register_model(name: str)#
+

Register a model to registry with key ‘name’

+
+
Parameters:
+

name – Key with which the model will be registered.

+
+
+

Usage:

+
from fairchem.core.common.registry import registry
+from fairchem.core.modules.layers import CGCNNConv
+
+@registry.register_model("cgcnn")
+class CGCNN():
+    ...
+
+
+
+ +
+
+classmethod register_logger(name: str)#
+

Register a logger to registry with key ‘name’

+
+
Parameters:
+

name – Key with which the logger will be registered.

+
+
+

Usage:

+
from fairchem.core.common.registry import registry
+
+@registry.register_logger("wandb")
+class WandBLogger():
+    ...
+
+
+
+ +
+
+classmethod register_trainer(name: str)#
+

Register a trainer to registry with key ‘name’

+
+
Parameters:
+

name – Key with which the trainer will be registered.

+
+
+

Usage:

+
from fairchem.core.common.registry import registry
+
+@registry.register_trainer("active_discovery")
+class ActiveDiscoveryTrainer():
+    ...
+
+
+
+ +
+
+classmethod register(name: str, obj) None#
+

Register an item to registry with key ‘name’

+
+
Parameters:
+

name – Key with which the item will be registered.

+
+
+

Usage:

+
from fairchem.core.common.registry import registry
+
+registry.register("config", {})
+
+
+
+ +
+
+classmethod __import_error(name: str, mapping_name: str) RuntimeError#
+
+ +
+
+classmethod get_class(name: str, mapping_name: str)#
+
+ +
+
+classmethod get_task_class(name: str)#
+
+ +
+
+classmethod get_dataset_class(name: str)#
+
+ +
+
+classmethod get_model_class(name: str)#
+
+ +
+
+classmethod get_logger_class(name: str)#
+
+ +
+
+classmethod get_trainer_class(name: str)#
+
+ +
+
+classmethod get(name: str, default=None, no_warning: bool = False)#
+

Get an item from registry with key ‘name’

+
+
Parameters:
+
    +
  • name (string) – Key whose value needs to be retrieved.

  • +
  • default – If passed and key is not in registry, default value will +be returned with a warning. Default: None

  • +
  • no_warning (bool) – If passed as True, warning when key doesn’t exist +will not be generated. Useful for cgcnn’s +internal operations. Default: False

  • +
+
+
+

Usage:

+
from fairchem.core.common.registry import registry
+
+config = registry.get("config")
+
+
+
+ +
+
+classmethod unregister(name: str)#
+

Remove an item from registry with key ‘name’

+
+
Parameters:
+

name – Key which needs to be removed.

+
+
+

Usage:

+
from fairchem.core.common.registry import registry
+
+config = registry.unregister("config")
+
+
+
+ +
+ +
+
+core.common.registry.registry#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/relaxation/ase_utils/index.html b/autoapi/core/common/relaxation/ase_utils/index.html new file mode 100644 index 000000000..403a2f547 --- /dev/null +++ b/autoapi/core/common/relaxation/ase_utils/index.html @@ -0,0 +1,874 @@ + + + + + + + + + + + core.common.relaxation.ase_utils — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.common.relaxation.ase_utils#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+

Utilities to interface OCP models/trainers with the Atomic Simulation +Environment (ASE)

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

OCPCalculator

Base-class for all ASE calculators.

+
+
+

Functions#

+ + + + + + +

batch_to_atoms(batch)

+
+
+core.common.relaxation.ase_utils.batch_to_atoms(batch)#
+
+ +
+
+class core.common.relaxation.ase_utils.OCPCalculator(config_yml: str | None = None, checkpoint_path: str | None = None, model_name: str | None = None, local_cache: str | None = None, trainer: str | None = None, cutoff: int = 6, max_neighbors: int = 50, cpu: bool = True, seed: int | None = None)#
+

Bases: ase.calculators.calculator.Calculator

+

Base-class for all ASE calculators.

+

A calculator must raise PropertyNotImplementedError if asked for a +property that it can’t calculate. So, if calculation of the +stress tensor has not been implemented, get_stress(atoms) should +raise PropertyNotImplementedError. This can be achieved simply by not +including the string ‘stress’ in the list implemented_properties +which is a class member. These are the names of the standard +properties: ‘energy’, ‘forces’, ‘stress’, ‘dipole’, ‘charges’, +‘magmom’ and ‘magmoms’.

+
+
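A hedged usage sketch with ASE (the checkpoint path is a placeholder, and the import path is assumed from this module's name):

from ase.build import fcc111
from fairchem.core.common.relaxation.ase_utils import OCPCalculator

calc = OCPCalculator(checkpoint_path="/path/to/checkpoint.pt", cpu=True)  # placeholder path
slab = fcc111("Cu", size=(2, 2, 3), vacuum=10.0)
slab.calc = calc
print(slab.get_potential_energy(), slab.get_forces().shape)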
+implemented_properties: ClassVar[list[str]] = ['energy', 'forces']#
+
+ +
+
+load_checkpoint(checkpoint_path: str, checkpoint: dict | None = None) None#
+

Load existing trained model

+
+
Parameters:
+

checkpoint_path – string +Path to trained model

+
+
+
+ +
+
+calculate(atoms: ase.Atoms, properties, system_changes) None#
+

Do the calculation.

+
+
properties: list of str

List of what needs to be calculated. Can be any combination +of ‘energy’, ‘forces’, ‘stress’, ‘dipole’, ‘charges’, ‘magmom’ +and ‘magmoms’.

+
+
system_changes: list of str

List of what has changed since last calculation. Can be +any combination of these six: ‘positions’, ‘numbers’, ‘cell’, +‘pbc’, ‘initial_charges’ and ‘initial_magmoms’.

+
+
+

Subclasses need to implement this, but can ignore properties +and system_changes if they want. Calculated properties should +be inserted into results dictionary like shown in this dummy +example:

+
self.results = {'energy': 0.0,
+                'forces': np.zeros((len(atoms), 3)),
+                'stress': np.zeros(6),
+                'dipole': np.zeros(3),
+                'charges': np.zeros(len(atoms)),
+                'magmom': 0.0,
+                'magmoms': np.zeros(len(atoms))}
+
+
+

The subclass implementation should first call this +implementation to set the atoms attribute and create any missing +directories.

+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/relaxation/index.html b/autoapi/core/common/relaxation/index.html new file mode 100644 index 000000000..fb931d303 --- /dev/null +++ b/autoapi/core/common/relaxation/index.html @@ -0,0 +1,773 @@ + + + + + + + + + + + core.common.relaxation — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.common.relaxation

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.common.relaxation#

+
+

Subpackages#

+ +
+
+

Submodules#

+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/relaxation/ml_relaxation/index.html b/autoapi/core/common/relaxation/ml_relaxation/index.html new file mode 100644 index 000000000..4e83d95f4 --- /dev/null +++ b/autoapi/core/common/relaxation/ml_relaxation/index.html @@ -0,0 +1,804 @@ + + + + + + + + + + + core.common.relaxation.ml_relaxation — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.common.relaxation.ml_relaxation

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.common.relaxation.ml_relaxation#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + +

ml_relax(batch, model, steps, fmax, relax_opt, ...[, ...])

Runs ML-based relaxations.

+
+
+core.common.relaxation.ml_relaxation.ml_relax(batch, model, steps: int, fmax: float, relax_opt, save_full_traj, device: str = 'cuda:0', transform=None, early_stop_batch: bool = False)#
+

Runs ML-based relaxations.

Parameters:
• batch – object
• model – object
• steps – int. Max number of steps in the structure relaxation.
• fmax – float. Structure relaxation terminates when the max force of the system is no bigger than fmax.
• relax_opt – str. Optimizer and corresponding parameters to be used for structure relaxations.
• save_full_traj – bool. Whether to save out the full ASE trajectory. If False, only save out initial and final frames.
+
+
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/relaxation/optimizers/index.html b/autoapi/core/common/relaxation/optimizers/index.html new file mode 100644 index 000000000..3850696b2 --- /dev/null +++ b/autoapi/core/common/relaxation/optimizers/index.html @@ -0,0 +1,759 @@ + + + + + + + + + + + core.common.relaxation.optimizers — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.common.relaxation.optimizers

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.common.relaxation.optimizers#

+
+

Submodules#

+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/common/relaxation/optimizers/lbfgs_torch/index.html b/autoapi/core/common/relaxation/optimizers/lbfgs_torch/index.html new file mode 100644 index 000000000..da44e7ee3 --- /dev/null +++ b/autoapi/core/common/relaxation/optimizers/lbfgs_torch/index.html @@ -0,0 +1,858 @@ + + + + + + + + + + + core.common.relaxation.optimizers.lbfgs_torch — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.common.relaxation.optimizers.lbfgs_torch#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

LBFGS

TorchCalc

+
+
+class core.common.relaxation.optimizers.lbfgs_torch.LBFGS(batch: torch_geometric.data.Batch, model: TorchCalc, maxstep: float = 0.01, memory: int = 100, damping: float = 0.25, alpha: float = 100.0, force_consistent=None, device: str = 'cuda:0', save_full_traj: bool = True, traj_dir: pathlib.Path | None = None, traj_names=None, early_stop_batch: bool = False)#
+
+
+get_energy_and_forces(apply_constraint: bool = True)#
+
+ +
+
+set_positions(update, update_mask) None#
+
+ +
+
+check_convergence(iteration, forces=None, energy=None)#
+
+ +
+
+run(fmax, steps)#
+
+ +
+
+step(iteration: int, forces: torch.Tensor | None, update_mask: torch.Tensor) None#
+
+ +
+
+write(energy, forces, update_mask) None#
+
+ +
+ +
+
+class core.common.relaxation.optimizers.lbfgs_torch.TorchCalc(model, transform=None)#
+
+
+get_energy_and_forces(atoms, apply_constraint: bool = True)#
+
+ +
+
+update_graph(atoms)#
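Taken together, the two classes can drive a batched relaxation directly. A sketch under the same assumptions as above (placeholder batch/model, fairchem.core.* import path, hyperparameters set to the documented defaults):

from fairchem.core.common.relaxation.optimizers.lbfgs_torch import LBFGS, TorchCalc

calc = TorchCalc(trained_model)          # placeholder model, optional transform
opt = LBFGS(
    batch=data_batch,                    # placeholder torch_geometric Batch
    model=calc,
    maxstep=0.01,
    memory=100,
    damping=0.25,
    alpha=100.0,
    device="cpu",                        # default is 'cuda:0'
    save_full_traj=True,
)
relaxed = opt.run(fmax=0.02, steps=200)  # the exact return value is not documented here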
+
+ +
+ +
+
+
diff --git a/autoapi/core/common/transforms/index.html b/autoapi/core/common/transforms/index.html
new file mode 100644
+

core.common.transforms#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

RandomRotate

Rotates node positions around a specific axis by a randomly sampled

+
+
+class core.common.transforms.RandomRotate(degrees, axes: list[int] | None = None)#
+

Rotates node positions around a specific axis by a randomly sampled +factor within a given interval.

+
+
Parameters:
+
    +
  • degrees (tuple or float) – Rotation interval from which the rotation +angle is sampled. If degrees is a number instead of a +tuple, the interval is given by \([-\mathrm{degrees}, +\mathrm{degrees}]\).

  • +
  • axes (int, optional) – The rotation axes. (default: [0, 1, 2])

  • +
+
+
+
+
+__call__(data)#
+
+ +
+
+__repr__() str#
+

Return repr(self).
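A short construction sketch; data stands for a torch_geometric.data.Data object (for example, output of fairchem.core.preprocessing.AtomsToGraphs), the import path is assumed to be fairchem.core.common.transforms, and the exact return value of __call__ is not asserted here.

from fairchem.core.common.transforms import RandomRotate

rotate_z = RandomRotate(degrees=180, axes=[2])   # angle sampled from [-180, 180], z axis only
rotate_any = RandomRotate(degrees=(-30, 30))     # explicit interval, default axes [0, 1, 2]
out = rotate_z(data)                             # applies the randomly sampled rotation to node positions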

diff --git a/autoapi/core/common/tutorial_utils/index.html b/autoapi/core/common/tutorial_utils/index.html
new file mode 100644
+

core.common.tutorial_utils#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + +

fairchem_root()

Return the root directory of the installed fairchem-core package.

fairchem_main()

Return the path to fairchem main.py

describe_fairchem()

Print some system information that could be useful in debugging.

train_test_val_split(ase_db[, ttv, files, seed])

Split an ase db into train, test and validation dbs.

generate_yml_config(checkpoint_path[, yml, delete, update])

Generate a yml config file from an existing checkpoint file.

+
+
+core.common.tutorial_utils.fairchem_root()#
+

Return the root directory of the installed fairchem-core package.

+
+ +
+
+core.common.tutorial_utils.fairchem_main()#
+

Return the path to fairchem main.py

+
+ +
+
+core.common.tutorial_utils.describe_fairchem()#
+

Print some system information that could be useful in debugging.

+
+ +
+
+core.common.tutorial_utils.train_test_val_split(ase_db, ttv=(0.8, 0.1, 0.1), files=('train.db', 'test.db', 'val.db'), seed=42)#
+

Split an ase db into train, test and validation dbs.

+

ase_db: path to an ase db containing all the data. +ttv: a tuple containing the fraction of train, test and val data. This will be normalized. +files: a tuple of filenames to write the splits into. An exception is raised if these exist.

+
+

You should delete them first.

+
+

seed: an integer for the random number generator seed

+

Returns the absolute path to files.
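For example (a sketch: full_dataset.db is a placeholder, and unpacking the result into one path per split is an assumption based on the sentence above):

from fairchem.core.common.tutorial_utils import train_test_val_split

# The ttv fractions are normalized, so (8, 1, 1) behaves like (0.8, 0.1, 0.1).
# The target files must not already exist; delete stale splits first.
train_db, test_db, val_db = train_test_val_split(
    "full_dataset.db",
    ttv=(0.8, 0.1, 0.1),
    files=("train.db", "test.db", "val.db"),
    seed=42,
)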

+
+ +
+
+core.common.tutorial_utils.generate_yml_config(checkpoint_path, yml='run.yml', delete=(), update=())#
+

Generate a yml config file from an existing checkpoint file.

+

checkpoint_path: string path to an existing checkpoint
yml: name of the file to write to.
delete: list of keys to remove from the config
update: dictionary of key:values to update

+

Use a dot notation in update.

+

Returns an absolute path to the generated yml file.
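A sketch of the call shape; the checkpoint path and the keys being deleted or updated are purely illustrative.

from fairchem.core.common.tutorial_utils import generate_yml_config

yml_path = generate_yml_config(
    "/tmp/checkpoint.pt",                       # placeholder checkpoint path
    yml="run.yml",
    delete=("slurm", "cmd"),                    # illustrative keys to drop
    update={"gpus": 0, "optim.max_epochs": 1},  # dot notation reaches nested keys
)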

+
diff --git a/autoapi/core/common/typing/index.html b/autoapi/core/common/typing/index.html
new file mode 100644
+

core.common.typing#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

assert_is_instance(→ _T)

none_throws(→ _T)

+
+
+

Attributes#

+ + + + + + +

_T

+
+
+core.common.typing._T#
+
+ +
+
+core.common.typing.assert_is_instance(obj: object, cls: type[_T]) _T#
+
+ +
+
+core.common.typing.none_throws(x: _T | None, msg: str | None = None) _T#
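A small sketch of the intended use: fail loudly instead of silently propagating None or an unexpected type.

from fairchem.core.common.typing import assert_is_instance, none_throws

config: dict = {"batch_size": 4}
batch_size = none_throws(config.get("batch_size"), msg="batch_size must be set")  # raises if None
batch_size = assert_is_instance(batch_size, int)  # raises if not an int; typed as int afterwards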
+
+ +
diff --git a/autoapi/core/common/utils/index.html b/autoapi/core/common/utils/index.html
new file mode 100644
+

core.common.utils#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + +

UniqueKeyLoader

Complete

SeverityLevelBetween

Filter instances are used to perform arbitrary filtering of LogRecords.

+
+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

pyg2_data_transform(data)

if we're on the new pyg (2.0 or later) and if the Data stored is in older format

save_checkpoint(→ str)

warmup_lr_lambda(current_step, optim_config)

Returns a learning rate multiplier.

print_cuda_usage(→ None)

conditional_grad(dec)

Decorator to enable/disable grad depending on whether force/energy predictions are being made

plot_histogram(data[, xlabel, ylabel, title])

collate(data_list)

add_edge_distance_to_graph(batch[, device, dmin, ...])

_import_local_file(→ None)

Imports a Python file as a module

setup_experimental_imports(→ None)

Import selected directories of modules from the "experimental" subdirectory.

_get_project_root(→ pathlib.Path)

Gets the root folder of the project (the "ocp" folder)

setup_imports(→ None)

dict_set_recursively(→ None)

parse_value(value)

Parse string as Python literal if possible and fallback to string.

create_dict_from_args(args[, sep])

Create a (nested) dictionary from console arguments.

load_config(path[, previous_includes])

build_config(args, args_override)

create_grid(base_config, sweep_file)

save_experiment_log(args, jobs, configs)

get_pbc_distances(pos, edge_index, cell, cell_offsets, ...)

radius_graph_pbc(data, radius, max_num_neighbors_threshold)

get_max_neighbors_mask(natoms, index, atom_distance, ...)

Give a mask that filters out edges so that each atom has at most

get_pruned_edge_idx(→ torch.Tensor)

merge_dicts(dict1, dict2)

Recursively merge two dictionaries.

setup_logging(→ None)

compute_neighbors(data, edge_index)

check_traj_files(→ bool)

new_trainer_context(*, config[, distributed])

_resolve_scale_factor_submodule(model, name)

_report_incompat_keys(→ tuple[list[str], list[str]])

load_state_dict(→ tuple[list[str], list[str]])

scatter_det(*args, **kwargs)

get_commit_hash()

cg_change_mat(→ torch.tensor)

irreps_sum(→ int)

Returns the sum of the dimensions of the irreps up to the specified angular momentum.

update_config(base_config)

Configs created prior to OCP 2.0 are organized a little different than they

get_loss_module(loss_name)

+
+
+class core.common.utils.UniqueKeyLoader(stream)#
+

Bases: yaml.SafeLoader

+
+
+construct_mapping(node, deep=False)#
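A sketch of the intended use, assuming the overridden construct_mapping rejects duplicate mapping keys (which is what the name suggests but this page does not state explicitly):

import yaml
from fairchem.core.common.utils import UniqueKeyLoader

yml_text = "optim:\n  max_epochs: 1\n  batch_size: 4\n"
config = yaml.load(yml_text, Loader=UniqueKeyLoader)
# A mapping that repeated a key (say, two max_epochs entries) would be rejected.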
+
+ +
+ +
+
+core.common.utils.pyg2_data_transform(data: torch_geometric.data.Data)#
+

If we're on new PyG (2.0 or later) and the stored Data object is in the older format, convert it to the new format.

+
+ +
+
+core.common.utils.save_checkpoint(state, checkpoint_dir: str = 'checkpoints/', checkpoint_file: str = 'checkpoint.pt') str#
+
+ +
+
+class core.common.utils.Complete#
+
+
+__call__(data)#
+
+ +
+ +
+
+core.common.utils.warmup_lr_lambda(current_step: int, optim_config)#
+

Returns a learning rate multiplier. Until warmup_steps, the learning rate increases linearly to initial_lr; afterwards it is multiplied by lr_gamma every time a milestone is crossed.

+
+ +
+
+core.common.utils.print_cuda_usage() None#
+
+ +
+
+core.common.utils.conditional_grad(dec)#
+

Decorator to enable/disable grad depending on whether force/energy predictions are being made

+
+ +
+
+core.common.utils.plot_histogram(data, xlabel: str = '', ylabel: str = '', title: str = '')#
+
+ +
+
+core.common.utils.collate(data_list)#
+
+ +
+
+core.common.utils.add_edge_distance_to_graph(batch, device='cpu', dmin: float = 0.0, dmax: float = 6.0, num_gaussians: int = 50)#
+
+ +
+
+core.common.utils._import_local_file(path: pathlib.Path, *, project_root: pathlib.Path) None#
+

Imports a Python file as a module

+
+
Parameters:
+
    +
  • path (Path) – The path to the file to import

  • +
  • project_root (Path) – The root directory of the project (i.e., the “ocp” folder)

  • +
+
+
+
+ +
+
+core.common.utils.setup_experimental_imports(project_root: pathlib.Path) None#
+

Import selected directories of modules from the “experimental” subdirectory.

+

If a file named “.include” is present in the “experimental” subdirectory, +this will be read as a list of experimental subdirectories whose module +(including in any subsubdirectories) should be imported.

+
+
Parameters:
+

project_root – The root directory of the project (i.e., the “ocp” folder)

+
+
+
+ +
+
+core.common.utils._get_project_root() pathlib.Path#
+

Gets the root folder of the project (the “ocp” folder) +:return: The absolute path to the project root.

+
+ +
+
+core.common.utils.setup_imports(config: dict | None = None) None#
+
+ +
+
+core.common.utils.dict_set_recursively(dictionary, key_sequence, val) None#
+
+ +
+
+core.common.utils.parse_value(value)#
+

Parse string as Python literal if possible and fallback to string.

+
+ +
+
+core.common.utils.create_dict_from_args(args: list, sep: str = '.')#
+

Create a (nested) dictionary from console arguments. +Keys in different dictionary levels are separated by sep.

+
+ +
+
+core.common.utils.load_config(path: str, previous_includes: list | None = None)#
+
+ +
+
+core.common.utils.build_config(args, args_override)#
+
+ +
+
+core.common.utils.create_grid(base_config, sweep_file: str)#
+
+ +
+
+core.common.utils.save_experiment_log(args, jobs, configs)#
+
+ +
+
+core.common.utils.get_pbc_distances(pos, edge_index, cell, cell_offsets, neighbors, return_offsets: bool = False, return_distance_vec: bool = False)#
+
+ +
+
+core.common.utils.radius_graph_pbc(data, radius, max_num_neighbors_threshold, enforce_max_neighbors_strictly: bool = False, pbc=None)#
+
+ +
+
+core.common.utils.get_max_neighbors_mask(natoms, index, atom_distance, max_num_neighbors_threshold, degeneracy_tolerance: float = 0.01, enforce_max_strictly: bool = False)#
+

Give a mask that filters out edges so that each atom has at most +max_num_neighbors_threshold neighbors. +Assumes that index is sorted.

+

Enforcing the max strictly can force the arbitrary choice between +degenerate edges. This can lead to undesired behaviors; for +example, bulk formation energies which are not invariant to +unit cell choice.

+

A degeneracy tolerance can help prevent sudden changes in edge +existence from small changes in atom position, for example, +rounding errors, slab relaxation, temperature, etc.

+
+ +
+
+core.common.utils.get_pruned_edge_idx(edge_index, num_atoms: int, max_neigh: float = 1000000000.0) torch.Tensor#
+
+ +
+
+core.common.utils.merge_dicts(dict1: dict, dict2: dict)#
+

Recursively merge two dictionaries. +Values in dict2 override values in dict1. If dict1 and dict2 contain a dictionary as a +value, this will call itself recursively to merge these dictionaries. +This does not modify the input dictionaries (creates an internal copy). +Additionally returns a list of detected duplicates. +Adapted from TUM-DAML/seml

+
+
Parameters:
+
    +
  • dict1 (dict) – First dict.

  • +
  • dict2 (dict) – Second dict. Values in dict2 will override values from dict1 in case they share the same key.

  • +
+
+
Returns:
+

return_dict – Merged dictionaries.

+
+
Return type:
+

dict
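For instance (a sketch; the two-value unpacking follows the note above that a list of detected duplicates is returned alongside the merged dict):

from fairchem.core.common.utils import merge_dicts

base = {"optim": {"lr_initial": 5.0e-4, "max_epochs": 12}, "logger": "tensorboard"}
override = {"optim": {"max_epochs": 1}}
merged, duplicates = merge_dicts(base, override)
# merged["optim"] == {"lr_initial": 0.0005, "max_epochs": 1}; neither input is modified.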

+
+
+
+ +
+
+class core.common.utils.SeverityLevelBetween(min_level: int, max_level: int)#
+

Bases: logging.Filter

+

Filter instances are used to perform arbitrary filtering of LogRecords.

+

Loggers and Handlers can optionally use Filter instances to filter +records as desired. The base filter class only allows events which are +below a certain point in the logger hierarchy. For example, a filter +initialized with “A.B” will allow events logged by loggers “A.B”, +“A.B.C”, “A.B.C.D”, “A.B.D” etc. but not “A.BB”, “B.A.B” etc. If +initialized with the empty string, all events are passed.

+
+
+filter(record) bool#
+

Determine if the specified record is to be logged.

+

Returns True if the record should be logged, or False otherwise. +If deemed appropriate, the record may be modified in-place.

+
+ +
+ +
+
+core.common.utils.setup_logging() None#
+
+ +
+
+core.common.utils.compute_neighbors(data, edge_index)#
+
+ +
+
+core.common.utils.check_traj_files(batch, traj_dir) bool#
+
+ +
+
+core.common.utils.new_trainer_context(*, config: dict[str, Any], distributed: bool = False)#
+
+ +
+
+core.common.utils._resolve_scale_factor_submodule(model: torch.nn.Module, name: str)#
+
+ +
+
+core.common.utils._report_incompat_keys(model: torch.nn.Module, keys: torch.nn.modules.module._IncompatibleKeys, strict: bool = False) tuple[list[str], list[str]]#
+
+ +
+
+core.common.utils.load_state_dict(module: torch.nn.Module, state_dict: collections.abc.Mapping[str, torch.Tensor], strict: bool = True) tuple[list[str], list[str]]#
+
+ +
+
+core.common.utils.scatter_det(*args, **kwargs)#
+
+ +
+
+core.common.utils.get_commit_hash()#
+
+ +
+
+core.common.utils.cg_change_mat(ang_mom: int, device: str = 'cpu') torch.tensor#
+
+ +
+
+core.common.utils.irreps_sum(ang_mom: int) int#
+

Returns the sum of the dimensions of the irreps up to the specified angular momentum.

+
+
Parameters:
+

ang_mom – max angular momentum to sum up dimensions of irreps
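Since an irrep of angular momentum l has dimension 2l + 1, summing up to ang_mom = 2 gives 1 + 3 + 5 = 9, assuming the bound is inclusive; a quick check:

from fairchem.core.common.utils import irreps_sum

print(irreps_sum(2))   # expected 9 = 1 + 3 + 5 if "up to" is inclusive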

+
+
+
+ +
+
+core.common.utils.update_config(base_config)#
+

Configs created prior to OCP 2.0 are organized a little different than they +are now. Update old configs to fit the new expected structure.

+
+ +
+
+core.common.utils.get_loss_module(loss_name)#
+
+ +
diff --git a/autoapi/core/datasets/_utils/index.html b/autoapi/core/datasets/_utils/index.html
new file mode 100644
+

core.datasets._utils#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + +

rename_data_object_keys(→ torch_geometric.data.Data)

Rename data object keys

+
+
+core.datasets._utils.rename_data_object_keys(data_object: torch_geometric.data.Data, key_mapping: dict[str, str]) torch_geometric.data.Data#
+

Rename data object keys

+
+
Parameters:
+
    +
  • data_object – data object

  • +
  • key_mapping – dictionary specifying keys to rename and new names {prev_key: new_key}

  • +
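A sketch mirroring the key_mapping blocks used in dataset configs; data is a placeholder torch_geometric Data object.

from fairchem.core.datasets._utils import rename_data_object_keys

data = rename_data_object_keys(data, key_mapping={"y": "energy", "force": "forces"})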
diff --git a/autoapi/core/datasets/ase_datasets/index.html b/autoapi/core/datasets/ase_datasets/index.html
new file mode 100644
+

core.datasets.ase_datasets#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + +

AseAtomsDataset

This is an abstract Dataset that includes helpful utilities for turning

AseReadDataset

This Dataset uses ase.io.read to load data from a directory on disk.

AseReadMultiStructureDataset

This Dataset can read multiple structures from each file using ase.io.read.

AseDBDataset

This Dataset connects to an ASE Database, allowing the storage of atoms objects

+
+
+

Functions#

+ + + + + + +

apply_one_tags(atoms[, skip_if_nonzero, skip_always])

This function will apply tags of 1 to an ASE atoms object.

+
+
+core.datasets.ase_datasets.apply_one_tags(atoms: ase.Atoms, skip_if_nonzero: bool = True, skip_always: bool = False)#
+

This function will apply tags of 1 to an ASE atoms object. +It is used as an atoms_transform in the datasets contained in this file.

+

Certain models will treat atoms differently depending on their tags. +For example, GemNet-OC by default will only compute triplet and quadruplet interactions +for atoms with non-zero tags. This model throws an error if there are no tagged atoms. +For this reason, the default behavior is to tag atoms in structures with no tags.

+
+
Parameters:
+
    +
  • skip_if_nonzero (bool) – If at least one atom has a nonzero tag, do not tag any atoms

  • +
  • skip_always (bool) – Do not apply any tags. This arg exists so that this function can be disabled +without needing to pass a callable (which is currently difficult to do with main.py)

  • +
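For example (assuming the function returns the tagged Atoms object):

import ase.build
from fairchem.core.datasets.ase_datasets import apply_one_tags

atoms = ase.build.bulk("Cu")     # a structure with no tags set
atoms = apply_one_tags(atoms)    # every atom now carries tag 1
# With skip_if_nonzero=True (the default), structures that already have tags are left untouched.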
+
+
+
+ +
+
+class core.datasets.ase_datasets.AseAtomsDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
+

Bases: torch.utils.data.Dataset, abc.ABC

+

This is an abstract Dataset that includes helpful utilities for turning +ASE atoms objects into OCP-usable data objects. This should not be instantiated directly +as get_atoms_object and load_dataset_get_ids are not implemented in this base class.

+
+
Derived classes must add at least two things:

self.get_atoms_object(id): a function that takes an identifier and returns a corresponding atoms object

+
+
self.load_dataset_get_ids(config: dict): This function is responsible for any initialization/loads

of the dataset and importantly must return a list of all possible identifiers that can be passed into +self.get_atoms_object(id)

+
+
+
+
+

Identifiers need not be any particular type.

+
+
+__len__() int#
+
+ +
+
+__getitem__(idx)#
+
+ +
+
+abstract get_atoms(idx: str | int) ase.Atoms#
+
+ +
+
+abstract _load_dataset_get_ids(config)#
+
+ +
+
+abstract get_relaxed_energy(identifier)#
+
+ +
+
+close_db() None#
+
+ +
+
+get_metadata(num_samples: int = 100) dict#
+
+ +
+ +
+
+class core.datasets.ase_datasets.AseReadDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
+

Bases: AseAtomsDataset

+

This Dataset uses ase.io.read to load data from a directory on disk. +This is intended for small-scale testing and demonstrations of OCP. +Larger datasets are better served by the efficiency of other dataset types +such as LMDB.

+

For a full list of ASE-readable filetypes, see +https://wiki.fysik.dtu.dk/ase/ase/io/io.html

+
+
Parameters:
+
    +
  • config (dict) –

    src (str): The source folder that contains your ASE-readable files

    +
    +
    pattern (str): Filepath matching each file you want to read

    ex. “/POSCAR”, “.cif”, “.xyz” +search recursively with two wildcards: “*/POSCAR” or “**/*.cif”

    +
    +
    a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs()

    default options will work for most users

    +

    If you are using this for a training dataset, set +“r_energy”:True, “r_forces”:True, and/or “r_stress”:True as appropriate +In that case, energy/forces must be in the files you read (ex. OUTCAR)

    +
    +
    +

    ase_read_args (dict): Keyword arguments for ase.io.read()

    +
    +
    keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need

    to iterate over a dataset many times (e.g. training for many epochs). +Not recommended for large datasets.

    +
    +
    include_relaxed_energy (bool): Include the relaxed energy in the resulting data object.

    The relaxed structure is assumed to be the final structure in the file +(e.g. the last frame of a .traj).

    +
    +
    +

    atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable

    +

    transform_args (dict): Additional keyword arguments for the transform callable

    +
    +
    key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used

    in the model with the corresponding property as it was named in the dataset. Only need to use if +the name is different.

    +
    +
    +

  • +
  • atoms_transform (callable, optional) – Additional preprocessing function applied to the Atoms +object. Useful for applying tags, for example.

  • +
+
+
+
+
+_load_dataset_get_ids(config) list[pathlib.Path]#
+
+ +
+
+get_atoms(idx: str | int) ase.Atoms#
+
+ +
+
+get_relaxed_energy(identifier) float#
+
+ +
+ +
+
+class core.datasets.ase_datasets.AseReadMultiStructureDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
+

Bases: AseAtomsDataset

+

This Dataset can read multiple structures from each file using ase.io.read. +The disadvantage is that all files must be read at startup. +This is a significant cost for large datasets.

+

This is intended for small-scale testing and demonstrations of OCP. +Larger datasets are better served by the efficiency of other dataset types +such as LMDB.

+

For a full list of ASE-readable filetypes, see +https://wiki.fysik.dtu.dk/ase/ase/io/io.html

+
+
Parameters:
+
    +
  • config (dict) –

    src (str): The source folder that contains your ASE-readable files

    +
    +
    pattern (str): Filepath matching each file you want to read

    ex. “.traj”, “.xyz” +search recursively with two wildcards: “/POSCAR” or “/*.cif”

    +
    +
    index_file (str): Filepath to an indexing file, which contains each filename

    and the number of structures contained in each file. For instance:

    +

    /path/to/relaxation1.traj 200 +/path/to/relaxation2.traj 150

    +

    This will overrule the src and pattern that you specify!

    +
    +
    a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs()

    default options will work for most users

    +

    If you are using this for a training dataset, set +“r_energy”:True, “r_forces”:True, and/or “r_stress”:True as appropriate +In that case, energy/forces must be in the files you read (ex. OUTCAR)

    +
    +
    +

    ase_read_args (dict): Keyword arguments for ase.io.read()

    +
    +
    keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need

    to iterate over a dataset many times (e.g. training for many epochs). +Not recommended for large datasets.

    +
    +
    include_relaxed_energy (bool): Include the relaxed energy in the resulting data object.

    The relaxed structure is assumed to be the final structure in the file +(e.g. the last frame of a .traj).

    +
    +
    +

    use_tqdm (bool): Use TQDM progress bar when initializing dataset

    +

    atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable

    +

    transform_args (dict): Additional keyword arguments for the transform callable

    +
    +
    key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used

    in the model with the corresponding property as it was named in the dataset. Only need to use if +the name is different.

    +
    +
    +

  • +
  • atoms_transform (callable, optional) – Additional preprocessing function applied to the Atoms +object. Useful for applying tags, for example.

  • +
  • transform (callable, optional) – Additional preprocessing function for the Data object

  • +
+
+
+
+
+_load_dataset_get_ids(config) list[str]#
+
+ +
+
+get_atoms(idx: str) ase.Atoms#
+
+ +
+
+get_metadata(num_samples: int = 100) dict#
+
+ +
+
+get_relaxed_energy(identifier) float#
+
+ +
+ +
+
+class core.datasets.ase_datasets.AseDBDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
+

Bases: AseAtomsDataset

+

This Dataset connects to an ASE Database, allowing the storage of atoms objects +with a variety of backends including JSON, SQLite, and database server options.

+

For more information, see: +https://databases.fysik.dtu.dk/ase/ase/db/db.html

+
+
Parameters:
+
    +
  • config (dict) –

    +
    src (str): Either
      +
    • the path an ASE DB,

    • +
    • the connection address of an ASE DB,

    • +
    • a folder with multiple ASE DBs,

    • +
    • a list of folders with ASE DBs

    • +
    • a glob string to use to find ASE DBs, or

    • +
    • a list of ASE db paths/addresses.

    • +
    +

    If a folder, every file will be attempted as an ASE DB, and warnings +are raised for any files that can’t connect cleanly

    +

    Note that for large datasets, ID loading can be slow and there can be many +ids, so it’s advised to make loading the id list as easy as possible. There is not +an obvious way to get a full list of ids from most ASE dbs besides simply looping +through the entire dataset. See the AseLMDBDataset which was written with this usecase +in mind.

    +
    +
    +

    connect_args (dict): Keyword arguments for ase.db.connect()

    +
    +
    select_args (dict): Keyword arguments for ase.db.select()

    You can use this to query/filter your database

    +
    +
    a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs()

    default options will work for most users

    +

    If you are using this for a training dataset, set +“r_energy”:True, “r_forces”:True, and/or “r_stress”:True as appropriate +In that case, energy/forces must be in the database

    +
    +
    keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need

    to iterate over a dataset many times (e.g. training for many epochs). +Not recommended for large datasets.

    +
    +
    +

    atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable

    +
    +
    transforms (dict[str, dict]): Dictionary specifying data transforms as {transform_function: config}

    where config is a dictionary specifying arguments to the transform_function

    +
    +
    key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used

    in the model with the corresponding property as it was named in the dataset. Only need to use if +the name is different.

    +
    +
    +

  • +
  • atoms_transform (callable, optional) – Additional preprocessing function applied to the Atoms +object. Useful for applying tags, for example.

  • +
  • transform (callable, optional) – deprecated?

  • +
+
+
+
+
+_load_dataset_get_ids(config: dict) list[int]#
+
+ +
+
+get_atoms(idx: int) ase.Atoms#
+

Get atoms object corresponding to datapoint idx. Useful to read other properties not in data object. +:param idx: index in dataset +:type idx: int

+
+
Returns:
+

ASE atoms corresponding to datapoint idx

+
+
Return type:
+

atoms

+
+
+
+ +
+
+static connect_db(address: str | pathlib.Path, connect_args: dict | None = None) ase.db.core.Database#
+
+ +
+
+close_db() None#
+
+ +
+
+get_metadata(num_samples: int = 100) dict#
+
+ +
+
+abstract get_relaxed_energy(identifier)#
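A configuration sketch using the keys described above; train.db is a placeholder database path and the fairchem.core.* import path is assumed.

from fairchem.core.datasets import AseDBDataset

dataset = AseDBDataset(config={
    "src": "train.db",
    "a2g_args": {"r_energy": True, "r_forces": True},   # needed when training on energies/forces
    "key_mapping": {"y": "energy", "force": "forces"},
})
print(len(dataset))
atoms = dataset.get_atoms(0)   # the raw ase.Atoms behind the first data object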
+
diff --git a/autoapi/core/datasets/embeddings/atomic_radii/index.html b/autoapi/core/datasets/embeddings/atomic_radii/index.html
new file mode 100644
+

core.datasets.embeddings.atomic_radii#

+

Atomic radii in picometers

+

NaN stored for unavailable parameters.

+
+

Module Contents#

+
+
+core.datasets.embeddings.atomic_radii.ATOMIC_RADII#
+
diff --git a/autoapi/core/datasets/embeddings/continuous_embeddings/index.html b/autoapi/core/datasets/embeddings/continuous_embeddings/index.html
new file mode 100644
+

core.datasets.embeddings.continuous_embeddings#

+

CGCNN-like embeddings using continuous values instead of original k-hot.

+
+
Properties:

Group number
Period number
Electronegativity
Covalent radius
Valence electrons
First ionization energy
Electron affinity
Block
Atomic Volume

+
+
+

NaN stored for unavailable parameters.

+
+

Module Contents#

+
+
+core.datasets.embeddings.continuous_embeddings.CONTINUOUS_EMBEDDINGS#
+
diff --git a/autoapi/core/datasets/embeddings/index.html b/autoapi/core/datasets/embeddings/index.html
new file mode 100644
+

core.datasets.embeddings#

+
+

Submodules#

core.datasets.embeddings.atomic_radii
core.datasets.embeddings.continuous_embeddings
core.datasets.embeddings.khot_embeddings
core.datasets.embeddings.qmof_khot_embeddings

Package Contents#

+
+
+core.datasets.embeddings.ATOMIC_RADII#
+
+ +
+
+core.datasets.embeddings.CONTINUOUS_EMBEDDINGS#
+
+ +
+
+core.datasets.embeddings.KHOT_EMBEDDINGS#
+
+ +
+
+core.datasets.embeddings.QMOF_KHOT_EMBEDDINGS#
+
diff --git a/autoapi/core/datasets/embeddings/khot_embeddings/index.html b/autoapi/core/datasets/embeddings/khot_embeddings/index.html
new file mode 100644
+

core.datasets.embeddings.khot_embeddings#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+

Original CGCNN k-hot elemental embeddings.

+
+

Module Contents#

+
+
+core.datasets.embeddings.khot_embeddings.KHOT_EMBEDDINGS#
+
diff --git a/autoapi/core/datasets/embeddings/qmof_khot_embeddings/index.html b/autoapi/core/datasets/embeddings/qmof_khot_embeddings/index.html
new file mode 100644
+

core.datasets.embeddings.qmof_khot_embeddings#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+

k-hot elemental embeddings from QMOF, motivated by the following Github Issue threads: +txie-93/cgcnn#2 +arosen93/QMOF#18

+
+

Module Contents#

+
+
+core.datasets.embeddings.qmof_khot_embeddings.QMOF_KHOT_EMBEDDINGS#
+
diff --git a/autoapi/core/datasets/index.html b/autoapi/core/datasets/index.html
new file mode 100644
+ +
+

core.datasets#

+
+

Subpackages#

+ +
+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

AseDBDataset

This Dataset connects to an ASE Database, allowing the storage of atoms objects

AseReadDataset

This Dataset uses ase.io.read to load data from a directory on disk.

AseReadMultiStructureDataset

This Dataset can read multiple structures from each file using ase.io.read.

LMDBDatabase

Base class for all databases.

LmdbDataset

An abstract class representing a Dataset.

SinglePointLmdbDataset

An abstract class representing a Dataset.

TrajectoryLmdbDataset

An abstract class representing a Dataset.

OC22LmdbDataset

Dataset class to load from LMDB files containing relaxation

+
+
+

Functions#

+ + + + + + +

data_list_collater(→ torch_geometric.data.data.BaseData)

+
+
+class core.datasets.AseDBDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
+

Bases: AseAtomsDataset

+

This Dataset connects to an ASE Database, allowing the storage of atoms objects +with a variety of backends including JSON, SQLite, and database server options.

+

For more information, see: +https://databases.fysik.dtu.dk/ase/ase/db/db.html

+
+
Parameters:
+
    +
  • config (dict) –

    +
    src (str): Either
      +
    • the path an ASE DB,

    • +
    • the connection address of an ASE DB,

    • +
    • a folder with multiple ASE DBs,

    • +
    • a list of folders with ASE DBs

    • +
    • a glob string to use to find ASE DBs, or

    • +
    • a list of ASE db paths/addresses.

    • +
    +

    If a folder, every file will be attempted as an ASE DB, and warnings +are raised for any files that can’t connect cleanly

    +

    Note that for large datasets, ID loading can be slow and there can be many +ids, so it’s advised to make loading the id list as easy as possible. There is not +an obvious way to get a full list of ids from most ASE dbs besides simply looping +through the entire dataset. See the AseLMDBDataset which was written with this usecase +in mind.

    +
    +
    +

    connect_args (dict): Keyword arguments for ase.db.connect()

    +
    +
    select_args (dict): Keyword arguments for ase.db.select()

    You can use this to query/filter your database

    +
    +
    a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs()

    default options will work for most users

    +

    If you are using this for a training dataset, set +“r_energy”:True, “r_forces”:True, and/or “r_stress”:True as appropriate +In that case, energy/forces must be in the database

    +
    +
    keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need

    to iterate over a dataset many times (e.g. training for many epochs). +Not recommended for large datasets.

    +
    +
    +

    atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable

    +
    +
    transforms (dict[str, dict]): Dictionary specifying data transforms as {transform_function: config}

    where config is a dictionary specifying arguments to the transform_function

    +
    +
    key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used

    in the model with the corresponding property as it was named in the dataset. Only need to use if +the name is different.

    +
    +
    +

  • +
  • atoms_transform (callable, optional) – Additional preprocessing function applied to the Atoms +object. Useful for applying tags, for example.

  • +
  • transform (callable, optional) – deprecated?

  • +
+
+
+
+
+_load_dataset_get_ids(config: dict) list[int]#
+
+ +
+
+get_atoms(idx: int) ase.Atoms#
+

Get atoms object corresponding to datapoint idx. Useful to read other properties not in data object. +:param idx: index in dataset +:type idx: int

+
+
Returns:
+

ASE atoms corresponding to datapoint idx

+
+
Return type:
+

atoms

+
+
+
+ +
+
+static connect_db(address: str | pathlib.Path, connect_args: dict | None = None) ase.db.core.Database#
+
+ +
+
+close_db() None#
+
+ +
+
+get_metadata(num_samples: int = 100) dict#
+
+ +
+
+abstract get_relaxed_energy(identifier)#
+
+ +
+ +
+
+class core.datasets.AseReadDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
+

Bases: AseAtomsDataset

+

This Dataset uses ase.io.read to load data from a directory on disk. +This is intended for small-scale testing and demonstrations of OCP. +Larger datasets are better served by the efficiency of other dataset types +such as LMDB.

+

For a full list of ASE-readable filetypes, see +https://wiki.fysik.dtu.dk/ase/ase/io/io.html

+
+
Parameters:
+
    +
  • config (dict) –

    src (str): The source folder that contains your ASE-readable files

    +
    +
    pattern (str): Filepath matching each file you want to read

    ex. “/POSCAR”, “.cif”, “.xyz” +search recursively with two wildcards: “*/POSCAR” or “**/*.cif”

    +
    +
    a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs()

    default options will work for most users

    +

    If you are using this for a training dataset, set +“r_energy”:True, “r_forces”:True, and/or “r_stress”:True as appropriate +In that case, energy/forces must be in the files you read (ex. OUTCAR)

    +
    +
    +

    ase_read_args (dict): Keyword arguments for ase.io.read()

    +
    +
    keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need

    to iterate over a dataset many times (e.g. training for many epochs). +Not recommended for large datasets.

    +
    +
    include_relaxed_energy (bool): Include the relaxed energy in the resulting data object.

    The relaxed structure is assumed to be the final structure in the file +(e.g. the last frame of a .traj).

    +
    +
    +

    atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable

    +

    transform_args (dict): Additional keyword arguments for the transform callable

    +
    +
    key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used

    in the model with the corresponding property as it was named in the dataset. Only need to use if +the name is different.

    +
    +
    +

  • +
  • atoms_transform (callable, optional) – Additional preprocessing function applied to the Atoms +object. Useful for applying tags, for example.

  • +
+
+
+
+
+_load_dataset_get_ids(config) list[pathlib.Path]#
+
+ +
+
+get_atoms(idx: str | int) ase.Atoms#
+
+ +
+
+get_relaxed_energy(identifier) float#
+
+ +
+ +
+
+class core.datasets.AseReadMultiStructureDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
+

Bases: AseAtomsDataset

+

This Dataset can read multiple structures from each file using ase.io.read. +The disadvantage is that all files must be read at startup. +This is a significant cost for large datasets.

+

This is intended for small-scale testing and demonstrations of OCP. +Larger datasets are better served by the efficiency of other dataset types +such as LMDB.

+

For a full list of ASE-readable filetypes, see +https://wiki.fysik.dtu.dk/ase/ase/io/io.html

+
+
Parameters:
+
    +
  • config (dict) –

    src (str): The source folder that contains your ASE-readable files

    +
    +
    pattern (str): Filepath matching each file you want to read

    ex. “.traj”, “.xyz” +search recursively with two wildcards: “/POSCAR” or “/*.cif”

    +
    +
    index_file (str): Filepath to an indexing file, which contains each filename

    and the number of structures contained in each file. For instance:

    +

    /path/to/relaxation1.traj 200 +/path/to/relaxation2.traj 150

    +

    This will overrule the src and pattern that you specify!

    +
    +
    a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs()

    default options will work for most users

    +

    If you are using this for a training dataset, set +“r_energy”:True, “r_forces”:True, and/or “r_stress”:True as appropriate +In that case, energy/forces must be in the files you read (ex. OUTCAR)

    +
    +
    +

    ase_read_args (dict): Keyword arguments for ase.io.read()

    +
    +
    keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need

    to iterate over a dataset many times (e.g. training for many epochs). +Not recommended for large datasets.

    +
    +
    include_relaxed_energy (bool): Include the relaxed energy in the resulting data object.

    The relaxed structure is assumed to be the final structure in the file +(e.g. the last frame of a .traj).

    +
    +
    +

    use_tqdm (bool): Use TQDM progress bar when initializing dataset

    +

    atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable

    +

    transform_args (dict): Additional keyword arguments for the transform callable

    +
    +
    key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used

    in the model with the corresponding property as it was named in the dataset. Only need to use if +the name is different.

    +
    +
    +

  • +
  • atoms_transform (callable, optional) – Additional preprocessing function applied to the Atoms +object. Useful for applying tags, for example.

  • +
  • transform (callable, optional) – Additional preprocessing function for the Data object

  • +
+
+
+
+
+_load_dataset_get_ids(config) list[str]#
+
+ +
+
+get_atoms(idx: str) ase.Atoms#
+
+ +
+
+get_metadata(num_samples: int = 100) dict#
+
+ +
+
+get_relaxed_energy(identifier) float#
+
+ +
+ +
+
+class core.datasets.LMDBDatabase(filename: str | pathlib.Path | None = None, create_indices: bool = True, use_lock_file: bool = False, serial: bool = False, readonly: bool = False, *args, **kwargs)#
+

Bases: ase.db.core.Database

+

Base class for all databases.

+
+
+property metadata#
+

Load the metadata from the DB if present

+
+ +
+
+property _nextid#
+

Get the id of the next row to be written

+
+ +
+
+__enter__() typing_extensions.Self#
+
+ +
+
+__exit__(exc_type, exc_value, tb) None#
+
+ +
+
+close() None#
+
+ +
+
+_write(atoms: ase.Atoms | ase.db.row.AtomsRow, key_value_pairs: dict, data: dict | None, idx: int | None = None) None#
+
+ +
+
+_update(idx: int, key_value_pairs: dict | None = None, data: dict | None = None)#
+
+ +
+
+_write_deleted_ids()#
+
+ +
+
+delete(ids: list[int]) None#
+

Delete rows.

+
+ +
+
+_get_row(idx: int, include_data: bool = True)#
+
+ +
+
+_get_row_by_index(index: int, include_data: bool = True)#
+

Auxiliary function to get the ith entry, rather than a specific id

+
+ +
+
+_select(keys, cmps: list[tuple[str, str, str]], explain: bool = False, verbosity: int = 0, limit: int | None = None, offset: int = 0, sort: str | None = None, include_data: bool = True, columns: str = 'all')#
+
+ +
+
+count(selection=None, **kwargs) int#
+

Count rows.

+

See the select() method for the selection syntax. Use db.count() or +len(db) to count all rows.

+
+ +
+
+_load_ids() None#
+

Load ids from the DB

+

Since ASE db ids are mostly 1-N integers, but can be missing entries +if ids have been deleted. To save space and operating under the assumption +that there will probably not be many deletions in most OCP datasets, +we just store the deleted ids.

+
+ +
+ +
+
+class core.datasets.LmdbDataset(config)#
+

Bases: torch.utils.data.Dataset[T_co]

+

An abstract class representing a Dataset.

+

All datasets that represent a map from keys to data samples should subclass +it. All subclasses should overwrite __getitem__(), supporting fetching a +data sample for a given key. Subclasses could also optionally overwrite +__len__(), which is expected to return the size of the dataset by many +Sampler implementations and the default options +of DataLoader. Subclasses could also +optionally implement __getitems__(), for speedup batched samples +loading. This method accepts list of indices of samples of batch and returns +list of samples.

+
+

Note

+

DataLoader by default constructs an index +sampler that yields integral indices. To make it work with a map-style +dataset with non-integral indices/keys, a custom sampler must be provided.

+
+
+
+metadata_path: pathlib.Path#
+
+ +
+
+sharded: bool#
+

Dataset class to load from LMDB files containing relaxation +trajectories or single point computations. +Useful for Structure to Energy & Force (S2EF), Initial State to +Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks. +The keys in the LMDB must be integers (stored as ascii objects) starting +from 0 through the length of the LMDB. For historical reasons any key named +“length” is ignored since that was used to infer length of many lmdbs in the same +folder, but lmdb lengths are now calculated directly from the number of keys. +:param config: Dataset configuration +:type config: dict

+
+ +
+
+__len__() int#
+
+ +
+
+__getitem__(idx: int) T_co#
+
+ +
+
+connect_db(lmdb_path: pathlib.Path | None = None) lmdb.Environment#
+
+ +
+
+close_db() None#
+
+ +
+
+get_metadata(num_samples: int = 100)#
+
+ +
+ +
+
+class core.datasets.SinglePointLmdbDataset(config, transform=None)#
+

Bases: LmdbDataset[torch_geometric.data.data.BaseData]

+

An abstract class representing a Dataset.

+

All datasets that represent a map from keys to data samples should subclass +it. All subclasses should overwrite __getitem__(), supporting fetching a +data sample for a given key. Subclasses could also optionally overwrite +__len__(), which is expected to return the size of the dataset by many +Sampler implementations and the default options +of DataLoader. Subclasses could also +optionally implement __getitems__(), for speedup batched samples +loading. This method accepts list of indices of samples of batch and returns +list of samples.

+
+

Note

+

DataLoader by default constructs an index +sampler that yields integral indices. To make it work with a map-style +dataset with non-integral indices/keys, a custom sampler must be provided.

+
+
+ +
+
+class core.datasets.TrajectoryLmdbDataset(config, transform=None)#
+

Bases: LmdbDataset[torch_geometric.data.data.BaseData]

+

An abstract class representing a Dataset.

+

All datasets that represent a map from keys to data samples should subclass +it. All subclasses should overwrite __getitem__(), supporting fetching a +data sample for a given key. Subclasses could also optionally overwrite +__len__(), which is expected to return the size of the dataset by many +Sampler implementations and the default options +of DataLoader. Subclasses could also +optionally implement __getitems__(), for speedup batched samples +loading. This method accepts list of indices of samples of batch and returns +list of samples.

+
+

Note

+

DataLoader by default constructs an index +sampler that yields integral indices. To make it work with a map-style +dataset with non-integral indices/keys, a custom sampler must be provided.

+
+
+ +
+
+core.datasets.data_list_collater(data_list: list[torch_geometric.data.data.BaseData], otf_graph: bool = False) torch_geometric.data.data.BaseData#
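A sketch; d0 and d1 stand for individual torch_geometric data objects, e.g. items pulled from any of the datasets above.

from fairchem.core.datasets import data_list_collater

batch = data_list_collater([d0, d1], otf_graph=True)  # otf_graph=True: graph connectivity is built later, on the fly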
+
+ +
+
+class core.datasets.OC22LmdbDataset(config, transform=None)#
+

Bases: torch.utils.data.Dataset

+

Dataset class to load from LMDB files containing relaxation +trajectories or single point computations.

+

Useful for Structure to Energy & Force (S2EF), Initial State to +Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks.

+

The keys in the LMDB must be integers (stored as ascii objects) starting +from 0 through the length of the LMDB. For historical reasons any key named +“length” is ignored since that was used to infer length of many lmdbs in the same +folder, but lmdb lengths are now calculated directly from the number of keys.

+
+
Parameters:
+
    +
  • config (dict) – Dataset configuration

  • +
  • transform (callable, optional) – Data transform function. +(default: None)

  • +
+
+
+
+
+__len__() int#
+
+ +
+
+__getitem__(idx)#
+
+ +
+
+connect_db(lmdb_path=None)#
+
+ +
+
+close_db() None#
+
diff --git a/autoapi/core/datasets/lmdb_database/index.html b/autoapi/core/datasets/lmdb_database/index.html
new file mode 100644
+ +
+

core.datasets.lmdb_database#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is modified from the ASE db json backend +and is thus licensed under the corresponding LGPL2.1 license

+

The ASE notice for the LGPL2.1 license is available here: +ase/ase/-/blob/master/LICENSE

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

LMDBDatabase

Base class for all databases.

+
+
+

Attributes#

+ + + + + + +

RESERVED_KEYS

+
+
+core.datasets.lmdb_database.RESERVED_KEYS = ['nextid', 'metadata', 'deleted_ids']#
+
+ +
+
+class core.datasets.lmdb_database.LMDBDatabase(filename: str | pathlib.Path | None = None, create_indices: bool = True, use_lock_file: bool = False, serial: bool = False, readonly: bool = False, *args, **kwargs)#
+

Bases: ase.db.core.Database

+

Base class for all databases.

+
+
+property metadata#
+

Load the metadata from the DB if present

+
+ +
+
+property _nextid#
+

Get the id of the next row to be written

+
+ +
+
+__enter__() typing_extensions.Self#
+
+ +
+
+__exit__(exc_type, exc_value, tb) None#
+
+ +
+
+close() None#
+
+ +
+
+_write(atoms: ase.Atoms | ase.db.row.AtomsRow, key_value_pairs: dict, data: dict | None, idx: int | None = None) None#
+
+ +
+
+_update(idx: int, key_value_pairs: dict | None = None, data: dict | None = None)#
+
+ +
+
+_write_deleted_ids()#
+
+ +
+
+delete(ids: list[int]) None#
+

Delete rows.

+
+ +
+
+_get_row(idx: int, include_data: bool = True)#
+
+ +
+
+_get_row_by_index(index: int, include_data: bool = True)#
+

Auxiliary function to get the ith entry, rather than a specific id

+
+ +
+
+_select(keys, cmps: list[tuple[str, str, str]], explain: bool = False, verbosity: int = 0, limit: int | None = None, offset: int = 0, sort: str | None = None, include_data: bool = True, columns: str = 'all')#
+
+ +
+
+count(selection=None, **kwargs) int#
+

Count rows.

+

See the select() method for the selection syntax. Use db.count() or +len(db) to count all rows.

+
+ +
+
+_load_ids() None#
+

Load ids from the DB

+

Since ASE db ids are mostly 1-N integers, but can be missing entries +if ids have been deleted. To save space and operating under the assumption +that there will probably not be many deletions in most OCP datasets, +we just store the deleted ids.
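Because this is an ase.db.core.Database subclass, the usual ASE database API applies on top of the helpers listed above; a short sketch (the .aselmdb filename is only an example):

import ase.build
from fairchem.core.datasets.lmdb_database import LMDBDatabase

with LMDBDatabase("sample.aselmdb") as db:
    db.write(ase.build.bulk("Cu"))    # write() comes from the ASE Database base class
    print(db.count())                 # -> 1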

+
+ +
\ No newline at end of file
diff --git a/autoapi/core/datasets/lmdb_dataset/index.html b/autoapi/core/datasets/lmdb_dataset/index.html new file mode 100644 index 000000000..7e5adcf24 --- /dev/null +++ b/autoapi/core/datasets/lmdb_dataset/index.html @@ -0,0 +1,956 @@
+core.datasets.lmdb_dataset — FAIR Chemistry Documentation

core.datasets.lmdb_dataset#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + +

LmdbDataset

An abstract class representing a Dataset.

SinglePointLmdbDataset

An abstract class representing a Dataset.

TrajectoryLmdbDataset

An abstract class representing a Dataset.

+
+
+

Functions#

+ + + + + + +

data_list_collater(→ torch_geometric.data.data.BaseData)

+
+
+

Attributes#

+ + + + + + +

T_co

+
+
+core.datasets.lmdb_dataset.T_co#
+
+ +
+
+class core.datasets.lmdb_dataset.LmdbDataset(config)#
+

Bases: torch.utils.data.Dataset[T_co]

+

An abstract class representing a Dataset.

+

All datasets that represent a map from keys to data samples should subclass +it. All subclasses should overwrite __getitem__(), supporting fetching a +data sample for a given key. Subclasses could also optionally overwrite +__len__(), which is expected to return the size of the dataset by many +Sampler implementations and the default options +of DataLoader. Subclasses could also +optionally implement __getitems__(), for speedup batched samples +loading. This method accepts list of indices of samples of batch and returns +list of samples.

+
+

Note

+

DataLoader by default constructs an index +sampler that yields integral indices. To make it work with a map-style +dataset with non-integral indices/keys, a custom sampler must be provided.

+
+
+
+metadata_path: pathlib.Path#
+
+ +
+
+sharded: bool#
+

Dataset class to load from LMDB files containing relaxation trajectories or single point computations. Useful for Structure to Energy & Force (S2EF), Initial State to Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks.

The keys in the LMDB must be integers (stored as ascii objects) starting from 0 through the length of the LMDB. For historical reasons any key named “length” is ignored, since it was once used to infer the length of many LMDBs in the same folder; LMDB lengths are now calculated directly from the number of keys.

Parameters:
  • config (dict) – Dataset configuration
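As a concrete illustration of the integer-key convention, the sketch below writes a few pickled torch_geometric Data objects under the ascii keys "0"…"3" and then points the dataset at the resulting file. The file name, map_size, the toy Data fields, and the "src" config key are assumptions made for the example.

import pickle

import lmdb
import torch
from torch_geometric.data import Data

samples = [Data(pos=torch.rand(3, 3), atomic_numbers=torch.tensor([8, 1, 1])) for _ in range(4)]

env = lmdb.open("toy.lmdb", map_size=2**30, subdir=False, meminit=False, map_async=True)
with env.begin(write=True) as txn:
    for i, sample in enumerate(samples):
        # Keys are the ascii-encoded integers 0 .. len-1, as required above.
        txn.put(f"{i}".encode("ascii"), pickle.dumps(sample, protocol=-1))
env.sync()
env.close()

# The dataset can then be constructed from a config dict pointing at the file:
# from fairchem.core.datasets.lmdb_dataset import LmdbDataset
# dataset = LmdbDataset({"src": "toy.lmdb"})
# print(len(dataset), dataset[0])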

+
+ +
+
+__len__() int#
+
+ +
+
+__getitem__(idx: int) T_co#
+
+ +
+
+connect_db(lmdb_path: pathlib.Path | None = None) lmdb.Environment#
+
+ +
+
+close_db() None#
+
+ +
+
+get_metadata(num_samples: int = 100)#
+
+ +
+ +
+
+class core.datasets.lmdb_dataset.SinglePointLmdbDataset(config, transform=None)#
+

Bases: LmdbDataset[torch_geometric.data.data.BaseData]

+

An abstract class representing a Dataset.

+

All datasets that represent a map from keys to data samples should subclass +it. All subclasses should overwrite __getitem__(), supporting fetching a +data sample for a given key. Subclasses could also optionally overwrite +__len__(), which is expected to return the size of the dataset by many +Sampler implementations and the default options +of DataLoader. Subclasses could also +optionally implement __getitems__(), for speedup batched samples +loading. This method accepts list of indices of samples of batch and returns +list of samples.

+
+

Note

+

DataLoader by default constructs an index +sampler that yields integral indices. To make it work with a map-style +dataset with non-integral indices/keys, a custom sampler must be provided.

+
+
+ +
+
+class core.datasets.lmdb_dataset.TrajectoryLmdbDataset(config, transform=None)#
+

Bases: LmdbDataset[torch_geometric.data.data.BaseData]

+

An abstract class representing a Dataset.

+

All datasets that represent a map from keys to data samples should subclass +it. All subclasses should overwrite __getitem__(), supporting fetching a +data sample for a given key. Subclasses could also optionally overwrite +__len__(), which is expected to return the size of the dataset by many +Sampler implementations and the default options +of DataLoader. Subclasses could also +optionally implement __getitems__(), for speedup batched samples +loading. This method accepts list of indices of samples of batch and returns +list of samples.

+
+

Note

+

DataLoader by default constructs an index +sampler that yields integral indices. To make it work with a map-style +dataset with non-integral indices/keys, a custom sampler must be provided.

+
+
+ +
+
+core.datasets.lmdb_dataset.data_list_collater(data_list: list[torch_geometric.data.data.BaseData], otf_graph: bool = False) torch_geometric.data.data.BaseData#
+
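A minimal sketch of how data_list_collater is typically used, either called directly or handed to a DataLoader as the collate function; the toy Data fields and otf_graph=True are assumptions for illustration.

from functools import partial

import torch
from torch.utils.data import DataLoader
from torch_geometric.data import Data

from fairchem.core.datasets.lmdb_dataset import data_list_collater

data_list = [Data(pos=torch.rand(4, 3), atomic_numbers=torch.randint(1, 10, (4,))) for _ in range(8)]

# Direct call: merge a python list of samples into one batched object.
batch = data_list_collater(data_list, otf_graph=True)

# Or wire it into a DataLoader (sketch).
loader = DataLoader(data_list, batch_size=4, collate_fn=partial(data_list_collater, otf_graph=True))
for batch in loader:
    print(batch)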
\ No newline at end of file
diff --git a/autoapi/core/datasets/oc22_lmdb_dataset/index.html b/autoapi/core/datasets/oc22_lmdb_dataset/index.html new file mode 100644 index 000000000..2189d24d7 --- /dev/null +++ b/autoapi/core/datasets/oc22_lmdb_dataset/index.html @@ -0,0 +1,834 @@
+core.datasets.oc22_lmdb_dataset — FAIR Chemistry Documentation

core.datasets.oc22_lmdb_dataset#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

OC22LmdbDataset

Dataset class to load from LMDB files containing relaxation

+
+
+class core.datasets.oc22_lmdb_dataset.OC22LmdbDataset(config, transform=None)#
+

Bases: torch.utils.data.Dataset

+

Dataset class to load from LMDB files containing relaxation trajectories or single point computations.

Useful for Structure to Energy & Force (S2EF), Initial State to Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks.

The keys in the LMDB must be integers (stored as ascii objects) starting from 0 through the length of the LMDB. For historical reasons any key named “length” is ignored, since it was once used to infer the length of many LMDBs in the same folder; LMDB lengths are now calculated directly from the number of keys.

+
+
Parameters:
  • config (dict) – Dataset configuration
  • transform (callable, optional) – Data transform function. (default: None)
+
+
+
+
+__len__() int#
+
+ +
+
+__getitem__(idx)#
+
+ +
+
+connect_db(lmdb_path=None)#
+
+ +
+
+close_db() None#
+
+ +
+ +
\ No newline at end of file
diff --git a/autoapi/core/datasets/target_metadata_guesser/index.html b/autoapi/core/datasets/target_metadata_guesser/index.html new file mode 100644 index 000000000..bc5ce46d5 --- /dev/null +++ b/autoapi/core/datasets/target_metadata_guesser/index.html @@ -0,0 +1,831 @@
+core.datasets.target_metadata_guesser — FAIR Chemistry Documentation

core.datasets.target_metadata_guesser#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + +

uniform_atoms_lengths(→ bool)

target_constant_shape(→ bool)

target_per_atom(→ bool)

target_extensive(atoms_lens, target_samples[, threshold])

guess_target_metadata(atoms_len, target_samples)

guess_property_metadata(atoms_list)

+
+
+core.datasets.target_metadata_guesser.uniform_atoms_lengths(atoms_lens) bool#
+
+ +
+
+core.datasets.target_metadata_guesser.target_constant_shape(atoms_lens, target_samples) bool#
+
+ +
+
+core.datasets.target_metadata_guesser.target_per_atom(atoms_lens, target_samples) bool#
+
+ +
+
+core.datasets.target_metadata_guesser.target_extensive(atoms_lens, target_samples, threshold: float = 0.2)#
+
+ +
+
+core.datasets.target_metadata_guesser.guess_target_metadata(atoms_len, target_samples)#
+
+ +
+
+core.datasets.target_metadata_guesser.guess_property_metadata(atoms_list)#
+
+ +
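The helper predicates above operate on a list of per-structure atom counts plus a list of target samples. The sketch below feeds them synthetic energy- and force-like targets; the comments describe expected outcomes and should be read as assumptions about typical behaviour rather than guarantees.

import numpy as np

from fairchem.core.datasets.target_metadata_guesser import (
    target_constant_shape,
    target_extensive,
    target_per_atom,
)

atoms_lens = np.array([4, 8, 16, 32])                  # hypothetical system sizes
energies = [np.array([-2.0 * n]) for n in atoms_lens]  # scales with system size
forces = [np.random.randn(n, 3) for n in atoms_lens]   # one row per atom

print(target_constant_shape(atoms_lens, energies))  # every sample has the same shape
print(target_extensive(atoms_lens, energies))       # presumably True: grows with atom count
print(target_per_atom(atoms_lens, forces))          # presumably True: leading dim matches atom count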
\ No newline at end of file
diff --git a/autoapi/core/index.html b/autoapi/core/index.html new file mode 100644 index 000000000..7a482b1f7 --- /dev/null +++ b/autoapi/core/index.html @@ -0,0 +1,948 @@
+core — FAIR Chemistry Documentation

core#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Subpackages#

+
+ +
+
+
+

Submodules#

+
+ +
+
+
+

Package Contents#

+
+
+core.__version__#
+
+ +
+
\ No newline at end of file
diff --git a/autoapi/core/models/base/index.html b/autoapi/core/models/base/index.html new file mode 100644 index 000000000..3eb601bfb --- /dev/null +++ b/autoapi/core/models/base/index.html @@ -0,0 +1,849 @@
+core.models.base — FAIR Chemistry Documentation

core.models.base#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

BaseModel

Base class for all neural network modules.

+
+
+class core.models.base.BaseModel(num_atoms=None, bond_feat_dim=None, num_targets=None)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+property num_params: int#
+
+ +
+
+abstract forward(data)#
+
+ +
+
+generate_graph(data, cutoff=None, max_neighbors=None, use_pbc=None, otf_graph=None, enforce_max_neighbors_strictly=None)#
+
+ +
+
+no_weight_decay() list#
+

Returns a list of parameters with no weight decay.

+
+ +
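A custom model hooks into this interface by subclassing BaseModel and implementing forward. The sketch below is purely illustrative: the fields read off data (atomic_numbers, batch) and the dictionary returned by forward are assumptions about a typical fairchem-style batch, not requirements stated by BaseModel itself.

import torch
from torch_geometric.nn import global_add_pool

from fairchem.core.models.base import BaseModel


class ToyEnergyModel(BaseModel):
    """Illustrative subclass: predicts a per-system energy from atomic numbers."""

    def __init__(self, num_atoms=None, bond_feat_dim=None, num_targets=1, hidden=32):
        super().__init__(num_atoms, bond_feat_dim, num_targets)
        self.embedding = torch.nn.Embedding(100, hidden)
        self.readout = torch.nn.Linear(hidden, num_targets)

    def forward(self, data):
        h = self.embedding(data.atomic_numbers.long())
        energy = self.readout(global_add_pool(h, data.batch))
        return {"energy": energy}  # assumed output convention


model = ToyEnergyModel()
print(model.num_params)  # property provided by BaseModel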
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/dimenet_plus_plus/index.html b/autoapi/core/models/dimenet_plus_plus/index.html new file mode 100644 index 000000000..1082a6dd0 --- /dev/null +++ b/autoapi/core/models/dimenet_plus_plus/index.html @@ -0,0 +1,1066 @@
+core.models.dimenet_plus_plus — FAIR Chemistry Documentation

core.models.dimenet_plus_plus#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+

+

This code borrows heavily from the DimeNet implementation as part of pytorch-geometric: rusty1s/pytorch_geometric. License:

+

+

Copyright (c) 2020 Matthias Fey <matthias.fey@tu-dortmund.de>

+

Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the “Software”), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions:

+

The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software.

+

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + +

InteractionPPBlock

Base class for all neural network modules.

OutputPPBlock

Base class for all neural network modules.

DimeNetPlusPlus

DimeNet++ implementation based on klicperajo/dimenet.

DimeNetPlusPlusWrap

DimeNet++ implementation based on klicperajo/dimenet.

+
+
+

Attributes#

+ + + + + + +

sym

+
+
+core.models.dimenet_plus_plus.sym#
+
+ +
+
+class core.models.dimenet_plus_plus.InteractionPPBlock(hidden_channels: int, int_emb_size: int, basis_emb_size: int, num_spherical: int, num_radial: int, num_before_skip: int, num_after_skip: int, act='silu')#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+reset_parameters() None#
+
+ +
+
+forward(x, rbf, sbf, idx_kj, idx_ji)#
+
+ +
+ +
+
+class core.models.dimenet_plus_plus.OutputPPBlock(num_radial: int, hidden_channels: int, out_emb_channels: int, out_channels: int, num_layers: int, act: str = 'silu')#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+reset_parameters() None#
+
+ +
+
+forward(x, rbf, i, num_nodes: int | None = None)#
+
+ +
+ +
+
+class core.models.dimenet_plus_plus.DimeNetPlusPlus(hidden_channels: int, out_channels: int, num_blocks: int, int_emb_size: int, basis_emb_size: int, out_emb_channels: int, num_spherical: int, num_radial: int, cutoff: float = 5.0, envelope_exponent: int = 5, num_before_skip: int = 1, num_after_skip: int = 2, num_output_layers: int = 3, act: str = 'silu')#
+

Bases: torch.nn.Module

+

DimeNet++ implementation based on klicperajo/dimenet.

+
+
Parameters:
  • hidden_channels (int) – Hidden embedding size.
  • out_channels (int) – Size of each output sample.
  • num_blocks (int) – Number of building blocks.
  • int_emb_size (int) – Embedding size used for interaction triplets
  • basis_emb_size (int) – Embedding size used in the basis transformation
  • out_emb_channels (int) – Embedding size used for atoms in the output block
  • num_spherical (int) – Number of spherical harmonics.
  • num_radial (int) – Number of radial basis functions.
  • cutoff (float, optional) – Cutoff distance for interatomic interactions. (default: 5.0)
  • envelope_exponent (int, optional) – Shape of the smooth cutoff. (default: 5)
  • num_before_skip (int, optional) – Number of residual layers in the interaction blocks before the skip connection. (default: 1)
  • num_after_skip (int, optional) – Number of residual layers in the interaction blocks after the skip connection. (default: 2)
  • num_output_layers (int, optional) – Number of linear layers for the output blocks. (default: 3)
  • act (function, optional) – The activation function. (default: silu)
+
+
+
+
+url = 'https://github.com/klicperajo/dimenet/raw/master/pretrained'#
+
+ +
+
+reset_parameters() None#
+
+ +
+
+triplets(edge_index, cell_offsets, num_nodes: int)#
+
+ +
+
+abstract forward(z, pos, batch=None)#
+
+ +
+ +
+
+class core.models.dimenet_plus_plus.DimeNetPlusPlusWrap(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, hidden_channels: int = 128, num_blocks: int = 4, int_emb_size: int = 64, basis_emb_size: int = 8, out_emb_channels: int = 256, num_spherical: int = 7, num_radial: int = 6, otf_graph: bool = False, cutoff: float = 10.0, envelope_exponent: int = 5, num_before_skip: int = 1, num_after_skip: int = 2, num_output_layers: int = 3)#
+

Bases: DimeNetPlusPlus, fairchem.core.models.base.BaseModel

+

DimeNet++ implementation based on klicperajo/dimenet.

+
+
Parameters:
  • hidden_channels (int) – Hidden embedding size.
  • out_channels (int) – Size of each output sample.
  • num_blocks (int) – Number of building blocks.
  • int_emb_size (int) – Embedding size used for interaction triplets
  • basis_emb_size (int) – Embedding size used in the basis transformation
  • out_emb_channels (int) – Embedding size used for atoms in the output block
  • num_spherical (int) – Number of spherical harmonics.
  • num_radial (int) – Number of radial basis functions.
  • cutoff (float, optional) – Cutoff distance for interatomic interactions. (default: 5.0)
  • envelope_exponent (int, optional) – Shape of the smooth cutoff. (default: 5)
  • num_before_skip (int, optional) – Number of residual layers in the interaction blocks before the skip connection. (default: 1)
  • num_after_skip (int, optional) – Number of residual layers in the interaction blocks after the skip connection. (default: 2)
  • num_output_layers (int, optional) – Number of linear layers for the output blocks. (default: 3)
  • act (function, optional) – The activation function. (default: silu)
+
+
+
+
+property num_params: int#
+
+ +
+
+_forward(data)#
+
+ +
+
+forward(data)#
+
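Constructing the wrapper directly mirrors the signature documented above; the values below are placeholders chosen for illustration (in training runs they normally come from the model section of a YAML config).

from fairchem.core.models.dimenet_plus_plus import DimeNetPlusPlusWrap

model = DimeNetPlusPlusWrap(
    num_atoms=1,          # placeholder
    bond_feat_dim=1,      # placeholder
    num_targets=1,
    hidden_channels=128,
    num_blocks=4,
    cutoff=10.0,
    regress_forces=True,
    otf_graph=True,
)
print(model.num_params)   # property documented above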
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/equiformer_v2/activation/index.html b/autoapi/core/models/equiformer_v2/activation/index.html new file mode 100644 index 000000000..210f1fc33 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/activation/index.html @@ -0,0 +1,1253 @@
+core.models.equiformer_v2.activation — FAIR Chemistry Documentation

core.models.equiformer_v2.activation#

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ScaledSiLU

Base class for all neural network modules.

ScaledSwiGLU

Base class for all neural network modules.

SwiGLU

Base class for all neural network modules.

SmoothLeakyReLU

Base class for all neural network modules.

ScaledSmoothLeakyReLU

Base class for all neural network modules.

ScaledSigmoid

Base class for all neural network modules.

GateActivation

Base class for all neural network modules.

S2Activation

Assume we only have one resolution

SeparableS2Activation

Base class for all neural network modules.

+
+
+class core.models.equiformer_v2.activation.ScaledSiLU(inplace: bool = False)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(inputs)#
+
+ +
+
+extra_repr()#
+

Set the extra representation of the module.

+

To print customized extra information, you should re-implement +this method in your own modules. Both single-line and multi-line +strings are acceptable.

+
+ +
+ +
+
+class core.models.equiformer_v2.activation.ScaledSwiGLU(in_channels: int, out_channels: int, bias: bool = True)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(inputs)#
+
+ +
+ +
+
+class core.models.equiformer_v2.activation.SwiGLU(in_channels: int, out_channels: int, bias: bool = True)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(inputs)#
+
+ +
+ +
+
+class core.models.equiformer_v2.activation.SmoothLeakyReLU(negative_slope: float = 0.2)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x)#
+
+ +
+
+extra_repr()#
+

Set the extra representation of the module.

+

To print customized extra information, you should re-implement +this method in your own modules. Both single-line and multi-line +strings are acceptable.

+
+ +
+ +
+
+class core.models.equiformer_v2.activation.ScaledSmoothLeakyReLU#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x)#
+
+ +
+
+extra_repr()#
+

Set the extra representation of the module.

+

To print customized extra information, you should re-implement +this method in your own modules. Both single-line and multi-line +strings are acceptable.

+
+ +
+ +
+
+class core.models.equiformer_v2.activation.ScaledSigmoid#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.equiformer_v2.activation.GateActivation(lmax: int, mmax: int, num_channels: int)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(gating_scalars, input_tensors)#
+

gating_scalars: shape [N, lmax * num_channels]
input_tensors: shape [N, (lmax + 1) ** 2, num_channels]

+
+ +
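The shapes quoted above can be checked directly with random tensors; the particular lmax, mmax, channel and batch sizes below are arbitrary choices for the sketch.

import torch

from fairchem.core.models.equiformer_v2.activation import GateActivation

N, lmax, mmax, num_channels = 5, 2, 2, 8
gate = GateActivation(lmax=lmax, mmax=mmax, num_channels=num_channels)

gating_scalars = torch.randn(N, lmax * num_channels)           # [N, lmax * num_channels]
input_tensors = torch.randn(N, (lmax + 1) ** 2, num_channels)  # [N, (lmax + 1) ** 2, num_channels]

out = gate(gating_scalars, input_tensors)
print(out.shape)  # expected to match input_tensors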
+ +
+
+class core.models.equiformer_v2.activation.S2Activation(lmax: int, mmax: int)#
+

Bases: torch.nn.Module

+

Assume we only have one resolution

+
+
+forward(inputs, SO3_grid)#
+
+ +
+ +
+
+class core.models.equiformer_v2.activation.SeparableS2Activation(lmax: int, mmax: int)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(input_scalars, input_tensors, SO3_grid)#
+
+ +
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/equiformer_v2/drop/index.html b/autoapi/core/models/equiformer_v2/drop/index.html new file mode 100644 index 000000000..90f02d54f --- /dev/null +++ b/autoapi/core/models/equiformer_v2/drop/index.html @@ -0,0 +1,1047 @@
+core.models.equiformer_v2.drop — FAIR Chemistry Documentation

core.models.equiformer_v2.drop#

+

Add extra_repr into DropPath implemented by timm for displaying more info.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + +

DropPath

Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

GraphDropPath

Consider batch for graph data when dropping paths.

EquivariantDropout

Base class for all neural network modules.

EquivariantScalarsDropout

Base class for all neural network modules.

EquivariantDropoutArraySphericalHarmonics

Base class for all neural network modules.

+
+
+

Functions#

+ + + + + + +

drop_path(→ torch.Tensor)

Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

+
+
+core.models.equiformer_v2.drop.drop_path(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) torch.Tensor#
+

Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc. networks; however, the original name is misleading as ‘Drop Connect’ is a different form of dropout in a separate paper… See discussion: tensorflow/tpu#494 … I’ve opted for changing the layer and argument names to ‘drop path’ rather than mix DropConnect as a layer name and use ‘survival rate’ as the argument.

+
+ +
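A short sketch of the behaviour described above: the functional form is a no-op outside training and drops whole samples (rescaling the survivors) during training, while the DropPath module wraps the same logic for use inside residual blocks. The tensor sizes are arbitrary.

import torch

from fairchem.core.models.equiformer_v2.drop import DropPath, drop_path

x = torch.ones(8, 16)  # 8 samples, arbitrary feature width

print(torch.equal(drop_path(x, drop_prob=0.5, training=False), x))  # True: identity in eval mode

y = drop_path(x, drop_prob=0.5, training=True)
print(y[:, 0])  # roughly half the rows zeroed, survivors scaled by 1 / (1 - drop_prob)

dp = DropPath(drop_prob=0.5)
dp.train()
out = dp(x)  # typical use: out = residual + dp(branch_output)
print(out.shape)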
+
+class core.models.equiformer_v2.drop.DropPath(drop_prob: float)#
+

Bases: torch.nn.Module

+

Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

+
+
+forward(x: torch.Tensor) torch.Tensor#
+
+ +
+
+extra_repr() str#
+

Set the extra representation of the module.

+

To print customized extra information, you should re-implement +this method in your own modules. Both single-line and multi-line +strings are acceptable.

+
+ +
+ +
+
+class core.models.equiformer_v2.drop.GraphDropPath(drop_prob: float)#
+

Bases: torch.nn.Module

+

Consider batch for graph data when dropping paths.

+
+
+forward(x: torch.Tensor, batch) torch.Tensor#
+
+ +
+
+extra_repr() str#
+

Set the extra representation of the module.

+

To print customized extra information, you should re-implement +this method in your own modules. Both single-line and multi-line +strings are acceptable.

+
+ +
+ +
+
+class core.models.equiformer_v2.drop.EquivariantDropout(irreps, drop_prob: float)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.equiformer_v2.drop.EquivariantScalarsDropout(irreps, drop_prob: float)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x: torch.Tensor) torch.Tensor#
+
+ +
+
+extra_repr() str#
+

Set the extra representation of the module.

+

To print customized extra information, you should re-implement +this method in your own modules. Both single-line and multi-line +strings are acceptable.

+
+ +
+ +
+
+class core.models.equiformer_v2.drop.EquivariantDropoutArraySphericalHarmonics(drop_prob: float, drop_graph: bool = False)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x: torch.Tensor, batch=None) torch.Tensor#
+
+ +
+
+extra_repr() str#
+

Set the extra representation of the module.

+

To print customized extra information, you should re-implement +this method in your own modules. Both single-line and multi-line +strings are acceptable.

+
+ +
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/equiformer_v2/edge_rot_mat/index.html b/autoapi/core/models/equiformer_v2/edge_rot_mat/index.html new file mode 100644 index 000000000..75f0508e7 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/edge_rot_mat/index.html @@ -0,0 +1,781 @@
+core.models.equiformer_v2.edge_rot_mat — FAIR Chemistry Documentation

core.models.equiformer_v2.edge_rot_mat#

+
+

Module Contents#

+
+

Functions#

+ + + + + + +

init_edge_rot_mat(edge_distance_vec)

+
+
+core.models.equiformer_v2.edge_rot_mat.init_edge_rot_mat(edge_distance_vec)#
+
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/equiformer_v2/equiformer_v2_oc20/index.html b/autoapi/core/models/equiformer_v2/equiformer_v2_oc20/index.html new file mode 100644 index 000000000..6ed30d2a1 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/equiformer_v2_oc20/index.html @@ -0,0 +1,915 @@
+core.models.equiformer_v2.equiformer_v2_oc20 — FAIR Chemistry Documentation

core.models.equiformer_v2.equiformer_v2_oc20#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

EquiformerV2_OC20

Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation

+
+
+

Attributes#

+ + + + + + + + + +

_AVG_NUM_NODES

_AVG_DEGREE

+
+
+core.models.equiformer_v2.equiformer_v2_oc20._AVG_NUM_NODES = 77.81317#
+
+ +
+
+core.models.equiformer_v2.equiformer_v2_oc20._AVG_DEGREE = 23.395238876342773#
+
+ +
+
+class core.models.equiformer_v2.equiformer_v2_oc20.EquiformerV2_OC20(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = True, max_neighbors: int = 500, max_radius: float = 5.0, max_num_elements: int = 90, num_layers: int = 12, sphere_channels: int = 128, attn_hidden_channels: int = 128, num_heads: int = 8, attn_alpha_channels: int = 32, attn_value_channels: int = 16, ffn_hidden_channels: int = 512, norm_type: str = 'rms_norm_sh', lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, grid_resolution: int | None = None, num_sphere_samples: int = 128, edge_channels: int = 128, use_atom_edge_embedding: bool = True, share_atom_edge_embedding: bool = False, use_m_share_rad: bool = False, distance_function: str = 'gaussian', num_distance_basis: int = 512, attn_activation: str = 'scaled_silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, ffn_activation: str = 'scaled_silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True, alpha_drop: float = 0.1, drop_path_rate: float = 0.05, proj_drop: float = 0.0, weight_init: str = 'normal', enforce_max_neighbors_strictly: bool = True, avg_num_nodes: float | None = None, avg_degree: float | None = None, use_energy_lin_ref: bool | None = False, load_energy_lin_ref: bool | None = False)#
+

Bases: fairchem.core.models.base.BaseModel

+

Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation

+
+
Parameters:
+
    +
  • use_pbc (bool) – Use periodic boundary conditions

  • +
  • regress_forces (bool) – Compute forces

  • +
  • otf_graph (bool) – Compute graph On The Fly (OTF)

  • +
  • max_neighbors (int) – Maximum number of neighbors per atom

  • +
  • max_radius (float) – Maximum distance between neighboring atoms in Angstroms

  • +
  • max_num_elements (int) – Maximum atomic number

  • +
  • num_layers (int) – Number of layers in the GNN

  • +
  • sphere_channels (int) – Number of spherical channels (one set per resolution)

  • +
  • attn_hidden_channels (int) – Number of hidden channels used during SO(2) graph attention

  • +
  • num_heads (int) – Number of attention heads

  • +
  • attn_alpha_head (int) – Number of channels for alpha vector in each attention head

  • +
  • attn_value_head (int) – Number of channels for value vector in each attention head

  • +
  • ffn_hidden_channels (int) – Number of hidden channels used during feedforward network

  • +
  • norm_type (str) – Type of normalization layer ([‘layer_norm’, ‘layer_norm_sh’, ‘rms_norm_sh’])

  • +
  • lmax_list (int) – List of maximum degree of the spherical harmonics (1 to 10)

  • +
  • mmax_list (int) – List of maximum order of the spherical harmonics (0 to lmax)

  • +
  • grid_resolution (int) – Resolution of SO3_Grid

  • +
  • num_sphere_samples (int) – Number of samples used to approximate the integration of the sphere in the output blocks

  • +
  • edge_channels (int) – Number of channels for the edge invariant features

  • +
  • use_atom_edge_embedding (bool) – Whether to use atomic embedding along with relative distance for edge scalar features

  • +
  • share_atom_edge_embedding (bool) – Whether to share atom_edge_embedding across all blocks

  • +
  • use_m_share_rad (bool) – Whether all m components within a type-L vector of one channel share radial function weights

  • +
  • distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu") – Basis function used for distances

  • +
  • attn_activation (str) – Type of activation function for SO(2) graph attention

  • +
  • use_s2_act_attn (bool) – Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer

  • +
  • use_attn_renorm (bool) – Whether to re-normalize attention weights

  • +
  • ffn_activation (str) – Type of activation function for feedforward network

  • +
  • use_gate_act (bool) – If True, use gate activation. Otherwise, use S2 activation

  • +
  • use_grid_mlp (bool) – If True, use projecting to grids and performing MLPs for FFNs.

  • +
  • use_sep_s2_act (bool) – If True, use separable S2 activation when use_gate_act is False.

  • +
  • alpha_drop (float) – Dropout rate for attention weights

  • +
  • drop_path_rate (float) – Drop path rate

  • +
  • proj_drop (float) – Dropout rate for outputs of attention and FFN in Transformer blocks

  • +
  • weight_init (str) – [‘normal’, ‘uniform’] initialization of weights of linear layers except those in radial functions

  • +
  • enforce_max_neighbors_strictly (bool) – When edges are subselected based on the max_neighbors arg, arbitrarily select amongst equidistant / degenerate edges to have exactly the correct number.

  • +
  • avg_num_nodes (float) – Average number of nodes per graph

  • +
  • avg_degree (float) – Average degree of nodes in the graph

  • +
  • use_energy_lin_ref (bool) – Whether to add the per-atom energy references during prediction. +During training and validation, this should be kept False since we use the lin_ref parameter in the OC22 dataloader to subtract the per-atom linear references from the energy targets. +During prediction (where we don’t have energy targets), this can be set to True to add the per-atom linear references to the predicted energies.

  • +
  • load_energy_lin_ref (bool) – Whether to add nn.Parameters for the per-element energy references. +This additional flag is there to ensure compatibility when strict-loading checkpoints, since the use_energy_lin_ref flag can be either True or False even if the model is trained with linear references. +You can’t have use_energy_lin_ref = True and load_energy_lin_ref = False, since the model will not have the parameters for the linear references. All other combinations are fine.

  • +
+
+
+
+
+property num_params#
+
+ +
+
+forward(data)#
+
+ +
+
+_init_edge_rot_mat(data, edge_index, edge_distance_vec)#
+
+ +
+
+_init_weights(m)#
+
+ +
+
+_uniform_init_rad_func_linear_weights(m)#
+
+ +
+
+_uniform_init_linear_weights(m)#
+
+ +
+
+no_weight_decay() set#
+

Returns a list of parameters with no weight decay.

+
+ +
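The class can be built directly from the arguments documented above. Every value in the sketch below is an assumption chosen to keep the model small for illustration; production settings come from the released YAML configs and pretrained checkpoints.

from fairchem.core.models.equiformer_v2.equiformer_v2_oc20 import EquiformerV2_OC20

model = EquiformerV2_OC20(
    num_atoms=None,        # placeholder values for illustration
    bond_feat_dim=None,
    num_targets=1,
    num_layers=2,
    sphere_channels=32,
    attn_hidden_channels=32,
    num_heads=2,
    attn_alpha_channels=8,
    attn_value_channels=8,
    ffn_hidden_channels=64,
    lmax_list=[2],
    mmax_list=[2],
    max_neighbors=20,
    max_radius=5.0,
    alpha_drop=0.0,
    drop_path_rate=0.0,
)
print(model.num_params)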
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/equiformer_v2/gaussian_rbf/index.html b/autoapi/core/models/equiformer_v2/gaussian_rbf/index.html new file mode 100644 index 000000000..d72a3cee3 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/gaussian_rbf/index.html @@ -0,0 +1,854 @@
+core.models.equiformer_v2.gaussian_rbf — FAIR Chemistry Documentation

core.models.equiformer_v2.gaussian_rbf#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

GaussianRadialBasisLayer

Base class for all neural network modules.

+
+
+

Functions#

+ + + + + + +

gaussian(→ torch.Tensor)

+
+
+core.models.equiformer_v2.gaussian_rbf.gaussian(x: torch.Tensor, mean, std) torch.Tensor#
+
+ +
+
+class core.models.equiformer_v2.gaussian_rbf.GaussianRadialBasisLayer(num_basis: int, cutoff: float)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(dist: torch.Tensor, node_atom=None, edge_src=None, edge_dst=None)#
+
+ +
+
+extra_repr()#
+

Set the extra representation of the module.

+

To print customized extra information, you should re-implement +this method in your own modules. Both single-line and multi-line +strings are acceptable.

+
+ +
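A small sketch of the radial basis layer: interatomic distances are expanded into num_basis Gaussian features. The number of basis functions, the cutoff and the ten random edge lengths are arbitrary choices.

import torch

from fairchem.core.models.equiformer_v2.gaussian_rbf import GaussianRadialBasisLayer

rbf = GaussianRadialBasisLayer(num_basis=128, cutoff=5.0)

dist = torch.rand(10) * 5.0   # hypothetical edge lengths (Angstrom)
features = rbf(dist)
print(features.shape)         # expected: [10, 128]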
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/equiformer_v2/index.html b/autoapi/core/models/equiformer_v2/index.html new file mode 100644 index 000000000..4436c25d9 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/index.html @@ -0,0 +1,923 @@
+core.models.equiformer_v2 — FAIR Chemistry Documentation

core.models.equiformer_v2#

+
+

Subpackages#

+ +
+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + +

EquiformerV2

Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation

+
+
+class core.models.equiformer_v2.EquiformerV2(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = True, max_neighbors: int = 500, max_radius: float = 5.0, max_num_elements: int = 90, num_layers: int = 12, sphere_channels: int = 128, attn_hidden_channels: int = 128, num_heads: int = 8, attn_alpha_channels: int = 32, attn_value_channels: int = 16, ffn_hidden_channels: int = 512, norm_type: str = 'rms_norm_sh', lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, grid_resolution: int | None = None, num_sphere_samples: int = 128, edge_channels: int = 128, use_atom_edge_embedding: bool = True, share_atom_edge_embedding: bool = False, use_m_share_rad: bool = False, distance_function: str = 'gaussian', num_distance_basis: int = 512, attn_activation: str = 'scaled_silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, ffn_activation: str = 'scaled_silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True, alpha_drop: float = 0.1, drop_path_rate: float = 0.05, proj_drop: float = 0.0, weight_init: str = 'normal', enforce_max_neighbors_strictly: bool = True, avg_num_nodes: float | None = None, avg_degree: float | None = None, use_energy_lin_ref: bool | None = False, load_energy_lin_ref: bool | None = False)#
+

Bases: fairchem.core.models.base.BaseModel

+

Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation

+
+
Parameters:
+
    +
  • use_pbc (bool) – Use periodic boundary conditions

  • +
  • regress_forces (bool) – Compute forces

  • +
  • otf_graph (bool) – Compute graph On The Fly (OTF)

  • +
  • max_neighbors (int) – Maximum number of neighbors per atom

  • +
  • max_radius (float) – Maximum distance between neighboring atoms in Angstroms

  • +
  • max_num_elements (int) – Maximum atomic number

  • +
  • num_layers (int) – Number of layers in the GNN

  • +
  • sphere_channels (int) – Number of spherical channels (one set per resolution)

  • +
  • attn_hidden_channels (int) – Number of hidden channels used during SO(2) graph attention

  • +
  • num_heads (int) – Number of attention heads

  • +
  • attn_alpha_head (int) – Number of channels for alpha vector in each attention head

  • +
  • attn_value_head (int) – Number of channels for value vector in each attention head

  • +
  • ffn_hidden_channels (int) – Number of hidden channels used during feedforward network

  • +
  • norm_type (str) – Type of normalization layer ([‘layer_norm’, ‘layer_norm_sh’, ‘rms_norm_sh’])

  • +
  • lmax_list (int) – List of maximum degree of the spherical harmonics (1 to 10)

  • +
  • mmax_list (int) – List of maximum order of the spherical harmonics (0 to lmax)

  • +
  • grid_resolution (int) – Resolution of SO3_Grid

  • +
  • num_sphere_samples (int) – Number of samples used to approximate the integration of the sphere in the output blocks

  • +
  • edge_channels (int) – Number of channels for the edge invariant features

  • +
  • use_atom_edge_embedding (bool) – Whether to use atomic embedding along with relative distance for edge scalar features

  • +
  • share_atom_edge_embedding (bool) – Whether to share atom_edge_embedding across all blocks

  • +
  • use_m_share_rad (bool) – Whether all m components within a type-L vector of one channel share radial function weights

  • +
  • distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu") – Basis function used for distances

  • +
  • attn_activation (str) – Type of activation function for SO(2) graph attention

  • +
  • use_s2_act_attn (bool) – Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer

  • +
  • use_attn_renorm (bool) – Whether to re-normalize attention weights

  • +
  • ffn_activation (str) – Type of activation function for feedforward network

  • +
  • use_gate_act (bool) – If True, use gate activation. Otherwise, use S2 activation

  • +
  • use_grid_mlp (bool) – If True, use projecting to grids and performing MLPs for FFNs.

  • +
  • use_sep_s2_act (bool) – If True, use separable S2 activation when use_gate_act is False.

  • +
  • alpha_drop (float) – Dropout rate for attention weights

  • +
  • drop_path_rate (float) – Drop path rate

  • +
  • proj_drop (float) – Dropout rate for outputs of attention and FFN in Transformer blocks

  • +
  • weight_init (str) – [‘normal’, ‘uniform’] initialization of weights of linear layers except those in radial functions

  • +
  • enforce_max_neighbors_strictly (bool) – When edges are subselected based on the max_neighbors arg, arbitrarily select amongst equidistant / degenerate edges to have exactly the correct number.

  • +
  • avg_num_nodes (float) – Average number of nodes per graph

  • +
  • avg_degree (float) – Average degree of nodes in the graph

  • +
  • use_energy_lin_ref (bool) – Whether to add the per-atom energy references during prediction. +During training and validation, this should be kept False since we use the lin_ref parameter in the OC22 dataloader to subtract the per-atom linear references from the energy targets. +During prediction (where we don’t have energy targets), this can be set to True to add the per-atom linear references to the predicted energies.

  • +
  • load_energy_lin_ref (bool) – Whether to add nn.Parameters for the per-element energy references. This additional flag ensures compatibility when strict-loading checkpoints, since the use_energy_lin_ref flag can be either True or False even if the model is trained with linear references. You can’t have use_energy_lin_ref = True and load_energy_lin_ref = False, since the model will not have the parameters for the linear references; all other combinations are fine. A small sketch of the valid flag combinations follows this parameter list.

  • +
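The interplay between use_energy_lin_ref and load_energy_lin_ref can be summarized with a small, hypothetical helper (not part of the library) that encodes the constraint documented above:

def check_lin_ref_flags(use_energy_lin_ref: bool, load_energy_lin_ref: bool) -> None:
    # The only invalid combination: adding references that were never allocated.
    if use_energy_lin_ref and not load_energy_lin_ref:
        raise ValueError(
            "use_energy_lin_ref=True requires load_energy_lin_ref=True; "
            "otherwise the per-element reference parameters do not exist."
        )

check_lin_ref_flags(use_energy_lin_ref=True, load_energy_lin_ref=True)   # prediction
check_lin_ref_flags(use_energy_lin_ref=False, load_energy_lin_ref=True)  # training/validation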
+
+
+
+
+property num_params#
+
+ +
+
+forward(data)#
+
+ +
+
+_init_edge_rot_mat(data, edge_index, edge_distance_vec)#
+
+ +
+
+_init_weights(m)#
+
+ +
+
+_uniform_init_rad_func_linear_weights(m)#
+
+ +
+
+_uniform_init_linear_weights(m)#
+
+ +
+
+no_weight_decay() set#
+

Returns a list of parameters with no weight decay.

+
+ +
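A hedged sketch of how such a set is commonly consumed when building optimizer parameter groups. Here model stands for an instance of the class documented on this page, and the sketch assumes the returned set contains parameter names (the usual convention for no_weight_decay()):

import torch

def build_param_groups(model, weight_decay: float = 0.01):
    # Parameters listed in no_weight_decay() get weight_decay=0; everything else decays.
    skip = set(model.no_weight_decay())
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        (no_decay if name in skip else decay).append(param)
    return [
        {"params": decay, "weight_decay": weight_decay},
        {"params": no_decay, "weight_decay": 0.0},
    ]

# optimizer = torch.optim.AdamW(build_param_groups(model), lr=5e-4)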
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/equiformer_v2/input_block/index.html b/autoapi/core/models/equiformer_v2/input_block/index.html new file mode 100644 index 000000000..e3faa5d18 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/input_block/index.html @@ -0,0 +1,814 @@ + + + + + + + + + + + core.models.equiformer_v2.input_block — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.equiformer_v2.input_block

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.models.equiformer_v2.input_block#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

EdgeDegreeEmbedding

+
param sphere_channels:
+

Number of spherical channels

+
+
+

+
+
+class core.models.equiformer_v2.input_block.EdgeDegreeEmbedding(sphere_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_rotation, mappingReduced, max_num_elements: int, edge_channels_list, use_atom_edge_embedding: bool, rescale_factor)#
+

Bases: torch.nn.Module

+
+
Parameters:
+
    +
  • sphere_channels (int) – Number of spherical channels

  • +
  • lmax_list (list of int) – List of degrees (l) for each resolution

  • +
  • mmax_list (list of int) – List of orders (m) for each resolution

  • +
  • SO3_rotation (list of SO3_Rotation) – Class to calculate Wigner-D matrices and rotate embeddings

  • +
  • mappingReduced (CoefficientMappingModule) – Class to convert l and m indices once node embedding is rotated

  • +
  • max_num_elements (int) – Maximum number of atomic numbers

  • +
  • edge_channels_list (list of int) – List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. The last one will be used as hidden size when use_atom_edge_embedding is True.

  • +
  • use_atom_edge_embedding (bool) – Whether to use atomic embedding along with relative distance for edge scalar features

  • +
  • rescale_factor (float) – Rescale the sum aggregation

  • +
+
+
+
+
+forward(atomic_numbers, edge_distance, edge_index)#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/equiformer_v2/layer_norm/index.html b/autoapi/core/models/equiformer_v2/layer_norm/index.html new file mode 100644 index 000000000..d221b8183 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/layer_norm/index.html @@ -0,0 +1,1017 @@ + + + + + + + + + + + core.models.equiformer_v2.layer_norm — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.models.equiformer_v2.layer_norm#

+

1. Normalize features of shape (N, sphere_basis, C), +with sphere_basis = (lmax + 1) ** 2.

+

2. The difference from layer_norm.py is that all type-L vectors have +the same number of channels and input features are of shape (N, sphere_basis, C).

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + +

EquivariantLayerNormArray

Base class for all neural network modules.

EquivariantLayerNormArraySphericalHarmonics

    +
  1. Normalize over L = 0.

+

EquivariantRMSNormArraySphericalHarmonics

    +
  1. Normalize across all m components from degrees L >= 0.

+

EquivariantRMSNormArraySphericalHarmonicsV2

    +
  1. Normalize across all m components from degrees L >= 0.

+

EquivariantDegreeLayerScale

    +
  1. Similar to Layer Scale used in CaiT (Going Deeper With Image Transformers (ICCV'21)), we scale the output of both attention and FFN.

+

+
+
+

Functions#

+ + + + + + + + + +

get_normalization_layer(norm_type, lmax, num_channels)

get_l_to_all_m_expand_index(lmax)

+
+
+core.models.equiformer_v2.layer_norm.get_normalization_layer(norm_type: str, lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component')#
+
+ +
+
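A hedged usage sketch for the factory above; the import path assumes the fairchem package prefix for the module documented on this page, and the norm_type value is one of the options listed for the model (‘layer_norm’, ‘layer_norm_sh’, ‘rms_norm_sh’):

import torch
from fairchem.core.models.equiformer_v2.layer_norm import get_normalization_layer

lmax, num_channels = 4, 128
norm = get_normalization_layer("rms_norm_sh", lmax=lmax, num_channels=num_channels)

# Features of shape (N, sphere_basis, C) with sphere_basis = (lmax + 1) ** 2, as described above.
x = torch.randn(8, (lmax + 1) ** 2, num_channels)
y = norm(x)  # same shape, normalized according to the selected scheme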
+core.models.equiformer_v2.layer_norm.get_l_to_all_m_expand_index(lmax: int)#
+
+ +
+
+class core.models.equiformer_v2.layer_norm.EquivariantLayerNormArray(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component')#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+__repr__() str#
+

Return repr(self).

+
+ +
+
+forward(node_input)#
+

Assume input is of shape [N, sphere_basis, C]

+
+ +
+ +
+
+class core.models.equiformer_v2.layer_norm.EquivariantLayerNormArraySphericalHarmonics(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component', std_balance_degrees: bool = True)#
+

Bases: torch.nn.Module

+
    +
  1. Normalize over L = 0.

  2. Normalize across all m components from degrees L > 0.

  3. Do not normalize separately for different L (L > 0).
+
+
+__repr__() str#
+

Return repr(self).

+
+ +
+
+forward(node_input)#
+

Assume input is of shape [N, sphere_basis, C]

+
+ +
+ +
+
+class core.models.equiformer_v2.layer_norm.EquivariantRMSNormArraySphericalHarmonics(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component')#
+

Bases: torch.nn.Module

+
    +
  1. Normalize across all m components from degrees L >= 0.
+
+
+__repr__() str#
+

Return repr(self).

+
+ +
+
+forward(node_input)#
+

Assume input is of shape [N, sphere_basis, C]

+
+ +
+ +
+
+class core.models.equiformer_v2.layer_norm.EquivariantRMSNormArraySphericalHarmonicsV2(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component', centering: bool = True, std_balance_degrees: bool = True)#
+

Bases: torch.nn.Module

+
    +
  1. Normalize across all m components from degrees L >= 0.

  2. Expand weights and multiply with normalized feature to prevent slicing and concatenation.
+
+
+__repr__() str#
+

Return repr(self).

+
+ +
+
+forward(node_input)#
+

Assume input is of shape [N, sphere_basis, C]

+
+ +
+ +
+
+class core.models.equiformer_v2.layer_norm.EquivariantDegreeLayerScale(lmax: int, num_channels: int, scale_factor: float = 2.0)#
+

Bases: torch.nn.Module

+
    +
  1. Similar to Layer Scale used in CaiT (Going Deeper With Image Transformers, ICCV’21), we scale the output of both attention and FFN.

  2. For degree L > 0, we scale the output down by the square root of 2 * L, which emulates halving the number of channels when using higher L.
+
+
+__repr__() str#
+

Return repr(self).

+
+ +
+
+forward(node_input)#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/equiformer_v2/module_list/index.html b/autoapi/core/models/equiformer_v2/module_list/index.html new file mode 100644 index 000000000..55514a92a --- /dev/null +++ b/autoapi/core/models/equiformer_v2/module_list/index.html @@ -0,0 +1,816 @@ + + + + + + + + + + + core.models.equiformer_v2.module_list — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.equiformer_v2.module_list

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.models.equiformer_v2.module_list#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

ModuleListInfo

Holds submodules in a list.

+
+
+class core.models.equiformer_v2.module_list.ModuleListInfo(info_str, modules=None)#
+

Bases: torch.nn.ModuleList

+

Holds submodules in a list.

+

ModuleList can be indexed like a regular Python list, but +modules it contains are properly registered, and will be visible by all +Module methods.

+
+
Parameters:
+

modules (iterable, optional) – an iterable of modules to add

+
+
+

Example:

+
class MyModule(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
+
+    def forward(self, x):
+        # ModuleList can act as an iterable, or be indexed using ints
+        for i, l in enumerate(self.linears):
+            x = self.linears[i // 2](x) + l(x)
+        return x
+
+
+
+
+__repr__() str#
+

Return a custom repr for ModuleList that compresses repeated module representations.

+
+ +
+ +
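A hedged usage sketch; the import path assumes the fairchem package prefix, and the info string is treated purely as descriptive metadata attached to the list:

import torch.nn as nn
from fairchem.core.models.equiformer_v2.module_list import ModuleListInfo

# Behaves like nn.ModuleList, but carries a short description of what it holds.
blocks = ModuleListInfo("(lmax, mmax)", [nn.Linear(16, 16) for _ in range(4)])
print(blocks)  # custom repr that compresses the four repeated Linear entries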
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/equiformer_v2/radial_function/index.html b/autoapi/core/models/equiformer_v2/radial_function/index.html new file mode 100644 index 000000000..046dd0c98 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/radial_function/index.html @@ -0,0 +1,794 @@ + + + + + + + + + + + core.models.equiformer_v2.radial_function — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.equiformer_v2.radial_function

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.models.equiformer_v2.radial_function#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

RadialFunction

Construct a radial function (linear layers + layer normalization + SiLU) given a list of channels

+
+
+class core.models.equiformer_v2.radial_function.RadialFunction(channels_list)#
+

Bases: torch.nn.Module

+

Construct a radial function (linear layers + layer normalization + SiLU) given a list of channels

+
+
+forward(inputs)#
+
+ +
+ +
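A hedged sketch of how the channel list maps onto the stack described above; the import path assumes the fairchem package prefix, and the shapes are illustrative:

import torch
from fairchem.core.models.equiformer_v2.radial_function import RadialFunction

# Three entries roughly give: Linear(128, 64) -> LayerNorm -> SiLU -> Linear(64, 64).
radial = RadialFunction([128, 64, 64])

edge_features = torch.randn(10, 128)  # e.g., expanded edge-distance features
out = radial(edge_features)           # expected shape: (10, 64)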
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/equiformer_v2/so2_ops/index.html b/autoapi/core/models/equiformer_v2/so2_ops/index.html new file mode 100644 index 000000000..a9da56fcc --- /dev/null +++ b/autoapi/core/models/equiformer_v2/so2_ops/index.html @@ -0,0 +1,878 @@ + + + + + + + + + + + core.models.equiformer_v2.so2_ops — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.equiformer_v2.so2_ops

+ +
+ +
+
+ + + + +
+ +
+

core.models.equiformer_v2.so2_ops#

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + +

SO2_m_Convolution

SO(2) Conv: Perform an SO(2) convolution on features corresponding to +- m

SO2_Convolution

SO(2) Block: Perform SO(2) convolutions for all m (orders)

SO2_Linear

SO(2) Linear: Perform SO(2) linear for all m (orders).

+
+
+class core.models.equiformer_v2.so2_ops.SO2_m_Convolution(m: int, sphere_channels: int, m_output_channels: int, lmax_list: list[int], mmax_list: list[int])#
+

Bases: torch.nn.Module

+

SO(2) Conv: Perform an SO(2) convolution on features corresponding to +- m

+
+
Parameters:
+
    +
  • m (int) – Order of the spherical harmonic coefficients

  • +
  • sphere_channels (int) – Number of spherical channels

  • +
  • m_output_channels (int) – Number of output channels used during the SO(2) conv

  • +
  • lmax_list (list of int) – List of degrees (l) for each resolution

  • +
  • mmax_list (list of int) – List of orders (m) for each resolution

  • +
+
+
+
+
+forward(x_m)#
+
+ +
+ +
+
+class core.models.equiformer_v2.so2_ops.SO2_Convolution(sphere_channels: int, m_output_channels: int, lmax_list: list[int], mmax_list: list[int], mappingReduced, internal_weights: bool = True, edge_channels_list: list[int] | None = None, extra_m0_output_channels: int | None = None)#
+

Bases: torch.nn.Module

+

SO(2) Block: Perform SO(2) convolutions for all m (orders)

+
+
Parameters:
+
    +
  • sphere_channels (int) – Number of spherical channels

  • +
  • m_output_channels (int) – Number of output channels used during the SO(2) conv

  • +
  • lmax_list (list of int) – List of degrees (l) for each resolution

  • +
  • mmax_list (list of int) – List of orders (m) for each resolution

  • +
  • mappingReduced (CoefficientMappingModule) – Used to extract a subset of m components

  • +
  • internal_weights (bool) – If True, not using radial function to multiply inputs features

  • +
  • edge_channels_list (list of int) – List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels].

  • +
  • extra_m0_output_channels (int) – If not None, return out_embedding (SO3_Embedding) and extra_m0_features (Tensor).

  • +
+
+
+
+
+forward(x, x_edge)#
+
+ +
+ +
+
+class core.models.equiformer_v2.so2_ops.SO2_Linear(sphere_channels: int, m_output_channels: int, lmax_list: list[int], mmax_list: list[int], mappingReduced, internal_weights: bool = False, edge_channels_list: list[int] | None = None)#
+

Bases: torch.nn.Module

+

SO(2) Linear: Perform SO(2) linear for all m (orders).

+
+
Parameters:
+
    +
  • sphere_channels (int) – Number of spherical channels

  • +
  • m_output_channels (int) – Number of output channels used during the SO(2) conv

  • +
  • lmax_list (list of int) – List of degrees (l) for each resolution

  • +
  • mmax_list (list of int) – List of orders (m) for each resolution

  • +
  • mappingReduced (CoefficientMappingModule) – Used to extract a subset of m components

  • +
  • internal_weights (bool) – If True, not using radial function to multiply inputs features

  • +
  • edge_channels_list (list of int) – List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels].

  • +
+
+
+
+
+forward(x, x_edge)#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/equiformer_v2/so3/index.html b/autoapi/core/models/equiformer_v2/so3/index.html new file mode 100644 index 000000000..637db4afa --- /dev/null +++ b/autoapi/core/models/equiformer_v2/so3/index.html @@ -0,0 +1,1169 @@ + + + + + + + + + + + core.models.equiformer_v2.so3 — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.models.equiformer_v2.so3#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + + + + +

CoefficientMappingModule

Helper module for coefficients used to reshape lval <--> m and to get coefficients of specific degree or order

SO3_Embedding

Helper functions for performing operations on irreps embedding

SO3_Rotation

Helper functions for Wigner-D rotations

SO3_Grid

Helper functions for grid representation of the irreps

SO3_Linear

Base class for all neural network modules.

SO3_LinearV2

Base class for all neural network modules.

+
+
+class core.models.equiformer_v2.so3.CoefficientMappingModule(lmax_list: list[int], mmax_list: list[int])#
+

Bases: torch.nn.Module

+

Helper module for coefficients used to reshape lval <–> m and to get coefficients of specific degree or order

+
+
Parameters:
+
    +
  • lmax_list (list of int) – List of maximum degree of the spherical harmonics

  • +
  • mmax_list (list of int) – List of maximum order of the spherical harmonics

  • +
+
+
+
+
+complex_idx(m: int, lmax: int, m_complex, l_harmonic)#
+

Add m_complex and l_harmonic to the input arguments +since we cannot use self.m_complex.

+
+ +
+
+coefficient_idx(lmax: int, mmax: int)#
+
+ +
+
+get_rotate_inv_rescale(lmax: int, mmax: int)#
+
+ +
+
+__repr__() str#
+

Return repr(self).

+
+ +
+ +
+
+class core.models.equiformer_v2.so3.SO3_Embedding(length: int, lmax_list: list[int], num_channels: int, device: torch.device, dtype: torch.dtype)#
+

Helper functions for performing operations on irreps embedding

+
+
Parameters:
+
    +
  • length (int) – Batch size

  • +
  • lmax_list (list of int) – List of maximum degree of the spherical harmonics

  • +
  • num_channels (int) – Number of channels

  • +
  • device – Device of the output

  • +
  • dtype – type of the output tensors

  • +
+
+
+
+
+clone() SO3_Embedding#
+
+ +
+
+set_embedding(embedding) None#
+
+ +
+
+set_lmax_mmax(lmax_list: list[int], mmax_list: list[int]) None#
+
+ +
+
+_expand_edge(edge_index: torch.Tensor) None#
+
+ +
+
+expand_edge(edge_index: torch.Tensor)#
+
+ +
+
+_reduce_edge(edge_index: torch.Tensor, num_nodes: int)#
+
+ +
+
+_m_primary(mapping)#
+
+ +
+
+_l_primary(mapping)#
+
+ +
+
+_rotate(SO3_rotation, lmax_list: list[int], mmax_list: list[int])#
+
+ +
+
+_rotate_inv(SO3_rotation, mappingReduced)#
+
+ +
+
+_grid_act(SO3_grid, act, mappingReduced)#
+
+ +
+
+to_grid(SO3_grid, lmax=-1)#
+
+ +
+
+_from_grid(x_grid, SO3_grid, lmax: int = -1)#
+
+ +
+ +
+
+class core.models.equiformer_v2.so3.SO3_Rotation(lmax: int)#
+

Bases: torch.nn.Module

+

Helper functions for Wigner-D rotations

+
+
Parameters:
+

lmax_list (list of int) – List of maximum degree of the spherical harmonics

+
+
+
+
+set_wigner(rot_mat3x3)#
+
+ +
+
+rotate(embedding, out_lmax: int, out_mmax: int)#
+
+ +
+
+rotate_inv(embedding, in_lmax: int, in_mmax: int)#
+
+ +
+
+RotationToWignerDMatrix(edge_rot_mat, start_lmax: int, end_lmax: int) torch.Tensor#
+
+ +
+ +
+
+class core.models.equiformer_v2.so3.SO3_Grid(lmax: int, mmax: int, normalization: str = 'integral', resolution: int | None = None)#
+

Bases: torch.nn.Module

+

Helper functions for grid representation of the irreps

+
+
Parameters:
+
    +
  • lmax (int) – Maximum degree of the spherical harmonics

  • +
  • mmax (int) – Maximum order of the spherical harmonics

  • +
+
+
+
+
+get_to_grid_mat(device)#
+
+ +
+
+get_from_grid_mat(device)#
+
+ +
+
+to_grid(embedding, lmax: int, mmax: int)#
+
+ +
+
+from_grid(grid, lmax: int, mmax: int)#
+
+ +
+ +
+
+class core.models.equiformer_v2.so3.SO3_Linear(in_features: int, out_features: int, lmax: int, bias: bool = True)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(input_embedding, output_scale=None)#
+
+ +
+
+__repr__() str#
+

Return repr(self).

+
+ +
+ +
+
+class core.models.equiformer_v2.so3.SO3_LinearV2(in_features: int, out_features: int, lmax: int, bias: bool = True)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(input_embedding)#
+
+ +
+
+__repr__() str#
+

Return repr(self).

+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/equiformer_v2/trainers/energy_trainer/index.html b/autoapi/core/models/equiformer_v2/trainers/energy_trainer/index.html new file mode 100644 index 000000000..47af7f6e4 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/trainers/energy_trainer/index.html @@ -0,0 +1,835 @@ + + + + + + + + + + + core.models.equiformer_v2.trainers.energy_trainer — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.equiformer_v2.trainers.energy_trainer

+ +
+ +
+
+ + + + +
+ +
+

core.models.equiformer_v2.trainers.energy_trainer#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

EquiformerV2EnergyTrainer

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to Relaxed State (IS2RS) tasks.

+
+
+class core.models.equiformer_v2.trainers.energy_trainer.EquiformerV2EnergyTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp')#
+

Bases: fairchem.core.trainers.OCPTrainer

+

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to +Relaxed State (IS2RS) tasks.

+
+

Note

+

Examples of configurations for task, model, dataset and optimizer +can be found in configs/ocp_s2ef +and configs/ocp_is2rs.

+
+
+
Parameters:
+
    +
  • task (dict) – Task configuration.

  • +
  • model (dict) – Model configuration.

  • +
  • outputs (dict) – Output property configuration.

  • +
  • dataset (dict) – Dataset configuration. The dataset needs to be a SinglePointLMDB dataset.

  • +
  • optimizer (dict) – Optimizer configuration.

  • +
  • loss_fns (dict) – Loss function configuration.

  • +
  • eval_metrics (dict) – Evaluation metrics configuration.

  • +
  • identifier (str) – Experiment identifier that is appended to log directory.

  • +
  • run_dir (str, optional) – Path to the run directory where logs are to be saved. +(default: None)

  • +
  • is_debug (bool, optional) – Run in debug mode. +(default: False)

  • +
  • print_every (int, optional) – Frequency of printing logs. +(default: 100)

  • +
  • seed (int, optional) – Random number seed. +(default: None)

  • +
  • logger (str, optional) – Type of logger to be used. +(default: wandb)

  • +
  • local_rank (int, optional) – Local rank of the process, only applicable for distributed training. +(default: 0)

  • +
  • amp (bool, optional) – Run using automatic mixed precision. +(default: False)

  • +
  • slurm (dict) – Slurm configuration. Currently just for keeping track. +(default: {})

  • +
  • noddp (bool, optional) – Run model without DDP.

  • +
+
+
+
+
+load_extras()#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/equiformer_v2/trainers/forces_trainer/index.html b/autoapi/core/models/equiformer_v2/trainers/forces_trainer/index.html new file mode 100644 index 000000000..9858ad5f1 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/trainers/forces_trainer/index.html @@ -0,0 +1,835 @@ + + + + + + + + + + + core.models.equiformer_v2.trainers.forces_trainer — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.equiformer_v2.trainers.forces_trainer

+ +
+ +
+
+ + + + +
+ +
+

core.models.equiformer_v2.trainers.forces_trainer#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

EquiformerV2ForcesTrainer

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to Relaxed State (IS2RS) tasks.

+
+
+class core.models.equiformer_v2.trainers.forces_trainer.EquiformerV2ForcesTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp')#
+

Bases: fairchem.core.trainers.OCPTrainer

+

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to +Relaxed State (IS2RS) tasks.

+
+

Note

+

Examples of configurations for task, model, dataset and optimizer +can be found in configs/ocp_s2ef +and configs/ocp_is2rs.

+
+
+
Parameters:
+
    +
  • task (dict) – Task configuration.

  • +
  • model (dict) – Model configuration.

  • +
  • outputs (dict) – Output property configuration.

  • +
  • dataset (dict) – Dataset configuration. The dataset needs to be a SinglePointLMDB dataset.

  • +
  • optimizer (dict) – Optimizer configuration.

  • +
  • loss_fns (dict) – Loss function configuration.

  • +
  • eval_metrics (dict) – Evaluation metrics configuration.

  • +
  • identifier (str) – Experiment identifier that is appended to log directory.

  • +
  • run_dir (str, optional) – Path to the run directory where logs are to be saved. +(default: None)

  • +
  • is_debug (bool, optional) – Run in debug mode. +(default: False)

  • +
  • print_every (int, optional) – Frequency of printing logs. +(default: 100)

  • +
  • seed (int, optional) – Random number seed. +(default: None)

  • +
  • logger (str, optional) – Type of logger to be used. +(default: wandb)

  • +
  • local_rank (int, optional) – Local rank of the process, only applicable for distributed training. +(default: 0)

  • +
  • amp (bool, optional) – Run using automatic mixed precision. +(default: False)

  • +
  • slurm (dict) – Slurm configuration. Currently just for keeping track. +(default: {})

  • +
  • noddp (bool, optional) – Run model without DDP.

  • +
+
+
+
+
+load_extras() None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/equiformer_v2/trainers/index.html b/autoapi/core/models/equiformer_v2/trainers/index.html new file mode 100644 index 000000000..d6383920f --- /dev/null +++ b/autoapi/core/models/equiformer_v2/trainers/index.html @@ -0,0 +1,761 @@ + + + + + + + + + + + core.models.equiformer_v2.trainers — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.equiformer_v2.trainers

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.models.equiformer_v2.trainers#

+
+

Submodules#

+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/equiformer_v2/trainers/lr_scheduler/index.html b/autoapi/core/models/equiformer_v2/trainers/lr_scheduler/index.html new file mode 100644 index 000000000..a0a90e459 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/trainers/lr_scheduler/index.html @@ -0,0 +1,936 @@ + + + + + + + + + + + core.models.equiformer_v2.trainers.lr_scheduler — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.models.equiformer_v2.trainers.lr_scheduler#

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + +

CosineLRLambda

MultistepLRLambda

LRScheduler

Notes

+

+
+
+

Functions#

+ + + + + + + + + + + + +

multiply(obj, num)

cosine_lr_lambda(current_step, scheduler_params)

multistep_lr_lambda(→ float)

+
+
+core.models.equiformer_v2.trainers.lr_scheduler.multiply(obj, num)#
+
+ +
+
+core.models.equiformer_v2.trainers.lr_scheduler.cosine_lr_lambda(current_step: int, scheduler_params)#
+
+ +
+
+class core.models.equiformer_v2.trainers.lr_scheduler.CosineLRLambda(scheduler_params)#
+
+
+__call__(current_step: int)#
+
+ +
+ +
+
+core.models.equiformer_v2.trainers.lr_scheduler.multistep_lr_lambda(current_step: int, scheduler_params) float#
+
+ +
+
+class core.models.equiformer_v2.trainers.lr_scheduler.MultistepLRLambda(scheduler_params)#
+
+
+__call__(current_step: int) float#
+
+ +
+ +
+
+class core.models.equiformer_v2.trainers.lr_scheduler.LRScheduler(optimizer, config)#
+

Notes

+
  1. scheduler.step() is called for every step for OC20 training.

  2. We use “scheduler_params” in .yml to specify scheduler parameters.

  3. For cosine learning rate, we use LambdaLR with lambda function being cosine:

     scheduler: LambdaLR
     scheduler_params:
       lambda_type: cosine
       …

  4. Following 3., if cosine is used, scheduler_params in .yml looks like:

     scheduler: LambdaLR
     scheduler_params:
       lambda_type: cosine
       warmup_epochs: …
       warmup_factor: …
       lr_min_factor: …

  5. Following 3., if multistep is used, scheduler_params in .yml looks like:

     scheduler: LambdaLR
     scheduler_params:
       lambda_type: multistep
       warmup_epochs: …
       warmup_factor: …
       decay_epochs: … (list)
       decay_rate: …

A runnable PyTorch sketch of the cosine case follows this class entry.
+
+
Parameters:
+
    +
  • optimizer (obj) – torch optim object

  • +
  • config (dict) – Optim dict from the input config

  • +
+
+
+
+
+step(metrics=None, epoch=None)#
+
+ +
+
+filter_kwargs(config)#
+
+ +
+
+get_lr() float | None#
+
+ +
+ +
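The cosine case in the notes above maps onto plain PyTorch as follows. This is a hedged, self-contained sketch of the pattern (LambdaLR with a cosine lambda), not the fairchem LRScheduler itself; the warmup and decay values are hypothetical:

import math

import torch
from torch.optim.lr_scheduler import LambdaLR

# Dummy parameter so the sketch runs on its own.
param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW([param], lr=5e-4)

warmup_steps, total_steps = 100, 10_000   # hypothetical schedule lengths
warmup_factor, lr_min_factor = 0.2, 0.01  # hypothetical values mirroring the .yml keys above

def cosine_lambda(step: int) -> float:
    # Linear warmup, then cosine decay down to lr_min_factor * initial lr.
    if step < warmup_steps:
        return warmup_factor + (1.0 - warmup_factor) * step / warmup_steps
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    return lr_min_factor + 0.5 * (1.0 - lr_min_factor) * (1.0 + math.cos(math.pi * progress))

scheduler = LambdaLR(optimizer, lr_lambda=cosine_lambda)
# As the notes state, the scheduler is stepped once per training step:
# optimizer.step(); scheduler.step()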
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/equiformer_v2/transformer_block/index.html b/autoapi/core/models/equiformer_v2/transformer_block/index.html new file mode 100644 index 000000000..af8a872c3 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/transformer_block/index.html @@ -0,0 +1,926 @@ + + + + + + + + + + + core.models.equiformer_v2.transformer_block — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.equiformer_v2.transformer_block

+ +
+ +
+
+ + + + +
+ +
+

core.models.equiformer_v2.transformer_block#

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + +

SO2EquivariantGraphAttention

SO2EquivariantGraphAttention: Perform MLP attention + non-linear message passing

FeedForwardNetwork

FeedForwardNetwork: Perform feedforward network with S2 activation or gate activation

TransBlockV2

+
param sphere_channels:
+

Number of spherical channels

+
+
+

+
+
+class core.models.equiformer_v2.transformer_block.SO2EquivariantGraphAttention(sphere_channels: int, hidden_channels: int, num_heads: int, attn_alpha_channels: int, attn_value_channels: int, output_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_rotation, mappingReduced, SO3_grid, max_num_elements: int, edge_channels_list, use_atom_edge_embedding: bool = True, use_m_share_rad: bool = False, activation='scaled_silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, use_gate_act: bool = False, use_sep_s2_act: bool = True, alpha_drop: float = 0.0)#
+

Bases: torch.nn.Module

+
+
SO2EquivariantGraphAttention: Perform MLP attention + non-linear message passing

SO(2) Convolution with radial function -> S2 Activation -> SO(2) Convolution -> attention weights and non-linear messages +attention weights * non-linear messages -> Linear

+
+
+
+
Parameters:
+
    +
  • sphere_channels (int) – Number of spherical channels

  • +
  • hidden_channels (int) – Number of hidden channels used during the SO(2) conv

  • +
  • num_heads (int) – Number of attention heads

  • +
  • attn_alpha_head (int) – Number of channels for alpha vector in each attention head

  • +
  • attn_value_head (int) – Number of channels for value vector in each attention head

  • +
  • output_channels (int) – Number of output channels

  • +
  • lmax_list (list of int) – List of degrees (l) for each resolution

  • +
  • mmax_list (list of int) – List of orders (m) for each resolution

  • +
  • SO3_rotation (list of SO3_Rotation) – Class to calculate Wigner-D matrices and rotate embeddings

  • +
  • mappingReduced (CoefficientMappingModule) – Class to convert l and m indices once node embedding is rotated

  • +
  • SO3_grid (SO3_grid) – Class used to convert between the grid and spherical harmonic representations

  • +
  • max_num_elements (int) – Maximum number of atomic numbers

  • +
  • edge_channels_list (list of int) – List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. The last one will be used as hidden size when use_atom_edge_embedding is True.

  • +
  • use_atom_edge_embedding (bool) – Whether to use atomic embedding along with relative distance for edge scalar features

  • +
  • use_m_share_rad (bool) – Whether all m components within a type-L vector of one channel share radial function weights

  • +
  • activation (str) – Type of activation function

  • +
  • use_s2_act_attn (bool) – Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer

  • +
  • use_attn_renorm (bool) – Whether to re-normalize attention weights

  • +
  • use_gate_act (bool) – If True, use gate activation. Otherwise, use S2 activation.

  • +
  • use_sep_s2_act (bool) – If True, use separable S2 activation when use_gate_act is False.

  • +
  • alpha_drop (float) – Dropout rate for attention weights

  • +
+
+
+
+
+forward(x: torch.Tensor, atomic_numbers, edge_distance: torch.Tensor, edge_index)#
+
+ +
+ +
+
+class core.models.equiformer_v2.transformer_block.FeedForwardNetwork(sphere_channels: int, hidden_channels: int, output_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_grid, activation: str = 'scaled_silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True)#
+

Bases: torch.nn.Module

+

FeedForwardNetwork: Perform feedforward network with S2 activation or gate activation

+
+
Parameters:
+
    +
  • sphere_channels (int) – Number of spherical channels

  • +
  • hidden_channels (int) – Number of hidden channels used during feedforward network

  • +
  • output_channels (int) – Number of output channels

  • +
  • lmax_list (list of int) – List of degrees (l) for each resolution

  • +
  • mmax_list (list of int) – List of orders (m) for each resolution

  • +
  • SO3_grid (SO3_grid) – Class used to convert between the grid and spherical harmonic representations

  • +
  • activation (str) – Type of activation function

  • +
  • use_gate_act (bool) – If True, use gate activation. Otherwise, use S2 activation

  • +
  • use_grid_mlp (bool) – If True, use projecting to grids and performing MLPs.

  • +
  • use_sep_s2_act (bool) – If True, use separable grid MLP when use_grid_mlp is True.

  • +
+
+
+
+
+forward(input_embedding)#
+
+ +
+ +
+
+class core.models.equiformer_v2.transformer_block.TransBlockV2(sphere_channels: int, attn_hidden_channels: int, num_heads: int, attn_alpha_channels: int, attn_value_channels: int, ffn_hidden_channels: int, output_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_rotation, mappingReduced, SO3_grid, max_num_elements: int, edge_channels_list: list[int], use_atom_edge_embedding: bool = True, use_m_share_rad: bool = False, attn_activation: str = 'silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, ffn_activation: str = 'silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True, norm_type: str = 'rms_norm_sh', alpha_drop: float = 0.0, drop_path_rate: float = 0.0, proj_drop: float = 0.0)#
+

Bases: torch.nn.Module

+
+
Parameters:
+
    +
  • sphere_channels (int) – Number of spherical channels

  • +
  • attn_hidden_channels (int) – Number of hidden channels used during SO(2) graph attention

  • +
  • num_heads (int) – Number of attention heads

  • +
  • attn_alpha_head (int) – Number of channels for alpha vector in each attention head

  • +
  • attn_value_head (int) – Number of channels for value vector in each attention head

  • +
  • ffn_hidden_channels (int) – Number of hidden channels used during feedforward network

  • +
  • output_channels (int) – Number of output channels

  • +
  • lmax_list (list of int) – List of degrees (l) for each resolution

  • +
  • mmax_list (list of int) – List of orders (m) for each resolution

  • +
  • SO3_rotation (list of SO3_Rotation) – Class to calculate Wigner-D matrices and rotate embeddings

  • +
  • mappingReduced (CoefficientMappingModule) – Class to convert l and m indices once node embedding is rotated

  • +
  • SO3_grid (SO3_grid) – Class used to convert between the grid and spherical harmonic representations

  • +
  • max_num_elements (int) – Maximum number of atomic numbers

  • +
  • edge_channels_list (list of int) – List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. The last one will be used as hidden size when use_atom_edge_embedding is True.

  • +
  • use_atom_edge_embedding (bool) – Whether to use atomic embedding along with relative distance for edge scalar features

  • +
  • use_m_share_rad (bool) – Whether all m components within a type-L vector of one channel share radial function weights

  • +
  • attn_activation (str) – Type of activation function for SO(2) graph attention

  • +
  • use_s2_act_attn (bool) – Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer

  • +
  • use_attn_renorm (bool) – Whether to re-normalize attention weights

  • +
  • ffn_activation (str) – Type of activation function for feedforward network

  • +
  • use_gate_act (bool) – If True, use gate activation. Otherwise, use S2 activation

  • +
  • use_grid_mlp (bool) – If True, use projecting to grids and performing MLPs for FFN.

  • +
  • use_sep_s2_act (bool) – If True, use separable S2 activation when use_gate_act is False.

  • +
  • norm_type (str) – Type of normalization layer ([‘layer_norm’, ‘layer_norm_sh’])

  • +
  • alpha_drop (float) – Dropout rate for attention weights

  • +
  • drop_path_rate (float) – Drop path rate

  • +
  • proj_drop (float) – Dropout rate for outputs of attention and FFN

  • +
+
+
+
+
+forward(x, atomic_numbers, edge_distance, edge_index, batch)#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/equiformer_v2/wigner/index.html b/autoapi/core/models/equiformer_v2/wigner/index.html new file mode 100644 index 000000000..e180cbf66 --- /dev/null +++ b/autoapi/core/models/equiformer_v2/wigner/index.html @@ -0,0 +1,810 @@ + + + + + + + + + + + core.models.equiformer_v2.wigner — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.equiformer_v2.wigner

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.models.equiformer_v2.wigner#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

wigner_D(→ torch.Tensor)

_z_rot_mat(→ torch.Tensor)

+
+
+

Attributes#

+ + + + + + +

_Jd

+
+
+core.models.equiformer_v2.wigner._Jd#
+
+ +
+
+core.models.equiformer_v2.wigner.wigner_D(lv: int, alpha: torch.Tensor, beta: torch.Tensor, gamma: torch.Tensor) torch.Tensor#
+
+ +
+
+core.models.equiformer_v2.wigner._z_rot_mat(angle: torch.Tensor, lv: int) torch.Tensor#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/escn/escn/index.html b/autoapi/core/models/escn/escn/index.html new file mode 100644 index 000000000..f036b98b1 --- /dev/null +++ b/autoapi/core/models/escn/escn/index.html @@ -0,0 +1,1081 @@ + + + + + + + + + + + core.models.escn.escn — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.models.escn.escn#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

eSCN

Equivariant Spherical Channel Network

LayerBlock

Layer block: Perform one layer (message passing and aggregation) of the GNN

MessageBlock

Message block: Perform message passing

SO2Block

SO(2) Block: Perform SO(2) convolutions for all m (orders)

SO2Conv

SO(2) Conv: Perform an SO(2) convolution

EdgeBlock

Edge Block: Compute invariant edge representation from edge distances and atomic numbers

EnergyBlock

Energy Block: Output block computing the energy

ForceBlock

Force Block: Output block computing the per atom forces

+
+
+class core.models.escn.escn.eSCN(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_neighbors: int = 40, cutoff: float = 8.0, max_num_elements: int = 90, num_layers: int = 8, lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, sphere_channels: int = 128, hidden_channels: int = 256, edge_channels: int = 128, use_grid: bool = True, num_sphere_samples: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False)#
+

Bases: fairchem.core.models.base.BaseModel

+

Equivariant Spherical Channel Network +Paper: Reducing SO(3) Convolutions to SO(2) for Efficient Equivariant GNNs

+
+
Parameters:
+
    +
  • use_pbc (bool) – Use periodic boundary conditions

  • +
  • regress_forces (bool) – Compute forces

  • +
  • otf_graph (bool) – Compute graph On The Fly (OTF)

  • +
  • max_neighbors (int) – Maximum number of neighbors per atom

  • +
  • cutoff (float) – Maximum distance between neighboring atoms in Angstroms

  • +
  • max_num_elements (int) – Maximum atomic number

  • +
  • num_layers (int) – Number of layers in the GNN

  • +
  • lmax_list (int) – List of maximum degree of the spherical harmonics (1 to 10)

  • +
  • mmax_list (int) – List of maximum order of the spherical harmonics (0 to lmax)

  • +
  • sphere_channels (int) – Number of spherical channels (one set per resolution)

  • +
  • hidden_channels (int) – Number of hidden units in message passing

  • +
  • num_sphere_samples (int) – Number of samples used to approximate the integration of the sphere in the output blocks

  • +
  • edge_channels (int) – Number of channels for the edge invariant features

  • +
  • distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu") – Basis function used for distances

  • +
  • basis_width_scalar (float) – Width of distance basis function

  • +
  • distance_resolution (float) – Distance between distance basis functions in Angstroms

  • +
  • show_timing_info (bool) – Show timing and memory info

  • +
+
+
+
+
+property num_params: int#
+
+ +
+
+forward(data)#
+
+ +
+
+_init_edge_rot_mat(data, edge_index, edge_distance_vec)#
+
+ +
+ +
+
+class core.models.escn.escn.LayerBlock(layer_idx: int, sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], distance_expansion, max_num_elements: int, SO3_grid: fairchem.core.models.escn.so3.SO3_Grid, act)#
+

Bases: torch.nn.Module

+

Layer block: Perform one layer (message passing and aggregation) of the GNN

+
+
Parameters:
+
    +
  • layer_idx (int) – Layer number

  • +
  • sphere_channels (int) – Number of spherical channels

  • +
  • hidden_channels (int) – Number of hidden channels used during the SO(2) conv

  • +
  • edge_channels (int) – Size of invariant edge embedding

  • +
  • lmax_list (list of int) – List of degrees (l) for each resolution

  • +
  • mmax_list (list of int) – List of orders (m) for each resolution

  • +
  • distance_expansion (func) – Function used to compute distance embedding

  • +
  • max_num_elements (int) – Maximum number of atomic numbers

  • +
  • SO3_grid (SO3_grid) – Class used to convert between the grid and spherical harmonic representations

  • +
  • act (function) – Non-linear activation function

  • +
+
+
+
+
+forward(x, atomic_numbers, edge_distance, edge_index, SO3_edge_rot, mappingReduced)#
+
+ +
+ +
+
+class core.models.escn.escn.MessageBlock(layer_idx: int, sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], distance_expansion, max_num_elements: int, SO3_grid: fairchem.core.models.escn.so3.SO3_Grid, act)#
+

Bases: torch.nn.Module

+

Message block: Perform message passing

+
+
Parameters:
+
    +
  • layer_idx (int) – Layer number

  • +
  • sphere_channels (int) – Number of spherical channels

  • +
  • hidden_channels (int) – Number of hidden channels used during the SO(2) conv

  • +
  • edge_channels (int) – Size of invariant edge embedding

  • +
  • lmax_list (list of int) – List of degrees (l) for each resolution

  • +
  • mmax_list (list of int) – List of orders (m) for each resolution

  • +
  • distance_expansion (func) – Function used to compute distance embedding

  • +
  • max_num_elements (int) – Maximum number of atomic numbers

  • +
  • SO3_grid (SO3_grid) – Class used to convert between the grid and spherical harmonic representations

  • +
  • act (function) – Non-linear activation function

  • +
+
+
+
+
+forward(x, atomic_numbers, edge_distance, edge_index, SO3_edge_rot, mappingReduced)#
+
+ +
+ +
+
+class core.models.escn.escn.SO2Block(sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], act)#
+

Bases: torch.nn.Module

+

SO(2) Block: Perform SO(2) convolutions for all m (orders)

+
+
Parameters:
+
    +
  • sphere_channels (int) – Number of spherical channels

  • +
  • hidden_channels (int) – Number of hidden channels used during the SO(2) conv

  • +
  • edge_channels (int) – Size of invariant edge embedding

  • +
  • lmax_list (list of int) – List of degrees (l) for each resolution

  • +
  • mmax_list (list of int) – List of orders (m) for each resolution

  • +
  • act (function) – Non-linear activation function

  • +
+
+
+
+
+forward(x, x_edge, mappingReduced)#
+
+ +
+ +
+
+class core.models.escn.escn.SO2Conv(m: int, sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], act)#
+

Bases: torch.nn.Module

+

SO(2) Conv: Perform an SO(2) convolution

+
+
Parameters:
+
    +
  • m (int) – Order of the spherical harmonic coefficients

  • +
  • sphere_channels (int) – Number of spherical channels

  • +
  • hidden_channels (int) – Number of hidden channels used during the SO(2) conv

  • +
  • edge_channels (int) – Size of invariant edge embedding

  • +
  • lmax_list (list of int) – List of degrees (l) for each resolution

  • +
  • mmax_list (list of int) – List of orders (m) for each resolution

  • +
  • act (function) – Non-linear activation function

  • +
+
+
+
+
+forward(x_m, x_edge) torch.Tensor#
+
+ +
+ +
+
+class core.models.escn.escn.EdgeBlock(edge_channels, distance_expansion, max_num_elements, act)#
+

Bases: torch.nn.Module

+

Edge Block: Compute invariant edge representation from edge distances and atomic numbers

+
+
Parameters:
+
    +
  • edge_channels (int) – Size of invariant edge embedding

  • +
  • distance_expansion (func) – Function used to compute distance embedding

  • +
  • max_num_elements (int) – Maximum number of atomic numbers

  • +
  • act (function) – Non-linear activation function

  • +
+
+
+
+
+forward(edge_distance, source_element, target_element)#
+
+ +
+ +
+
+class core.models.escn.escn.EnergyBlock(num_channels: int, num_sphere_samples: int, act)#
+

Bases: torch.nn.Module

+

Energy Block: Output block computing the energy

+
+
Parameters:
+
    +
  • num_channels (int) – Number of channels

  • +
  • num_sphere_samples (int) – Number of samples used to approximate the integral on the sphere

  • +
  • act (function) – Non-linear activation function

  • +
+
+
+
+
+forward(x_pt) torch.Tensor#
+
+ +
+ +
+
+class core.models.escn.escn.ForceBlock(num_channels: int, num_sphere_samples: int, act)#
+

Bases: torch.nn.Module

+

Force Block: Output block computing the per atom forces

+
+
Parameters:
+
    +
  • num_channels (int) – Number of channels

  • +
  • num_sphere_samples (int) – Number of samples used to approximate the integral on the sphere

  • +
  • act (function) – Non-linear activation function

  • +
+
+
+
+
+forward(x_pt, sphere_points) torch.Tensor#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/escn/index.html b/autoapi/core/models/escn/index.html new file mode 100644 index 000000000..336331dba --- /dev/null +++ b/autoapi/core/models/escn/index.html @@ -0,0 +1,843 @@ + + + + + + + + + + + core.models.escn — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.escn

+ +
+ +
+
+ + + + +
+ +
+

core.models.escn#

+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + +

eSCN

Equivariant Spherical Channel Network

+
+
+class core.models.escn.eSCN(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_neighbors: int = 40, cutoff: float = 8.0, max_num_elements: int = 90, num_layers: int = 8, lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, sphere_channels: int = 128, hidden_channels: int = 256, edge_channels: int = 128, use_grid: bool = True, num_sphere_samples: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False)#
+

Bases: fairchem.core.models.base.BaseModel

+

Equivariant Spherical Channel Network +Paper: Reducing SO(3) Convolutions to SO(2) for Efficient Equivariant GNNs

+
+
Parameters:
+
    +
  • use_pbc (bool) – Use periodic boundary conditions

  • +
  • regress_forces (bool) – Compute forces

  • +
  • otf_graph (bool) – Compute graph On The Fly (OTF)

  • +
  • max_neighbors (int) – Maximum number of neighbors per atom

  • +
  • cutoff (float) – Maximum distance between neighboring atoms in Angstroms

  • +
  • max_num_elements (int) – Maximum atomic number

  • +
  • num_layers (int) – Number of layers in the GNN

  • +
  • lmax_list (int) – List of maximum degree of the spherical harmonics (1 to 10)

  • +
  • mmax_list (int) – List of maximum order of the spherical harmonics (0 to lmax)

  • +
  • sphere_channels (int) – Number of spherical channels (one set per resolution)

  • +
  • hidden_channels (int) – Number of hidden units in message passing

  • +
  • num_sphere_samples (int) – Number of samples used to approximate the integration of the sphere in the output blocks

  • +
  • edge_channels (int) – Number of channels for the edge invariant features

  • +
  • distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu") – Basis function used for distances

  • +
  • basis_width_scalar (float) – Width of distance basis function

  • +
  • distance_resolution (float) – Distance between distance basis functions in Angstroms

  • +
  • show_timing_info (bool) – Show timing and memory info

  • +
+
+
+
+
+property num_params: int#
+
+ +
+
+forward(data)#
+
+ +
+
+_init_edge_rot_mat(data, edge_index, edge_distance_vec)#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/escn/so3/index.html b/autoapi/core/models/escn/so3/index.html new file mode 100644 index 000000000..eeb075b5f --- /dev/null +++ b/autoapi/core/models/escn/so3/index.html @@ -0,0 +1,1074 @@ + + + + + + + + + + + core.models.escn.so3 — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.models.escn.so3#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + +

CoefficientMapping

Helper functions for coefficients used to reshape l<-->m and to get coefficients of specific degree or order

SO3_Embedding

Helper functions for irreps embedding

SO3_Rotation

Helper functions for Wigner-D rotations

SO3_Grid

Helper functions for grid representation of the irreps

+
+
+

Attributes#

+ + + + + + +

_Jd

+
+
+core.models.escn.so3._Jd#
+
+ +
+
+class core.models.escn.so3.CoefficientMapping(lmax_list: list[int], mmax_list: list[int], device)#
+

Helper functions for coefficients used to reshape l<–>m and to get coefficients of specific degree or order

+
+
Parameters:
+
    +
  • lmax_list (list[int]) – List of maximum degree of the spherical harmonics

  • +
  • mmax_list (list[int]) – List of maximum order of the spherical harmonics

  • +
  • device – Device of the output

  • +
+
+
+
+
+complex_idx(m, lmax: int = -1)#
+
+ +
+
+coefficient_idx(lmax: int, mmax: int) torch.Tensor#
+
+ +
+ +
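The reshaping that CoefficientMapping manages follows from how spherical harmonic coefficients are indexed. The snippet below is not the library implementation; it only enumerates the (degree, order) layout for a single resolution to show how truncating the order at mmax shrinks the coefficient count.

# Illustration only: enumerate the (l, m) pairs kept for a given lmax/mmax.
def coefficient_layout(lmax: int, mmax: int) -> list[tuple[int, int]]:
    return [
        (l, m)
        for l in range(lmax + 1)
        for m in range(-min(l, mmax), min(l, mmax) + 1)
    ]

print(len(coefficient_layout(6, 6)))  # (lmax + 1)**2 = 49 coefficients
print(len(coefficient_layout(6, 2)))  # truncating the order at mmax = 2 leaves 29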
+
+class core.models.escn.so3.SO3_Embedding(length: int, lmax_list: list[int], num_channels: int, device: torch.device, dtype: torch.dtype)#
+

Bases: torch.nn.Module

+

Helper functions for irreps embedding

+
+
Parameters:
+
    +
  • length (int) – Batch size

  • +
  • lmax_list (list[int]) – List of maximum degree of the spherical harmonics

  • +
  • num_channels (int) – Number of channels

  • +
  • device – Device of the output

  • +
  • dtype – Type of the output tensors

  • +
+
+
+
+
+clone() SO3_Embedding#
+
+ +
+
+set_embedding(embedding) None#
+
+ +
+
+set_lmax_mmax(lmax_list, mmax_list) None#
+
+ +
+
+_expand_edge(edge_index) None#
+
+ +
+
+expand_edge(edge_index) SO3_Embedding#
+
+ +
+
+_reduce_edge(edge_index, num_nodes: int) None#
+
+ +
+
+_m_primary(mapping) None#
+
+ +
+
+_l_primary(mapping) None#
+
+ +
+
+_rotate(SO3_rotation, lmax_list, mmax_list) None#
+
+ +
+
+_rotate_inv(SO3_rotation, mappingReduced) None#
+
+ +
+
+_grid_act(SO3_grid, act, mappingReduced) None#
+
+ +
+
+to_grid(SO3_grid, lmax: int = -1) torch.Tensor#
+
+ +
+
+_from_grid(x_grid, SO3_grid, lmax: int = -1) None#
+
+ +
+ +
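A hedged construction sketch using the constructor signature shown above; the import path follows this page's module path and is an assumption.

# Sketch: allocate an irreps embedding for 10 nodes at a single resolution.
import torch
from fairchem.core.models.escn.so3 import SO3_Embedding

emb = SO3_Embedding(
    length=10,                 # batch size, per the documented parameter
    lmax_list=[6],             # one resolution with degree up to 6
    num_channels=128,
    device=torch.device("cpu"),
    dtype=torch.float32,
)
emb_copy = emb.clone()         # documented helper returning a new SO3_Embedding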
+
+class core.models.escn.so3.SO3_Rotation(rot_mat3x3: torch.Tensor, lmax: list[int])#
+

Bases: torch.nn.Module

+

Helper functions for Wigner-D rotations

+
+
Parameters:
+
    +
  • rot_mat3x3 (tensor) – Rotation matrix

  • +
  • lmax (list[int]) – List of maximum degree of the spherical harmonics

  • +
+
+
+
+
+set_lmax(lmax) None#
+
+ +
+
+rotate(embedding, out_lmax, out_mmax) torch.Tensor#
+
+ +
+
+rotate_inv(embedding, in_lmax, in_mmax) torch.Tensor#
+
+ +
+
+RotationToWignerDMatrix(edge_rot_mat: torch.Tensor, start_lmax: int, end_lmax: int) torch.Tensor#
+
+ +
+
+wigner_D(lval, alpha, beta, gamma)#
+
+ +
+
+_z_rot_mat(angle: torch.Tensor, lv: int) torch.Tensor#
+
+ +
+ +
+
+class core.models.escn.so3.SO3_Grid(lmax: int, mmax: int)#
+

Bases: torch.nn.Module

+

Helper functions for grid representation of the irreps

+
+
Parameters:
+
    +
  • lmax (int) – Maximum degree of the spherical harmonics

  • +
  • mmax (int) – Maximum order of the spherical harmonics

  • +
+
+
+
+
+_initialize(device: torch.device) None#
+
+ +
+
+get_to_grid_mat(device: torch.device)#
+
+ +
+
+get_from_grid_mat(device: torch.device)#
+
+ +
+
+to_grid(embedding: torch.Tensor, lmax: int, mmax: int) torch.Tensor#
+
+ +
+
+from_grid(grid: torch.Tensor, lmax: int, mmax: int) torch.Tensor#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/gemnet/index.html b/autoapi/core/models/gemnet/gemnet/index.html new file mode 100644 index 000000000..a2799260e --- /dev/null +++ b/autoapi/core/models/gemnet/gemnet/index.html @@ -0,0 +1,892 @@ + + + + + + + + + + + core.models.gemnet.gemnet — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.models.gemnet.gemnet#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

GemNetT

GemNet-T, triplets-only variant of GemNet

+
+
+class core.models.gemnet.gemnet.GemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', num_elements: int = 83, scale_file: str | None = None)#
+

Bases: fairchem.core.models.base.BaseModel

+

GemNet-T, triplets-only variant of GemNet

+
+
Parameters:
+
    +
  • bond_feat_dim (int)

  • +
  • num_atoms (int)

  • +
  • num_targets (int) – Number of prediction targets.

  • +
  • num_spherical (int) – Controls maximum frequency.

  • +
  • num_radial (int) – Controls maximum frequency.

  • +
  • num_blocks (int) – Number of building blocks to be stacked.

  • +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_trip (int) – (Down-projected) Embedding size in the triplet message passing block.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • emb_size_bil_trip (int) – Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.

  • +
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • +
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • +
  • num_concat (int) – Number of residual blocks after the concatenation.

  • +
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • +
  • regress_forces (bool) – Whether to predict forces. Default: True

  • +
  • direct_forces (bool) – If True predict forces based on aggregation of interatomic directions. +If False predict forces based on negative gradient of energy potential.

  • +
  • cutoff (float) – Embedding cutoff for interatomic directions in Angstrom.

  • +
  • rbf (dict) – Name and hyperparameters of the radial basis function.

  • +
  • envelope (dict) – Name and hyperparameters of the envelope function.

  • +
  • cbf (dict) – Name and hyperparameters of the cosine basis function.

  • +
  • extensive (bool) – Whether the output should be extensive (proportional to the number of atoms)

  • +
  • output_init (str) – Initialization method for the final dense layer.

  • +
  • activation (str) – Name of the activation function.

  • +
  • scale_file (str) – Path to the json file containing the scaling factors.

  • +
+
+
+
+
+property num_params#
+
+ +
+
+get_triplets(edge_index, num_atoms)#
+

Get all b->a for each edge c->a. +It is possible that b=c, as long as the edges are distinct.

+
+
Returns:
+

    +
  • id3_ba (torch.Tensor, shape (num_triplets,)) – Indices of input edge b->a of each triplet b->a<-c

  • +
  • id3_ca (torch.Tensor, shape (num_triplets,)) – Indices of output edge c->a of each triplet b->a<-c

  • +
  • id3_ragged_idx (torch.Tensor, shape (num_triplets,)) – Indices enumerating the copies of id3_ca for creating a padded matrix

  • +
+

+
+
+
+ +
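The indices returned by get_triplets can be understood with a small brute-force enumeration. The loop below is illustration only (the library builds these indices in a vectorized way); it pairs every input edge b->a with every distinct output edge c->a that shares the target atom a.

# Illustration only: brute-force triplet indices for a tiny directed graph.
import torch

edge_index = torch.tensor([[0, 1, 2, 1],   # source atoms (b or c)
                           [1, 0, 1, 2]])  # target atoms (a)
id3_ba, id3_ca = [], []
for ca in range(edge_index.shape[1]):              # output edge c->a
    for ba in range(edge_index.shape[1]):          # input edge b->a
        if ba != ca and edge_index[1, ba] == edge_index[1, ca]:
            id3_ba.append(ba)
            id3_ca.append(ca)
print(id3_ba, id3_ca)  # [2, 0] [0, 2]: edges 0 and 2 both point at atom 1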
+
+select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, inverse_neg) torch.Tensor#
+
+ +
+
+reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector)#
+

Reorder edges to make finding counter-directional edges easier.

+

Some edges are only present in one direction in the data, +since every atom has a maximum number of neighbors. Since we only use i->j +edges here, we lose some j->i edges and add others by +making it symmetric. +We could fix this by merging edge_index with its counter-edges, +including the cell_offsets, and then running torch.unique. +But this does not seem worth it.

+
+ +
+
+select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None)#
+
+ +
+
+generate_interaction_graph(data)#
+
+ +
+
+forward(data)#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/index.html b/autoapi/core/models/gemnet/index.html new file mode 100644 index 000000000..b4ec454b4 --- /dev/null +++ b/autoapi/core/models/gemnet/index.html @@ -0,0 +1,921 @@ + + + + + + + + + + + core.models.gemnet — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.models.gemnet#

+
+

Subpackages#

+ +
+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + +

GemNetT

GemNet-T, triplets-only variant of GemNet

+
+
+class core.models.gemnet.GemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', num_elements: int = 83, scale_file: str | None = None)#
+

Bases: fairchem.core.models.base.BaseModel

+

GemNet-T, triplets-only variant of GemNet

+
+
Parameters:
+
    +
  • bond_feat_dim (int)

  • +
  • num_atoms (int)

  • +
  • num_targets (int) – Number of prediction targets.

  • +
  • num_spherical (int) – Controls maximum frequency.

  • +
  • num_radial (int) – Controls maximum frequency.

  • +
  • num_blocks (int) – Number of building blocks to be stacked.

  • +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_trip (int) – (Down-projected) Embedding size in the triplet message passing block.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • emb_size_bil_trip (int) – Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.

  • +
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • +
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • +
  • num_concat (int) – Number of residual blocks after the concatenation.

  • +
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • +
  • regress_forces (bool) – Whether to predict forces. Default: True

  • +
  • direct_forces (bool) – If True predict forces based on aggregation of interatomic directions. +If False predict forces based on negative gradient of energy potential.

  • +
  • cutoff (float) – Embedding cutoff for interatomic directions in Angstrom.

  • +
  • rbf (dict) – Name and hyperparameters of the radial basis function.

  • +
  • envelope (dict) – Name and hyperparameters of the envelope function.

  • +
  • cbf (dict) – Name and hyperparameters of the cosine basis function.

  • +
  • extensive (bool) – Whether the output should be extensive (proportional to the number of atoms)

  • +
  • output_init (str) – Initialization method for the final dense layer.

  • +
  • activation (str) – Name of the activation function.

  • +
  • scale_file (str) – Path to the json file containing the scaling factors.

  • +
+
+
+
+
+property num_params#
+
+ +
+
+get_triplets(edge_index, num_atoms)#
+

Get all b->a for each edge c->a. +It is possible that b=c, as long as the edges are distinct.

+
+
Returns:
+

    +
  • id3_ba (torch.Tensor, shape (num_triplets,)) – Indices of input edge b->a of each triplet b->a<-c

  • +
  • id3_ca (torch.Tensor, shape (num_triplets,)) – Indices of output edge c->a of each triplet b->a<-c

  • +
  • id3_ragged_idx (torch.Tensor, shape (num_triplets,)) – Indices enumerating the copies of id3_ca for creating a padded matrix

  • +
+

+
+
+
+ +
+
+select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, inverse_neg) torch.Tensor#
+
+ +
+
+reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector)#
+

Reorder edges to make finding counter-directional edges easier.

+

Some edges are only present in one direction in the data, +since every atom has a maximum number of neighbors. Since we only use i->j +edges here, we lose some j->i edges and add others by +making it symmetric. +We could fix this by merging edge_index with its counter-edges, +including the cell_offsets, and then running torch.unique. +But this does not seem worth it.

+
+ +
+
+select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None)#
+
+ +
+
+generate_interaction_graph(data)#
+
+ +
+
+forward(data)#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/initializers/index.html b/autoapi/core/models/gemnet/initializers/index.html new file mode 100644 index 000000000..235eb73d2 --- /dev/null +++ b/autoapi/core/models/gemnet/initializers/index.html @@ -0,0 +1,801 @@ + + + + + + + + + + + core.models.gemnet.initializers — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.gemnet.initializers

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.models.gemnet.initializers#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

_standardize(kernel)

Makes sure that N*Var(W) = 1 and E[W] = 0

he_orthogonal_init(→ torch.Tensor)

Generate a weight matrix with variance according to He (Kaiming) initialization.

+
+
+core.models.gemnet.initializers._standardize(kernel)#
+

Makes sure that N*Var(W) = 1 and E[W] = 0

+
+ +
+
+core.models.gemnet.initializers.he_orthogonal_init(tensor: torch.Tensor) torch.Tensor#
+

Generate a weight matrix with variance according to He (Kaiming) initialization. +Based on a random (semi-)orthogonal matrix. Neural networks +are expected to learn better when their features are decorrelated +(as stated in e.g. “Reducing overfitting in deep networks by decorrelating representations”, +“Dropout: a simple way to prevent neural networks from overfitting”, +“Exact solutions to the nonlinear dynamics of learning in deep linear neural networks”)

+
+ +
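A short usage sketch, assuming the function is importable from this page's module path; the expected standard deviation follows from the N*Var(W) = 1 property stated above.

# Hedged sketch: apply the documented initializer to a linear layer's weight.
import torch
from fairchem.core.models.gemnet.initializers import he_orthogonal_init

linear = torch.nn.Linear(64, 128, bias=False)
w = he_orthogonal_init(linear.weight)   # documented to return a torch.Tensor
# With fan_in = 64 and N * Var(W) = 1, the standard deviation should be
# roughly 1 / sqrt(64) = 0.125.
print(w.std())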
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/layers/atom_update_block/index.html b/autoapi/core/models/gemnet/layers/atom_update_block/index.html new file mode 100644 index 000000000..8cc37a025 --- /dev/null +++ b/autoapi/core/models/gemnet/layers/atom_update_block/index.html @@ -0,0 +1,876 @@ + + + + + + + + + + + core.models.gemnet.layers.atom_update_block — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.gemnet.layers.atom_update_block

+ +
+ +
+
+ + + + +
+ +
+

core.models.gemnet.layers.atom_update_block#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

AtomUpdateBlock

Aggregate the message embeddings of the atoms

OutputBlock

Combines the atom update block and subsequent final dense layer.

+
+
+class core.models.gemnet.layers.atom_update_block.AtomUpdateBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, activation=None, name: str = 'atom_update')#
+

Bases: torch.nn.Module

+

Aggregate the message embeddings of the atoms

+
+
Parameters:
+
    +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • nHidden (int) – Number of residual blocks.

  • +
  • activation (callable/str) – Name of the activation function to use in the dense layers.

  • +
+
+
+
+
+get_mlp(units_in, units, nHidden, activation)#
+
+ +
+
+forward(h, m, rbf, id_j)#
+
+
Returns:
+

h – Atom embedding.

+
+
Return type:
+

torch.Tensor, shape=(nAtoms, emb_size_atom)

+
+
+
+ +
+ +
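The aggregation described above amounts to summing each edge message into its target atom. The snippet below is a conceptual stand-in for that step, not the block itself.

# Illustration only: sum per-edge messages m into per-atom embeddings h.
import torch

n_atoms, emb_size = 3, 4
m = torch.randn(5, emb_size)              # one message per edge
id_j = torch.tensor([0, 0, 1, 2, 2])      # target atom index of each edge
h = torch.zeros(n_atoms, emb_size).index_add_(0, id_j, m)
print(h.shape)                            # torch.Size([3, 4]), i.e. (nAtoms, emb_size)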
+
+class core.models.gemnet.layers.atom_update_block.OutputBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, num_targets: int, activation=None, direct_forces: bool = True, output_init: str = 'HeOrthogonal', name: str = 'output', **kwargs)#
+

Bases: AtomUpdateBlock

+

Combines the atom update block and subsequent final dense layer.

+
+
Parameters:
+
    +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • nHidden (int) – Number of residual blocks.

  • +
  • num_targets (int) – Number of targets.

  • +
  • activation (str) – Name of the activation function to use in the dense layers except for the final dense layer.

  • +
  • direct_forces (bool) – If true directly predict forces without taking the gradient of the energy potential.

  • +
  • output_init (int) – Kernel initializer of the final dense layer.

  • +
+
+
+
+
+reset_parameters() None#
+
+ +
+
+forward(h, m, rbf, id_j)#
+
+
Returns:
+

    +
  • (E, F) (tuple)

  • +
  • - E (torch.Tensor, shape=(nAtoms, num_targets))

  • +
  • - F (torch.Tensor, shape=(nEdges, num_targets))

  • +
  • Energy and force prediction

  • +
+

+
+
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/layers/base_layers/index.html b/autoapi/core/models/gemnet/layers/base_layers/index.html new file mode 100644 index 000000000..cfac6a2b0 --- /dev/null +++ b/autoapi/core/models/gemnet/layers/base_layers/index.html @@ -0,0 +1,951 @@ + + + + + + + + + + + core.models.gemnet.layers.base_layers — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.gemnet.layers.base_layers

+ +
+ +
+
+ + + + +
+ +
+

core.models.gemnet.layers.base_layers#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + +

Dense

Combines dense layer with scaling for swish activation.

ScaledSiLU

Base class for all neural network modules.

SiQU

Base class for all neural network modules.

ResidualLayer

Residual block with output scaled by 1/sqrt(2).

+
+
+class core.models.gemnet.layers.base_layers.Dense(in_features, out_features, bias: bool = False, activation=None)#
+

Bases: torch.nn.Module

+

Combines dense layer with scaling for swish activation.

+
+
Parameters:
+
    +
  • units (int) – Output embedding size.

  • +
  • activation (str) – Name of the activation function to use.

  • +
  • bias (bool) – True if use bias.

  • +
+
+
+
+
+reset_parameters(initializer=he_orthogonal_init) None#
+
+ +
+
+forward(x)#
+
+ +
+ +
+
+class core.models.gemnet.layers.base_layers.ScaledSiLU#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x)#
+
+ +
+ +
+
+class core.models.gemnet.layers.base_layers.SiQU#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x)#
+
+ +
+ +
+
+class core.models.gemnet.layers.base_layers.ResidualLayer(units: int, nLayers: int = 2, layer=Dense, **layer_kwargs)#
+

Bases: torch.nn.Module

+

Residual block with output scaled by 1/sqrt(2).

+
+
Parameters:
+
    +
  • units (int) – Output embedding size.

  • +
  • nLayers (int) – Number of dense layers.

  • +
  • layer_kwargs (str) – Keyword arguments for initializing the layers.

  • +
+
+
+
+
+forward(input)#
+
+ +
+ +
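A conceptual sketch of the residual pattern described above: a small stack of dense layers whose output is added to the input and scaled by 1/sqrt(2) to keep the activation variance roughly constant. It is an illustration, not the library's ResidualLayer.

# Illustration only: residual stack with 1/sqrt(2) output scaling.
import math
import torch

class TinyResidual(torch.nn.Module):
    def __init__(self, units: int, n_layers: int = 2):
        super().__init__()
        self.layers = torch.nn.ModuleList(
            [torch.nn.Linear(units, units, bias=False) for _ in range(n_layers)]
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h = x
        for layer in self.layers:
            h = torch.nn.functional.silu(layer(h))
        return (x + h) * (1 / math.sqrt(2))

print(TinyResidual(16)(torch.randn(4, 16)).shape)  # torch.Size([4, 16])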
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/layers/basis_utils/index.html b/autoapi/core/models/gemnet/layers/basis_utils/index.html new file mode 100644 index 000000000..b0ec409ae --- /dev/null +++ b/autoapi/core/models/gemnet/layers/basis_utils/index.html @@ -0,0 +1,923 @@ + + + + + + + + + + + core.models.gemnet.layers.basis_utils — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.gemnet.layers.basis_utils

+ +
+ +
+
+ + + + +
+ +
+

core.models.gemnet.layers.basis_utils#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + +

Jn(r, n)

numerical spherical bessel functions of order n

Jn_zeros(n, k)

Compute the first k zeros of the spherical bessel functions up to order n (excluded)

spherical_bessel_formulas(n)

Computes the sympy formulas for the spherical bessel functions up to order n (excluded)

bessel_basis(n, k)

Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to

sph_harm_prefactor(l_degree, m_order)

Computes the constant pre-factor for the spherical harmonic of degree l and order m.

associated_legendre_polynomials(L_maxdegree[, ...])

Computes string formulas of the associated legendre polynomials up to degree L (excluded).

real_sph_harm(L_maxdegree, use_theta[, use_phi, ...])

Computes formula strings of the real part of the spherical harmonics up to degree L (excluded).

+
+
+core.models.gemnet.layers.basis_utils.Jn(r: float, n: int)#
+

Numerical spherical Bessel functions of order n

+
+ +
+
+core.models.gemnet.layers.basis_utils.Jn_zeros(n: int, k: int)#
+

Compute the first k zeros of the spherical Bessel functions up to order n (excluded)

+
+ +
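For orientation, the quantities these two helpers work with can be cross-checked against SciPy's spherical Bessel routines. The snippet below is a reference check only, not the fairchem implementation, and assumes SciPy is installed.

# Reference check with SciPy's spherical Bessel function of the first kind.
import numpy as np
from scipy import special

print(special.spherical_jn(0, 0.0))    # j_0(0) = 1
# j_0(r) = sin(r) / r, so its first zero lies at r = pi.
print(special.spherical_jn(0, np.pi))  # ~0 up to floating point error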
+
+core.models.gemnet.layers.basis_utils.spherical_bessel_formulas(n: int)#
+

Computes the sympy formulas for the spherical Bessel functions up to order n (excluded)

+
+ +
+
+core.models.gemnet.layers.basis_utils.bessel_basis(n: int, k: int)#
+

Compute the sympy formulas for the normalized and rescaled spherical Bessel functions up to +order n (excluded) and maximum frequency k (excluded).

+
+
Returns:
+

+
list

Bessel basis formulas taking in a single argument x. +Has length n, where each element has length k, so there are n*k formulas in total.

+
+
+

+
+
Return type:
+

bess_basis

+
+
+
+ +
+
+core.models.gemnet.layers.basis_utils.sph_harm_prefactor(l_degree: int, m_order: int)#
+

Computes the constant pre-factor for the spherical harmonic of degree l and order m.

+
+
Parameters:
+
    +
  • l_degree (int) – Degree of the spherical harmonic. l >= 0

  • +
  • m_order (int) – Order of the spherical harmonic. -l <= m <= l

  • +
+
+
Returns:
+

factor

+
+
Return type:
+

float

+
+
+
+ +
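The standard normalization prefactor for spherical harmonics is sqrt((2l + 1) / (4*pi) * (l - |m|)! / (l + |m|)!). The snippet below evaluates that textbook formula directly; assuming sph_harm_prefactor follows the same convention, the two should agree.

# Direct evaluation of the textbook prefactor (assumption: same convention).
import math

def prefactor(l_degree: int, m_order: int) -> float:
    m = abs(m_order)
    return math.sqrt(
        (2 * l_degree + 1) / (4 * math.pi)
        * math.factorial(l_degree - m) / math.factorial(l_degree + m)
    )

print(prefactor(0, 0))  # 1 / sqrt(4 * pi) ~= 0.2821
print(prefactor(1, 1))  # sqrt(3 / (8 * pi)) ~= 0.3455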
+
+core.models.gemnet.layers.basis_utils.associated_legendre_polynomials(L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True)#
+

Computes string formulas of the associated legendre polynomials up to degree L (excluded).

+
+
Parameters:
+
    +
  • L_maxdegree (int) – Degree up to which to calculate the associated legendre polynomials (degree L is excluded).

  • +
  • zero_m_only (bool) – If True only calculate the polynomials for the polynomials where m=0.

  • +
  • pos_m_only (bool) – If True only calculate the polynomials for the polynomials where m>=0. Overwritten by zero_m_only.

  • +
+
+
Returns:
+

polynomials – Contains the sympy functions of the polynomials (in total L many if zero_m_only is True else L^2 many).

+
+
Return type:
+

list

+
+
+
+ +
+
+core.models.gemnet.layers.basis_utils.real_sph_harm(L_maxdegree: int, use_theta: bool, use_phi: bool = True, zero_m_only: bool = True)#
+

Computes formula strings of the real part of the spherical harmonics up to degree L (excluded). +Variables are either spherical coordinates phi and theta (or Cartesian coordinates x,y,z) on the UNIT SPHERE.

+
+
Parameters:
+
    +
  • L_maxdegree (int) – Degree up to which to calculate the spherical harmonics (degree L is excluded).

  • +
  • use_theta (bool) –

      +
    • True: Expects the input of the formula strings to contain theta.

    • +
    • False: Expects the input of the formula strings to contain z.

    • +
    +

  • +
  • use_phi (bool) –

      +
    • True: Expects the input of the formula strings to contain phi.

    • +
    • False: Expects the input of the formula strings to contain x and y.

    • +
    +

    Does nothing if zero_m_only is True

    +

  • +
  • zero_m_only (bool) – If True only calculate the harmonics where m=0.

  • +
+
+
Returns:
+

Y_lm_real – Computes formula strings of the real part of the spherical harmonics up +to degree L (where degree L is not excluded). +In total L^2 spherical harmonics exist up to degree L (excluded). However, if zero_m_only is True, then +the total count is reduced to only L.

+
+
Return type:
+

list

+
+
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/layers/efficient/index.html b/autoapi/core/models/gemnet/layers/efficient/index.html new file mode 100644 index 000000000..697456653 --- /dev/null +++ b/autoapi/core/models/gemnet/layers/efficient/index.html @@ -0,0 +1,883 @@ + + + + + + + + + + + core.models.gemnet.layers.efficient — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.models.gemnet.layers.efficient#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

EfficientInteractionDownProjection

Down projection in the efficient reformulation.

EfficientInteractionBilinear

Efficient reformulation of the bilinear layer and subsequent summation.

+
+
+class core.models.gemnet.layers.efficient.EfficientInteractionDownProjection(num_spherical: int, num_radial: int, emb_size_interm: int)#
+

Bases: torch.nn.Module

+

Down projection in the efficient reformulation.

+
+
Parameters:
+
    +
  • emb_size_interm (int) – Intermediate embedding size (down-projection size).

  • +
  • kernel_initializer (callable) – Initializer of the weight matrix.

  • +
+
+
+
+
+reset_parameters() None#
+
+ +
+
+forward(rbf, sph, id_ca, id_ragged_idx)#
+
+
Parameters:
+
    +
  • rbf (torch.Tensor, shape=(1, nEdges, num_radial))

  • +
  • sph (torch.Tensor, shape=(nEdges, Kmax, num_spherical))

  • +
  • id_ca

  • +
  • id_ragged_idx

  • +
+
+
Returns:
+

    +
  • rbf_W1 (torch.Tensor, shape=(nEdges, emb_size_interm, num_spherical))

  • +
  • sph (torch.Tensor, shape=(nEdges, Kmax, num_spherical)) – Kmax = maximum number of neighbors of the edges

  • +
+

+
+
+
+ +
+ +
+
+class core.models.gemnet.layers.efficient.EfficientInteractionBilinear(emb_size: int, emb_size_interm: int, units_out: int)#
+

Bases: torch.nn.Module

+

Efficient reformulation of the bilinear layer and subsequent summation.

+
+
Parameters:
+
    +
  • units_out (int) – Embedding output size of the bilinear layer.

  • +
  • kernel_initializer (callable) – Initializer of the weight matrix.

  • +
+
+
+
+
+reset_parameters() None#
+
+ +
+
+forward(basis, m, id_reduce, id_ragged_idx) torch.Tensor#
+
+
Parameters:
+
    +
  • basis

  • +
  • m (quadruplets: m = m_db , triplets: m = m_ba)

  • +
  • id_reduce

  • +
  • id_ragged_idx

  • +
+
+
Returns:
+

m_ca – Edge embeddings.

+
+
Return type:
+

torch.Tensor, shape=(nEdges, units_out)

+
+
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/layers/embedding_block/index.html b/autoapi/core/models/gemnet/layers/embedding_block/index.html new file mode 100644 index 000000000..9baaf47a5 --- /dev/null +++ b/autoapi/core/models/gemnet/layers/embedding_block/index.html @@ -0,0 +1,857 @@ + + + + + + + + + + + core.models.gemnet.layers.embedding_block — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.gemnet.layers.embedding_block

+ +
+ +
+
+ + + + +
+ +
+

core.models.gemnet.layers.embedding_block#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

AtomEmbedding

Initial atom embeddings based on the atom type

EdgeEmbedding

Edge embedding based on the concatenation of atom embeddings and subsequent dense layer.

+
+
+class core.models.gemnet.layers.embedding_block.AtomEmbedding(emb_size, num_elements: int)#
+

Bases: torch.nn.Module

+

Initial atom embeddings based on the atom type

+
+
Parameters:
+

emb_size (int) – Atom embeddings size

+
+
+
+
+forward(Z)#
+
+
Returns:
+

h – Atom embeddings.

+
+
Return type:
+

torch.Tensor, shape=(nAtoms, emb_size)

+
+
+
+ +
+ +
+
+class core.models.gemnet.layers.embedding_block.EdgeEmbedding(atom_features, edge_features, out_features, activation=None)#
+

Bases: torch.nn.Module

+

Edge embedding based on the concatenation of atom embeddings and subsequent dense layer.

+
+
Parameters:
+
    +
  • emb_size (int) – Embedding size after the dense layer.

  • +
  • activation (str) – Activation function used in the dense layer.

  • +
+
+
+
+
+forward(h, m_rbf, idx_s, idx_t)#
+
+
Parameters:
+
    +
  • h

  • +
  • m_rbf (shape (nEdges, nFeatures)) – in embedding block: m_rbf = rbf ; In interaction block: m_rbf = m_st

  • +
  • idx_s

  • +
  • idx_t

  • +
+
+
Returns:
+

m_st – Edge embeddings.

+
+
Return type:
+

torch.Tensor, shape=(nEdges, emb_size)

+
+
+
+ +
+ +
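A conceptual sketch of the two blocks above: a lookup table over atomic numbers for AtomEmbedding, and a dense layer applied to the concatenated source embedding, target embedding, and radial features for EdgeEmbedding. The shapes follow the documented forward() contracts; the concatenation order is an assumption made for illustration.

# Illustration only: atom lookup plus edge embedding from concatenated features.
import torch

num_elements, emb_size, num_radial = 83, 16, 8
atom_emb = torch.nn.Embedding(num_elements, emb_size)
edge_dense = torch.nn.Linear(2 * emb_size + num_radial, emb_size, bias=False)

Z = torch.tensor([1, 1, 8])          # atomic numbers of a toy water-like system
idx_s = torch.tensor([0, 1])         # source atom of each edge
idx_t = torch.tensor([2, 2])         # target atom of each edge
m_rbf = torch.rand(2, num_radial)    # radial basis features per edge

h = atom_emb(Z)                                                   # (nAtoms, emb_size)
m_st = edge_dense(torch.cat([h[idx_s], h[idx_t], m_rbf], dim=-1))  # (nEdges, emb_size)
print(h.shape, m_st.shape)           # (3, 16) and (2, 16)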
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/layers/index.html b/autoapi/core/models/gemnet/layers/index.html new file mode 100644 index 000000000..0c2c76c9c --- /dev/null +++ b/autoapi/core/models/gemnet/layers/index.html @@ -0,0 +1,766 @@ + + + + + + + + + + + core.models.gemnet.layers — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.gemnet.layers

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.models.gemnet.layers#

+
+

Submodules#

+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/layers/interaction_block/index.html b/autoapi/core/models/gemnet/layers/interaction_block/index.html new file mode 100644 index 000000000..45d694b04 --- /dev/null +++ b/autoapi/core/models/gemnet/layers/interaction_block/index.html @@ -0,0 +1,866 @@ + + + + + + + + + + + core.models.gemnet.layers.interaction_block — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.gemnet.layers.interaction_block

+ +
+ +
+
+ + + + +
+ +
+

core.models.gemnet.layers.interaction_block#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

InteractionBlockTripletsOnly

Interaction block for GemNet-T/dT.

TripletInteraction

Triplet-based message passing block.

+
+
+class core.models.gemnet.layers.interaction_block.InteractionBlockTripletsOnly(emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, activation: str | None = None, name: str = 'Interaction')#
+

Bases: torch.nn.Module

+

Interaction block for GemNet-T/dT.

+
+
Parameters:
+
    +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_trip (int) – (Down-projected) Embedding size in the triplet message passing block.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • emb_size_bil_trip (int) – Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.

  • +
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • +
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • +
  • num_concat (int) – Number of residual blocks after the concatenation.

  • +
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • +
  • activation (str) – Name of the activation function to use in the dense layers except for the final dense layer.

  • +
+
+
+
+
+forward(h: torch.Tensor, m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca, rbf_h, idx_s, idx_t)#
+
+
Returns:
+

    +
  • h (torch.Tensor, shape=(nEdges, emb_size_atom)) – Atom embeddings.

  • +
  • m (torch.Tensor, shape=(nEdges, emb_size_edge)) – Edge embeddings (c->a).

  • +
+

+
+
+
+ +
+ +
+
+class core.models.gemnet.layers.interaction_block.TripletInteraction(emb_size_edge: int, emb_size_trip: int, emb_size_bilinear: int, emb_size_rbf: int, emb_size_cbf: int, activation: str | None = None, name: str = 'TripletInteraction', **kwargs)#
+

Bases: torch.nn.Module

+

Triplet-based message passing block.

+
+
Parameters:
+
    +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_trip (int) – (Down-projected) Embedding size of the edge embeddings after the hadamard product with rbf.

  • +
  • emb_size_bilinear (int) – Embedding size of the edge embeddings after the bilinear layer.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • activation (str) – Name of the activation function to use in the dense layers except for the final dense layer.

  • +
+
+
+
+
+forward(m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca)#
+
+
Returns:
+

m – Edge embeddings (c->a).

+
+
Return type:
+

torch.Tensor, shape=(nEdges, emb_size_edge)

+
+
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/layers/radial_basis/index.html b/autoapi/core/models/gemnet/layers/radial_basis/index.html new file mode 100644 index 000000000..4efdcbe65 --- /dev/null +++ b/autoapi/core/models/gemnet/layers/radial_basis/index.html @@ -0,0 +1,932 @@ + + + + + + + + + + + core.models.gemnet.layers.radial_basis — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.models.gemnet.layers.radial_basis#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + +

PolynomialEnvelope

Polynomial envelope function that ensures a smooth cutoff.

ExponentialEnvelope

Exponential envelope function that ensures a smooth cutoff,

SphericalBesselBasis

1D spherical Bessel basis

BernsteinBasis

Bernstein polynomial basis,

RadialBasis

+
param num_radial:
+

Controls maximum frequency.

+
+
+

+
+
+class core.models.gemnet.layers.radial_basis.PolynomialEnvelope(exponent: int)#
+

Bases: torch.nn.Module

+

Polynomial envelope function that ensures a smooth cutoff.

+
+
Parameters:
+

exponent (int) – Exponent of the envelope function.

+
+
+
+
+forward(d_scaled: torch.Tensor) torch.Tensor#
+
+ +
+ +
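A self-contained sketch of a polynomial envelope with a smooth cutoff, using DimeNet-style coefficients; whether PolynomialEnvelope uses exactly these coefficients is an assumption, but the qualitative behaviour (value 1 at d_scaled = 0, smooth decay to 0 at the scaled cutoff d_scaled = 1) is what the class provides.

# Illustration only: DimeNet-style polynomial envelope (assumed coefficients).
import torch

def polynomial_envelope(d_scaled: torch.Tensor, exponent: int = 5) -> torch.Tensor:
    p = exponent
    a = -(p + 1) * (p + 2) / 2
    b = p * (p + 2)
    c = -p * (p + 1) / 2
    env = 1 + a * d_scaled**p + b * d_scaled**(p + 1) + c * d_scaled**(p + 2)
    return torch.where(d_scaled < 1, env, torch.zeros_like(d_scaled))

d = torch.linspace(0.0, 1.2, 7)
print(polynomial_envelope(d))   # starts at 1 and reaches 0 at and beyond d_scaled = 1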
+
+class core.models.gemnet.layers.radial_basis.ExponentialEnvelope#
+

Bases: torch.nn.Module

+

Exponential envelope function that ensures a smooth cutoff, +as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. +SpookyNet: Learning Force Fields with Electronic Degrees of Freedom +and Nonlocal Effects

+
+
+forward(d_scaled: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.gemnet.layers.radial_basis.SphericalBesselBasis(num_radial: int, cutoff: float)#
+

Bases: torch.nn.Module

+

1D spherical Bessel basis

+
+
Parameters:
+
    +
  • num_radial (int) – Controls maximum frequency.

  • +
  • cutoff (float) – Cutoff distance in Angstrom.

  • +
+
+
+
+
+forward(d_scaled: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.gemnet.layers.radial_basis.BernsteinBasis(num_radial: int, pregamma_initial: float = 0.45264)#
+

Bases: torch.nn.Module

+

Bernstein polynomial basis, +as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. +SpookyNet: Learning Force Fields with Electronic Degrees of Freedom +and Nonlocal Effects

+
+
Parameters:
+
    +
  • num_radial (int) – Controls maximum frequency.

  • +
  • pregamma_initial (float) – Initial value of the exponential coefficient gamma. +Default: gamma = 0.5 * a_0**-1 = 0.94486, +inverse softplus -> pregamma = log(e**gamma - 1) = 0.45264

  • +
+
+
+
+
+forward(d_scaled: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.gemnet.layers.radial_basis.RadialBasis(num_radial: int, cutoff: float, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None)#
+

Bases: torch.nn.Module

+
+
Parameters:
+
    +
  • num_radial (int) – Controls maximum frequency.

  • +
  • cutoff (float) – Cutoff distance in Angstrom.

  • +
  • rbf (dict = {"name": "gaussian"}) – Basis function and its hyperparameters.

  • +
  • envelope (dict = {"name": "polynomial", "exponent": 5}) – Envelope function and its hyperparameters.

  • +
+
+
+
+
+forward(d)#
+
+ +
+ +
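A hedged usage sketch with the default dictionaries documented above; the import path mirrors this page's module path, and the (nEdges, num_radial) output shape is the expected contract rather than a guarantee.

# Hedged sketch: radial features for a batch of edge distances.
import torch
from fairchem.core.models.gemnet.layers.radial_basis import RadialBasis

radial_basis = RadialBasis(
    num_radial=128,
    cutoff=6.0,
    rbf={"name": "gaussian"},
    envelope={"name": "polynomial", "exponent": 5},
)
d = torch.rand(10) * 6.0       # edge distances in Angstrom
print(radial_basis(d).shape)   # expected torch.Size([10, 128])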
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/layers/spherical_basis/index.html b/autoapi/core/models/gemnet/layers/spherical_basis/index.html new file mode 100644 index 000000000..d78f50b7c --- /dev/null +++ b/autoapi/core/models/gemnet/layers/spherical_basis/index.html @@ -0,0 +1,807 @@ + + + + + + + + + + + core.models.gemnet.layers.spherical_basis — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.models.gemnet.layers.spherical_basis

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.models.gemnet.layers.spherical_basis#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

CircularBasisLayer

2D Fourier Bessel Basis

+
+
+class core.models.gemnet.layers.spherical_basis.CircularBasisLayer(num_spherical: int, radial_basis: core.models.gemnet.layers.radial_basis.RadialBasis, cbf, efficient: bool = False)#
+

Bases: torch.nn.Module

+

2D Fourier Bessel Basis

+
+
Parameters:
+
    +
  • num_spherical (int) – Controls maximum frequency.

  • +
  • radial_basis (RadialBasis) – Radial basis functions

  • +
  • cbf (dict) – Name and hyperparameters of the cosine basis function

  • +
  • efficient (bool) – Whether to use the “efficient” summation order

  • +
+
+
+
+
+forward(D_ca, cosφ_cab, id3_ca)#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet/utils/index.html b/autoapi/core/models/gemnet/utils/index.html new file mode 100644 index 000000000..9038c70c2 --- /dev/null +++ b/autoapi/core/models/gemnet/utils/index.html @@ -0,0 +1,926 @@ + + + + + + + + + + + core.models.gemnet.utils — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.models.gemnet.utils#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

read_json(path)

update_json(→ None)

write_json(→ None)

read_value_json(path, key)

ragged_range(→ torch.Tensor)

Multiple concatenated ranges.

repeat_blocks(→ torch.Tensor)

Repeat blocks of indices.

calculate_interatomic_vectors(→ tuple[torch.Tensor, ...)

Calculate the vectors connecting the given atom pairs,

inner_product_normalized(→ torch.Tensor)

Calculate the inner product between the given normalized vectors,

mask_neighbors(→ torch.Tensor)

+
+
+core.models.gemnet.utils.read_json(path: str)#
+
+ +
+
+core.models.gemnet.utils.update_json(path: str, data) None#
+
+ +
+
+core.models.gemnet.utils.write_json(path: str, data) None#
+
+ +
+
+core.models.gemnet.utils.read_value_json(path: str, key: str)#
+
+ +
+
+core.models.gemnet.utils.ragged_range(sizes: torch.Tensor) torch.Tensor#
+

Multiple concatenated ranges.

+

Examples

+

sizes = [1 4 2 3] +Return: [0 0 1 2 3 0 1 0 1 2]

+
+ +
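The documented example can be reproduced with a short reference implementation; the library version is vectorized, so the loop below is only for illustration.

# Reference-only reimplementation reproducing the example above.
import torch

def ragged_range_ref(sizes: torch.Tensor) -> torch.Tensor:
    return torch.cat([torch.arange(int(s)) for s in sizes])

print(ragged_range_ref(torch.tensor([1, 4, 2, 3])))
# tensor([0, 0, 1, 2, 3, 0, 1, 0, 1, 2])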
+
+core.models.gemnet.utils.repeat_blocks(sizes: torch.Tensor, repeats: int | torch.Tensor, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) torch.Tensor#
+

Repeat blocks of indices. +Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements

+

continuous_indexing: Whether to keep increasing the index after each block
start_idx: Starting index
block_inc: Number to increment by after each block,
    either global or per block. Shape: len(sizes) - 1
repeat_inc: Number to increment by after each repetition,
    either global or per block
either global or per block

+
+
+

Examples

+

sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False +Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1] +sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True +Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5] +sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; +repeat_inc = 4 +Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13] +sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; +start_idx = 5 +Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10] +sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; +block_inc = 1 +Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7] +sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True +Return: [0 1 2 0 1 2 3 4 3 4 3 4] +sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True +Return: [0 1 0 1 5 6 5 6]

+
+ +
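For the simplest case (continuous_indexing=False and no increments), the first documented example can be reproduced with a one-liner; the remaining options change how indices continue across blocks and are handled by the library function itself.

# Reference for the continuous_indexing=False example above (no increments).
import torch

sizes, repeats = [1, 3, 2], [3, 2, 3]
out = torch.cat([torch.arange(s).repeat(r) for s, r in zip(sizes, repeats)])
print(out)  # tensor([0, 0, 0, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 1])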
+
+core.models.gemnet.utils.calculate_interatomic_vectors(R: torch.Tensor, id_s: torch.Tensor, id_t: torch.Tensor, offsets_st: torch.Tensor) tuple[torch.Tensor, torch.Tensor]#
+

Calculate the vectors connecting the given atom pairs, +considering offsets from periodic boundary conditions (PBC).

+
+
Parameters:
+
    +
  • R (Tensor, shape = (nAtoms, 3)) – Atom positions.

  • +
  • id_s (Tensor, shape = (nEdges,)) – Indices of the source atom of the edges.

  • +
  • id_t (Tensor, shape = (nEdges,)) – Indices of the target atom of the edges.

  • +
  • offsets_st (Tensor, shape = (nEdges,)) – PBC offsets of the edges. +Subtract this from the correct direction.

  • +
+
+
Returns:
+

(D_st, V_st)

+
+
D_st: Tensor, shape = (nEdges,)

Distance from atom t to s.

+
+
V_st: Tensor, shape = (nEdges,)

Unit direction from atom t to s.

+
+
+

+
+
Return type:
+

tuple

+
+
+
+ +
+
+core.models.gemnet.utils.inner_product_normalized(x: torch.Tensor, y: torch.Tensor) torch.Tensor#
+

Calculate the inner product between the given normalized vectors, +giving a result between -1 and 1.

+
+ +
+
+core.models.gemnet.utils.mask_neighbors(neighbors: torch.Tensor, edge_mask: torch.Tensor) torch.Tensor#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet_gp/gemnet/index.html b/autoapi/core/models/gemnet_gp/gemnet/index.html new file mode 100644 index 000000000..6d4b1a064 --- /dev/null +++ b/autoapi/core/models/gemnet_gp/gemnet/index.html @@ -0,0 +1,892 @@ + + + + + + + + + + + core.models.gemnet_gp.gemnet — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.models.gemnet_gp.gemnet#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

GraphParallelGemNetT

GemNet-T, triplets-only variant of GemNet

+
+
+class core.models.gemnet_gp.gemnet.GraphParallelGemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', scale_num_blocks: bool = False, scatter_atoms: bool = True, scale_file: str | None = None)#
+

Bases: fairchem.core.models.base.BaseModel

+

GemNet-T, triplets-only variant of GemNet

+
+
Parameters:
+
    +
  • bond_feat_dim (int)

  • +
  • num_atoms (int)

  • +
  • num_targets (int) – Number of prediction targets.

  • +
  • num_spherical (int) – Controls maximum frequency.

  • +
  • num_radial (int) – Controls maximum frequency.

  • +
  • num_blocks (int) – Number of building blocks to be stacked.

  • +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_trip (int) – (Down-projected) Embedding size in the triplet message passing block.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • emb_size_bil_trip (int) – Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.

  • +
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • +
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • +
  • num_concat (int) – Number of residual blocks after the concatenation.

  • +
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • +
  • regress_forces (bool) – Whether to predict forces. Default: True

  • +
  • direct_forces (bool) – If True predict forces based on aggregation of interatomic directions. +If False predict forces based on negative gradient of energy potential.

  • +
  • cutoff (float) – Embedding cutoff for interatomic directions in Angstrom.

  • +
  • rbf (dict) – Name and hyperparameters of the radial basis function.

  • +
  • envelope (dict) – Name and hyperparameters of the envelope function.

  • +
  • cbf (dict) – Name and hyperparameters of the cosine basis function.

  • +
  • extensive (bool) – Whether the output should be extensive (proportional to the number of atoms)

  • +
  • output_init (str) – Initialization method for the final dense layer.

  • +
  • activation (str) – Name of the activation function.

  • +
  • scale_file (str) – Path to the json file containing the scaling factors.

  • +
+
+
+
+
+property num_params#
+
+ +
+
+get_triplets(edge_index, num_atoms)#
+

Get all b->a for each edge c->a. +It is possible that b=c, as long as the edges are distinct.

+
+
Returns:
+

    +
  • id3_ba (torch.Tensor, shape (num_triplets,)) – Indices of input edge b->a of each triplet b->a<-c

  • +
  • id3_ca (torch.Tensor, shape (num_triplets,)) – Indices of output edge c->a of each triplet b->a<-c

  • +
  • id3_ragged_idx (torch.Tensor, shape (num_triplets,)) – Indices enumerating the copies of id3_ca for creating a padded matrix

  • +
+

+
+
+
+ +
+
+select_symmetric_edges(tensor: torch.Tensor, mask, reorder_idx, inverse_neg) torch.Tensor#
+
+ +
+
+reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector)#
+

Reorder edges to make finding counter-directional edges easier.

+

Some edges are only present in one direction in the data, +since every atom has a maximum number of neighbors. Since we only use i->j +edges here, we lose some j->i edges and add others by +making it symmetric. +We could fix this by merging edge_index with its counter-edges, +including the cell_offsets, and then running torch.unique. +But this does not seem worth it.

+
+ +
+
+select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None)#
+
+ +
+
+generate_interaction_graph(data)#
+
+ +
+
+forward(data)#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/models/gemnet_gp/index.html b/autoapi/core/models/gemnet_gp/index.html new file mode 100644 index 000000000..6a0a486b0 --- /dev/null +++ b/autoapi/core/models/gemnet_gp/index.html @@ -0,0 +1,921 @@ + + + + + + + + + + + core.models.gemnet_gp — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.models.gemnet_gp#

+
+

Subpackages#

+ +
+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + +

GraphParallelGemNetT

GemNet-T, triplets-only variant of GemNet

+
+
+class core.models.gemnet_gp.GraphParallelGemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', scale_num_blocks: bool = False, scatter_atoms: bool = True, scale_file: str | None = None)#
+

Bases: fairchem.core.models.base.BaseModel

+

GemNet-T, triplets-only variant of GemNet

+
+
Parameters:
+
    +
  • (int) (bond_feat_dim)

  • +
  • (int)

  • +
  • num_targets (int) – Number of prediction targets.

  • +
  • num_spherical (int) – Controls maximum frequency.

  • +
  • num_radial (int) – Controls maximum frequency.

  • +
  • num_blocks (int) – Number of building blocks to be stacked.

  • +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_trip (int) – (Down-projected) Embedding size in the triplet message passing block.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • emb_size_bil_trip (int) – Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.

  • +
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • +
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • +
  • num_concat (int) – Number of residual blocks after the concatenation.

  • +
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • +
  • regress_forces (bool) – Whether to predict forces. Default: True

  • +
  • direct_forces (bool) – If True predict forces based on aggregation of interatomic directions. +If False predict forces based on negative gradient of energy potential.

  • +
  • cutoff (float) – Embedding cutoff for interatomic directions in Angstrom.

  • +
  • rbf (dict) – Name and hyperparameters of the radial basis function.

  • +
  • envelope (dict) – Name and hyperparameters of the envelope function.

  • +
  • cbf (dict) – Name and hyperparameters of the cosine basis function.

  • +
  • extensive (bool) – Whether the output should be extensive (proportional to the number of atoms)

  • +
  • output_init (str) – Initialization method for the final dense layer.

  • +
  • activation (str) – Name of the activation function.

  • +
  • scale_file (str) – Path to the json file containing the scaling factors.

  • +
+
+
+
+
+property num_params#
+
+ +
+
+get_triplets(edge_index, num_atoms)#
+

Get all b->a for each edge c->a. +It is possible that b=c, as long as the edges are distinct.

+
+
Returns:
+

    +
  • id3_ba (torch.Tensor, shape (num_triplets,)) – Indices of input edge b->a of each triplet b->a<-c

  • +
  • id3_ca (torch.Tensor, shape (num_triplets,)) – Indices of output edge c->a of each triplet b->a<-c

  • +
  • id3_ragged_idx (torch.Tensor, shape (num_triplets,)) – Indices enumerating the copies of id3_ca for creating a padded matrix

  • +
+

+
+
+
+ +
+
+select_symmetric_edges(tensor: torch.Tensor, mask, reorder_idx, inverse_neg) torch.Tensor#
+
+ +
+
+reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector)#
+

Reorder edges to make finding counter-directional edges easier.

+

Some edges are only present in one direction in the data, +since every atom has a maximum number of neighbors. Since we only use i->j +edges here, we lose some j->i edges and add others by +making it symmetric. +We could fix this by merging edge_index with its counter-edges, +including the cell_offsets, and then running torch.unique. +But this does not seem worth it.

+
+ +
+
+select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None)#
+
+ +
+
+generate_interaction_graph(data)#
+
+ +
+
+forward(data)#
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_gp/initializers/index.html b/autoapi/core/models/gemnet_gp/initializers/index.html
new file mode 100644
index 000000000..6ef668e65
--- /dev/null
+++ b/autoapi/core/models/gemnet_gp/initializers/index.html
@@ -0,0 +1,801 @@
core.models.gemnet_gp.initializers — FAIR Chemistry Documentation

core.models.gemnet_gp.initializers#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

_standardize(kernel)

Makes sure that N*Var(W) = 1 and E[W] = 0

he_orthogonal_init(→ torch.Tensor)

Generate a weight matrix with variance according to He (Kaiming) initialization.

+
+
+core.models.gemnet_gp.initializers._standardize(kernel)#
+

Makes sure that N*Var(W) = 1 and E[W] = 0

+
+ +
+
+core.models.gemnet_gp.initializers.he_orthogonal_init(tensor: torch.Tensor) torch.Tensor#
+

Generate a weight matrix with variance according to He (Kaiming) initialization. +Based on a random (semi-)orthogonal matrix neural networks +are expected to learn better when features are decorrelated +(stated by eg. “Reducing overfitting in deep networks by decorrelating representations”, +“Dropout: a simple way to prevent neural networks from overfitting”, +“Exact solutions to the nonlinear dynamics of learning in deep linear neural networks”)
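As a rough illustration only (a sketch under the stated assumptions, not the library implementation), such an initializer can be built by drawing a (semi-)orthogonal matrix and rescaling it so that fan_in * Var(W) is approximately 1; the function name here is hypothetical and a 2-D weight tensor is assumed:

import torch

def he_orthogonal_init_sketch(tensor: torch.Tensor) -> torch.Tensor:
    # Draw a (semi-)orthogonal matrix in place (assumes a 2-D weight tensor)
    torch.nn.init.orthogonal_(tensor)
    fan_in = tensor.shape[1]
    with torch.no_grad():
        # Center and rescale so that fan_in * Var(W) is approximately 1
        tensor -= tensor.mean()
        tensor *= 1.0 / (tensor.std() * fan_in**0.5)
    return tensor

w = torch.empty(256, 64)
he_orthogonal_init_sketch(w)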

+
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_gp/layers/atom_update_block/index.html b/autoapi/core/models/gemnet_gp/layers/atom_update_block/index.html
new file mode 100644
index 000000000..f45d0ecda
--- /dev/null
+++ b/autoapi/core/models/gemnet_gp/layers/atom_update_block/index.html
@@ -0,0 +1,917 @@
core.models.gemnet_gp.layers.atom_update_block — FAIR Chemistry Documentation

core.models.gemnet_gp.layers.atom_update_block#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

AtomUpdateBlock

Aggregate the message embeddings of the atoms

OutputBlock

Combines the atom update block and subsequent final dense layer.

+
+
+

Functions#

+ + + + + + +

scatter_sum(→ torch.Tensor)

Clone of torch_scatter.scatter_sum but without in-place operations

+
+
+core.models.gemnet_gp.layers.atom_update_block.scatter_sum(src: torch.Tensor, index: torch.Tensor, dim: int = -1, out: torch.Tensor | None = None, dim_size: int | None = None) torch.Tensor#
+

Clone of torch_scatter.scatter_sum but without in-place operations
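A small usage sketch; the import path follows the module name documented on this page and the values are purely illustrative:

import torch
from fairchem.core.models.gemnet_gp.layers.atom_update_block import scatter_sum

src = torch.tensor([1.0, 2.0, 3.0, 4.0])
index = torch.tensor([0, 0, 1, 1])
# Entries of src that share an index are summed: tensor([3., 7.])
out = scatter_sum(src, index, dim=0)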

+
+ +
+
+class core.models.gemnet_gp.layers.atom_update_block.AtomUpdateBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, activation: str | None = None, name: str = 'atom_update')#
+

Bases: torch.nn.Module

+

Aggregate the message embeddings of the atoms

+
+
Parameters:
+
    +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • nHidden (int) – Number of residual blocks.

  • +
  • activation (callable/str) – Name of the activation function to use in the dense layers.

  • +
+
+
+
+
+get_mlp(units_in: int, units: int, nHidden: int, activation: str | None)#
+
+ +
+
+forward(nAtoms: int, m: int, rbf, id_j)#
+
+
Returns:
+

h – Atom embedding.

+
+
Return type:
+

torch.Tensor, shape=(nAtoms, emb_size_atom)

+
+
+
+ +
+ +
+
+class core.models.gemnet_gp.layers.atom_update_block.OutputBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, num_targets: int, activation: str | None = None, direct_forces: bool = True, output_init: str = 'HeOrthogonal', name: str = 'output', **kwargs)#
+

Bases: AtomUpdateBlock

+

Combines the atom update block and subsequent final dense layer.

+
+
Parameters:
+
    +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • nHidden (int) – Number of residual blocks.

  • +
  • num_targets (int) – Number of targets.

  • +
  • activation (str) – Name of the activation function to use in the dense layers except for the final dense layer.

  • +
  • direct_forces (bool) – If true directly predict forces without taking the gradient of the energy potential.

  • +
  • output_init (int) – Kernel initializer of the final dense layer.

  • +
+
+
+
+
+dense_rbf_F: core.models.gemnet_gp.layers.base_layers.Dense#
+
+ +
+
+out_forces: core.models.gemnet_gp.layers.base_layers.Dense#
+
+ +
+
+out_energy: core.models.gemnet_gp.layers.base_layers.Dense#
+
+ +
+
+reset_parameters() None#
+
+ +
+
+forward(nAtoms: int, m, rbf, id_j: torch.Tensor)#
+
+
Returns:
+

    +
  • (E, F) (tuple)

  • +
  • - E (torch.Tensor, shape=(nAtoms, num_targets))

  • +
  • - F (torch.Tensor, shape=(nEdges, num_targets))

  • +
  • Energy and force prediction

  • +
+

+
+
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_gp/layers/base_layers/index.html b/autoapi/core/models/gemnet_gp/layers/base_layers/index.html
new file mode 100644
index 000000000..1aa8a76d4
--- /dev/null
+++ b/autoapi/core/models/gemnet_gp/layers/base_layers/index.html
@@ -0,0 +1,951 @@
core.models.gemnet_gp.layers.base_layers — FAIR Chemistry Documentation

core.models.gemnet_gp.layers.base_layers#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + +

Dense

Combines dense layer with scaling for swish activation.

ScaledSiLU

Base class for all neural network modules.

SiQU

Base class for all neural network modules.

ResidualLayer

Residual block with output scaled by 1/sqrt(2).

+
+
+class core.models.gemnet_gp.layers.base_layers.Dense(num_in_features: int, num_out_features: int, bias: bool = False, activation: str | None = None)#
+

Bases: torch.nn.Module

+

Combines dense layer with scaling for swish activation.

+
+
Parameters:
+
    +
  • units (int) – Output embedding size.

  • +
  • activation (str) – Name of the activation function to use.

  • +
  • bias (bool) – True if use bias.

  • +
+
+
+
+
+reset_parameters(initializer=he_orthogonal_init) None#
+
+ +
+
+forward(x: torch.Tensor) torch.Tensor#
+
+ +
+ +
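A usage sketch for Dense, assuming the import path documented here and that "silu"/"swish" is an accepted activation name; the sizes are illustrative:

import torch
from fairchem.core.models.gemnet_gp.layers.base_layers import Dense

layer = Dense(num_in_features=64, num_out_features=128, bias=False, activation="silu")
x = torch.randn(10, 64)
y = layer(x)  # shape (10, 128); the SiLU output is rescaled to roughly preserve variance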
+
+class core.models.gemnet_gp.layers.base_layers.ScaledSiLU#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.gemnet_gp.layers.base_layers.SiQU#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.gemnet_gp.layers.base_layers.ResidualLayer(units: int, nLayers: int = 2, layer=Dense, **layer_kwargs)#
+

Bases: torch.nn.Module

+

Residual block with output scaled by 1/sqrt(2).

+
+
Parameters:
+
    +
  • units (int) – Output embedding size.

  • +
  • nLayers (int) – Number of dense layers.

  • +
  • layer_kwargs (str) – Keyword arguments for initializing the layers.

  • +
+
+
+
+
+forward(input: torch.Tensor) torch.Tensor#
+
+ +
+ +
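Conceptually, the block computes y = (x + MLP(x)) / sqrt(2). A minimal self-contained sketch of that idea (not the library class, which builds its MLP from Dense layers):

import math
import torch
import torch.nn as nn

class ResidualLayerSketch(nn.Module):
    def __init__(self, units: int, n_layers: int = 2):
        super().__init__()
        self.mlp = nn.Sequential(*[nn.Linear(units, units, bias=False) for _ in range(n_layers)])
        self.inv_sqrt_2 = 1 / math.sqrt(2.0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Skip connection followed by 1/sqrt(2) scaling to keep the output variance stable
        return (x + self.mlp(x)) * self.inv_sqrt_2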
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_gp/layers/basis_utils/index.html b/autoapi/core/models/gemnet_gp/layers/basis_utils/index.html
new file mode 100644
index 000000000..dd8efd76f
--- /dev/null
+++ b/autoapi/core/models/gemnet_gp/layers/basis_utils/index.html
@@ -0,0 +1,923 @@
core.models.gemnet_gp.layers.basis_utils — FAIR Chemistry Documentation

core.models.gemnet_gp.layers.basis_utils#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + +

Jn(r, n)

numerical spherical bessel functions of order n

Jn_zeros(n, k)

Compute the first k zeros of the spherical bessel functions up to order n (excluded)

spherical_bessel_formulas(n)

Computes the sympy formulas for the spherical bessel functions up to order n (excluded)

bessel_basis(n, k)

Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to

sph_harm_prefactor(→ float)

Computes the constant pre-factor for the spherical harmonic of degree l and order m.

associated_legendre_polynomials(L_maxdegree[, ...])

Computes string formulas of the associated legendre polynomials up to degree L (excluded).

real_sph_harm(L_maxdegree, use_theta[, use_phi, ...])

Computes formula strings of the real part of the spherical harmonics up to degree L (excluded).

+
+
+core.models.gemnet_gp.layers.basis_utils.Jn(r: float, n: int)#
+

numerical spherical bessel functions of order n

+
+ +
+
+core.models.gemnet_gp.layers.basis_utils.Jn_zeros(n: int, k: int)#
+

Compute the first k zeros of the spherical bessel functions up to order n (excluded)

+
+ +
+
+core.models.gemnet_gp.layers.basis_utils.spherical_bessel_formulas(n)#
+

Computes the sympy formulas for the spherical bessel functions up to order n (excluded)

+
+ +
+
+core.models.gemnet_gp.layers.basis_utils.bessel_basis(n: int, k: int)#
+

Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to +order n (excluded) and maximum frequency k (excluded).

+
+
Returns:
+

+
list

Bessel basis formulas taking in a single argument x. +Has length n where each element has length k. -> In total n*k many.

+
+
+

+
+
Return type:
+

bess_basis

+
+
+
+ +
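A brief usage sketch, assuming the import path documented on this page; the order and frequency counts are arbitrary:

from fairchem.core.models.gemnet_gp.layers.basis_utils import bessel_basis

# Nested list of sympy formulas: n = 2 orders, each with k = 3 frequencies,
# where each formula takes a single argument x (as described above)
basis = bessel_basis(n=2, k=3)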
+
+core.models.gemnet_gp.layers.basis_utils.sph_harm_prefactor(l_degree: int, m_order: int) float#
+

Computes the constant pre-factor for the spherical harmonic of degree l and order m.

+
+
Parameters:
+
    +
  • l_degree (int) – Degree of the spherical harmonic. l >= 0

  • +
  • m_order (int) – Order of the spherical harmonic. -l <= m <= l

  • +
+
+
Returns:
+

factor

+
+
Return type:
+

float

+
+
+
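For reference, the standard normalization prefactor is sqrt((2l+1)/(4*pi) * (l-|m|)!/(l+|m|)!); a plain-Python sketch of that formula (the library function may differ in details, and the name below is hypothetical):

import math

def sph_harm_prefactor_sketch(l_degree: int, m_order: int) -> float:
    # sqrt((2l+1)/(4*pi) * (l-|m|)! / (l+|m|)!)
    return math.sqrt(
        (2 * l_degree + 1)
        / (4 * math.pi)
        * math.factorial(l_degree - abs(m_order))
        / math.factorial(l_degree + abs(m_order))
    )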
+ +
+
+core.models.gemnet_gp.layers.basis_utils.associated_legendre_polynomials(L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True)#
+

Computes string formulas of the associated legendre polynomials up to degree L (excluded).

+
+
Parameters:
+
    +
  • L_maxdegree (int) – Degree up to which to calculate the associated legendre polynomials (degree L is excluded).

  • +
  • zero_m_only (bool) – If True only calculate the polynomials for the polynomials where m=0.

  • +
  • pos_m_only (bool) – If True only calculate the polynomials for the polynomials where m>=0. Overwritten by zero_m_only.

  • +
+
+
Returns:
+

polynomials – Contains the sympy functions of the polynomials (in total L many if zero_m_only is True else L^2 many).

+
+
Return type:
+

list

+
+
+
+ +
+
+core.models.gemnet_gp.layers.basis_utils.real_sph_harm(L_maxdegree: int, use_theta: bool, use_phi: bool = True, zero_m_only: bool = True)#
+

Computes formula strings of the real part of the spherical harmonics up to degree L (excluded). +Variables are either spherical coordinates phi and theta (or cartesian coordinates x,y,z) on the UNIT SPHERE.

+
+
Parameters:
+
    +
  • L_maxdegree (int) – Degree up to which to calculate the spherical harmonics (degree L is excluded).

  • +
  • use_theta (bool) –

      +
    • True: Expects the input of the formula strings to contain theta.

    • +
    • False: Expects the input of the formula strings to contain z.

    • +
    +

  • +
  • use_phi (bool) –

      +
    • True: Expects the input of the formula strings to contain phi.

    • +
    • False: Expects the input of the formula strings to contain x and y.

    • +
    +

    Does nothing if zero_m_only is True

    +

  • +
  • zero_m_only (bool) – If True only calculate the harmonics where m=0.

  • +
+
+
Returns:
+

Y_lm_real – Computes formula strings of the real part of the spherical harmonics up +to degree L (where degree L is not excluded). +In total L^2 many sph harm exist up to degree L (excluded). However, if zero_m_only is True then +the total count is reduced to be only L many.

+
+
Return type:
+

list

+
+
+
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_gp/layers/efficient/index.html b/autoapi/core/models/gemnet_gp/layers/efficient/index.html
new file mode 100644
index 000000000..55d58879e
--- /dev/null
+++ b/autoapi/core/models/gemnet_gp/layers/efficient/index.html
@@ -0,0 +1,883 @@
core.models.gemnet_gp.layers.efficient — FAIR Chemistry Documentation

core.models.gemnet_gp.layers.efficient#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

EfficientInteractionDownProjection

Down projection in the efficient reformulation.

EfficientInteractionBilinear

Efficient reformulation of the bilinear layer and subsequent summation.

+
+
+class core.models.gemnet_gp.layers.efficient.EfficientInteractionDownProjection(num_spherical: int, num_radial: int, emb_size_interm: int)#
+

Bases: torch.nn.Module

+

Down projection in the efficient reformulation.

+
+
Parameters:
+
    +
  • emb_size_interm (int) – Intermediate embedding size (down-projection size).

  • +
  • kernel_initializer (callable) – Initializer of the weight matrix.

  • +
+
+
+
+
+reset_parameters() None#
+
+ +
+
+forward(rbf: torch.Tensor, sph: torch.Tensor, id_ca, id_ragged_idx, Kmax: int) tuple[torch.Tensor, torch.Tensor]#
+
+
Parameters:
+
    +
  • rbf (torch.Tensor, shape=(1, nEdges, num_radial))

  • +
  • sph (torch.Tensor, shape=(nEdges, Kmax, num_spherical))

  • +
  • id_ca

  • +
  • id_ragged_idx

  • +
+
+
Returns:
+

    +
  • rbf_W1 (torch.Tensor, shape=(nEdges, emb_size_interm, num_spherical))

  • +
  • sph (torch.Tensor, shape=(nEdges, Kmax, num_spherical)) – Kmax = maximum number of neighbors of the edges

  • +
+

+
+
+
+ +
+ +
+
+class core.models.gemnet_gp.layers.efficient.EfficientInteractionBilinear(emb_size: int, emb_size_interm: int, units_out: int)#
+

Bases: torch.nn.Module

+

Efficient reformulation of the bilinear layer and subsequent summation.

+
+
Parameters:
+
    +
  • units_out (int) – Embedding output size of the bilinear layer.

  • +
  • kernel_initializer (callable) – Initializer of the weight matrix.

  • +
+
+
+
+
+reset_parameters() None#
+
+ +
+
+forward(basis: tuple[torch.Tensor, torch.Tensor], m, id_reduce, id_ragged_idx, edge_offset, Kmax: int) torch.Tensor#
+
+
Parameters:
+
    +
  • basis

  • +
  • m (quadruplets: m = m_db , triplets: m = m_ba)

  • +
  • id_reduce

  • +
  • id_ragged_idx

  • +
+
+
Returns:
+

m_ca – Edge embeddings.

+
+
Return type:
+

torch.Tensor, shape=(nEdges, units_out)

+
+
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_gp/layers/embedding_block/index.html b/autoapi/core/models/gemnet_gp/layers/embedding_block/index.html
new file mode 100644
index 000000000..21490c96c
--- /dev/null
+++ b/autoapi/core/models/gemnet_gp/layers/embedding_block/index.html
@@ -0,0 +1,857 @@
core.models.gemnet_gp.layers.embedding_block — FAIR Chemistry Documentation

core.models.gemnet_gp.layers.embedding_block#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

AtomEmbedding

Initial atom embeddings based on the atom type

EdgeEmbedding

Edge embedding based on the concatenation of atom embeddings and subsequent dense layer.

+
+
+class core.models.gemnet_gp.layers.embedding_block.AtomEmbedding(emb_size: int)#
+

Bases: torch.nn.Module

+

Initial atom embeddings based on the atom type

+
+
Parameters:
+

emb_size (int) – Atom embeddings size

+
+
+
+
+forward(Z) torch.Tensor#
+
+
Returns:
+

h – Atom embeddings.

+
+
Return type:
+

torch.Tensor, shape=(nAtoms, emb_size)

+
+
+
+ +
+ +
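A usage sketch for AtomEmbedding, assuming the import path documented here; the atomic numbers and embedding size are illustrative:

import torch
from fairchem.core.models.gemnet_gp.layers.embedding_block import AtomEmbedding

emb = AtomEmbedding(emb_size=128)
Z = torch.tensor([1, 6, 8])  # atomic numbers of H, C, O
h = emb(Z)                   # atom embeddings with shape (3, 128)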
+
+class core.models.gemnet_gp.layers.embedding_block.EdgeEmbedding(atom_features: int, edge_features: int, num_out_features: int, activation: str | None = None)#
+

Bases: torch.nn.Module

+

Edge embedding based on the concatenation of atom embeddings and subsequent dense layer.

+
+
Parameters:
+
    +
  • emb_size (int) – Embedding size after the dense layer.

  • +
  • activation (str) – Activation function used in the dense layer.

  • +
+
+
+
+
+forward(h, m_rbf, idx_s, idx_t) torch.Tensor#
+
+
Parameters:
+
    +
  • h

  • +
  • m_rbf (shape (nEdges, nFeatures)) – in embedding block: m_rbf = rbf ; In interaction block: m_rbf = m_st

  • +
  • idx_s

  • +
  • idx_t

  • +
+
+
Returns:
+

m_st – Edge embeddings.

+
+
Return type:
+

torch.Tensor, shape=(nEdges, emb_size)

+
+
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_gp/layers/index.html b/autoapi/core/models/gemnet_gp/layers/index.html
new file mode 100644
index 000000000..02bc99cd2
--- /dev/null
+++ b/autoapi/core/models/gemnet_gp/layers/index.html
@@ -0,0 +1,766 @@
core.models.gemnet_gp.layers — FAIR Chemistry Documentation

core.models.gemnet_gp.layers#

+
+

Submodules#

+ +
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_gp/layers/interaction_block/index.html b/autoapi/core/models/gemnet_gp/layers/interaction_block/index.html
new file mode 100644
index 000000000..19045a84f
--- /dev/null
+++ b/autoapi/core/models/gemnet_gp/layers/interaction_block/index.html
@@ -0,0 +1,869 @@
core.models.gemnet_gp.layers.interaction_block — FAIR Chemistry Documentation

core.models.gemnet_gp.layers.interaction_block#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

InteractionBlockTripletsOnly

Interaction block for GemNet-T/dT.

TripletInteraction

Triplet-based message passing block.

+
+
+class core.models.gemnet_gp.layers.interaction_block.InteractionBlockTripletsOnly(emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, activation: str | None = None, name: str = 'Interaction')#
+

Bases: torch.nn.Module

+

Interaction block for GemNet-T/dT.

+
+
Parameters:
+
    +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_trip (int) – (Down-projected) Embedding size in the triplet message passing block.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • emb_size_bil_trip (int) – Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.

  • +
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • +
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • +
  • num_concat (int) – Number of residual blocks after the concatenation.

  • +
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • +
  • activation (str) – Name of the activation function to use in the dense layers except for the final dense layer.

  • +
+
+
+
+
+forward(h: torch.Tensor, m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca, rbf_h, idx_s, idx_t, edge_offset, Kmax, nAtoms)#
+
+
Returns:
+

    +
  • h (torch.Tensor, shape=(nEdges, emb_size_atom)) – Atom embeddings.

  • +
  • m (torch.Tensor, shape=(nEdges, emb_size_edge)) – Edge embeddings (c->a).

  • +
  • Node (h)

  • +
  • Edge (m, rbf3, id_swap, rbf_h, idx_s, idx_t, cbf3[0], cbf3[1] (dense))

  • +
  • Triplet (id3_ragged_idx, id3_ba, id3_ca)

  • +
+

+
+
+
+ +
+ +
+
+class core.models.gemnet_gp.layers.interaction_block.TripletInteraction(emb_size_edge: int, emb_size_trip: int, emb_size_bilinear: int, emb_size_rbf: int, emb_size_cbf: int, activation: str | None = None, name: str = 'TripletInteraction', **kwargs)#
+

Bases: torch.nn.Module

+

Triplet-based message passing block.

+
+
Parameters:
+
    +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_trip (int) – (Down-projected) Embedding size of the edge embeddings after the hadamard product with rbf.

  • +
  • emb_size_bilinear (int) – Embedding size of the edge embeddings after the bilinear layer.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • activation (str) – Name of the activation function to use in the dense layers except for the final dense layer.

  • +
+
+
+
+
+forward(m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca, edge_offset, Kmax)#
+
+
Returns:
+

m – Edge embeddings (c->a).

+
+
Return type:
+

torch.Tensor, shape=(nEdges, emb_size_edge)

+
+
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_gp/layers/radial_basis/index.html b/autoapi/core/models/gemnet_gp/layers/radial_basis/index.html
new file mode 100644
index 000000000..dd77fe399
--- /dev/null
+++ b/autoapi/core/models/gemnet_gp/layers/radial_basis/index.html
@@ -0,0 +1,932 @@
core.models.gemnet_gp.layers.radial_basis — FAIR Chemistry Documentation

core.models.gemnet_gp.layers.radial_basis#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + +

PolynomialEnvelope

Polynomial envelope function that ensures a smooth cutoff.

ExponentialEnvelope

Exponential envelope function that ensures a smooth cutoff,

SphericalBesselBasis

1D spherical Bessel basis

BernsteinBasis

Bernstein polynomial basis,

RadialBasis

+
param num_radial:
+

Controls maximum frequency.

+
+
+

+
+
+class core.models.gemnet_gp.layers.radial_basis.PolynomialEnvelope(exponent: int)#
+

Bases: torch.nn.Module

+

Polynomial envelope function that ensures a smooth cutoff.

+
+
Parameters:
+

exponent (int) – Exponent of the envelope function.

+
+
+
+
+forward(d_scaled: torch.Tensor) torch.Tensor#
+
+ +
+ +
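For context, the commonly used polynomial cutoff (as in DimeNet-style models) for scaled distances d in [0, 1) is u(d) = 1 - ((p+1)(p+2)/2) d^p + p(p+2) d^(p+1) - (p(p+1)/2) d^(p+2). A standalone sketch of that formula, which may differ in details from the class above (the function name is hypothetical):

import torch

def polynomial_envelope_sketch(d_scaled: torch.Tensor, exponent: int = 5) -> torch.Tensor:
    p = exponent
    env = (
        1
        - (p + 1) * (p + 2) / 2 * d_scaled**p
        + p * (p + 2) * d_scaled ** (p + 1)
        - p * (p + 1) / 2 * d_scaled ** (p + 2)
    )
    # The envelope only applies below the cutoff (d_scaled < 1); zero it elsewhere
    return torch.where(d_scaled < 1, env, torch.zeros_like(d_scaled))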
+
+class core.models.gemnet_gp.layers.radial_basis.ExponentialEnvelope#
+

Bases: torch.nn.Module

+

Exponential envelope function that ensures a smooth cutoff, +as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. +SpookyNet: Learning Force Fields with Electronic Degrees of Freedom +and Nonlocal Effects

+
+
+forward(d_scaled) torch.Tensor#
+
+ +
+ +
+
+class core.models.gemnet_gp.layers.radial_basis.SphericalBesselBasis(num_radial: int, cutoff: float)#
+

Bases: torch.nn.Module

+

1D spherical Bessel basis

+
+
Parameters:
+
    +
  • num_radial (int) – Controls maximum frequency.

  • +
  • cutoff (float) – Cutoff distance in Angstrom.

  • +
+
+
+
+
+forward(d_scaled)#
+
+ +
+ +
+
+class core.models.gemnet_gp.layers.radial_basis.BernsteinBasis(num_radial: int, pregamma_initial: float = 0.45264)#
+

Bases: torch.nn.Module

+

Bernstein polynomial basis, +as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. +SpookyNet: Learning Force Fields with Electronic Degrees of Freedom +and Nonlocal Effects

+
+
Parameters:
+
    +
  • num_radial (int) – Controls maximum frequency.

  • +
  • pregamma_initial (float) – Initial value of exponential coefficient gamma. +Default: gamma = 0.5 * a_0**-1 = 0.94486, +inverse softplus -> pregamma = log e**gamma - 1 = 0.45264

  • +
+
+
+
+
+forward(d_scaled) torch.Tensor#
+
+ +
+ +
+
+class core.models.gemnet_gp.layers.radial_basis.RadialBasis(num_radial: int, cutoff: float, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None)#
+

Bases: torch.nn.Module

+
+
Parameters:
+
    +
  • num_radial (int) – Controls maximum frequency.

  • +
  • cutoff (float) – Cutoff distance in Angstrom.

  • +
  • rbf (dict = {"name": "gaussian"}) – Basis function and its hyperparameters.

  • +
  • envelope (dict = {"name": "polynomial", "exponent": 5}) – Envelope function and its hyperparameters.

  • +
+
+
+
+
+forward(d)#
+
+ +
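A usage sketch combining a Gaussian radial basis with the polynomial envelope, using the dictionary formats documented above; the hyperparameter values and distances are illustrative:

import torch
from fairchem.core.models.gemnet_gp.layers.radial_basis import RadialBasis

rbf = RadialBasis(
    num_radial=128,
    cutoff=12.0,
    rbf={"name": "gaussian"},
    envelope={"name": "polynomial", "exponent": 5},
)
d = torch.linspace(0.1, 12.0, steps=16)  # interatomic distances in Angstrom
values = rbf(d)                          # expected shape: (16, 128)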
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_gp/layers/spherical_basis/index.html b/autoapi/core/models/gemnet_gp/layers/spherical_basis/index.html
new file mode 100644
index 000000000..5c8428155
--- /dev/null
+++ b/autoapi/core/models/gemnet_gp/layers/spherical_basis/index.html
@@ -0,0 +1,807 @@
core.models.gemnet_gp.layers.spherical_basis — FAIR Chemistry Documentation

core.models.gemnet_gp.layers.spherical_basis#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

CircularBasisLayer

2D Fourier Bessel Basis

+
+
+class core.models.gemnet_gp.layers.spherical_basis.CircularBasisLayer(num_spherical: int, radial_basis: core.models.gemnet_gp.layers.radial_basis.RadialBasis, cbf, efficient: bool = False)#
+

Bases: torch.nn.Module

+

2D Fourier Bessel Basis

+
+
Parameters:
+
    +
  • num_spherical (int) – Controls maximum frequency.

  • +
  • radial_basis (RadialBasis) – Radial basis functions

  • +
  • cbf (dict) – Name and hyperparameters of the cosine basis function

  • +
  • efficient (bool) – Whether to use the “efficient” summation order

  • +
+
+
+
+
+forward(D_ca, cosφ_cab, id3_ca)#
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_gp/utils/index.html b/autoapi/core/models/gemnet_gp/utils/index.html
new file mode 100644
index 000000000..aa2d0b82d
--- /dev/null
+++ b/autoapi/core/models/gemnet_gp/utils/index.html
@@ -0,0 +1,926 @@
core.models.gemnet_gp.utils — FAIR Chemistry Documentation

core.models.gemnet_gp.utils#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

read_json(path)

update_json(→ None)

write_json(→ None)

read_value_json(path, key)

ragged_range(sizes)

Multiple concatenated ranges.

repeat_blocks(→ torch.Tensor)

Repeat blocks of indices.

calculate_interatomic_vectors(→ tuple[torch.Tensor, ...)

Calculate the vectors connecting the given atom pairs,

inner_product_normalized(→ torch.Tensor)

Calculate the inner product between the given normalized vectors,

mask_neighbors(neighbors, edge_mask)

+
+
+core.models.gemnet_gp.utils.read_json(path: str)#
+
+ +
+
+core.models.gemnet_gp.utils.update_json(path: str, data) None#
+
+ +
+
+core.models.gemnet_gp.utils.write_json(path: str, data) None#
+
+ +
+
+core.models.gemnet_gp.utils.read_value_json(path: str, key)#
+
+ +
+
+core.models.gemnet_gp.utils.ragged_range(sizes)#
+

Multiple concatenated ranges.

+

Examples

+

sizes = [1 4 2 3] +Return: [0 0 1 2 3 0 1 0 1 2]

+
+ +
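A usage sketch mirroring the example above, assuming the import path documented on this page:

import torch
from fairchem.core.models.gemnet_gp.utils import ragged_range

sizes = torch.tensor([1, 4, 2, 3])
# Concatenation of arange(1), arange(4), arange(2), arange(3)
out = ragged_range(sizes)  # tensor([0, 0, 1, 2, 3, 0, 1, 0, 1, 2])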
+
+core.models.gemnet_gp.utils.repeat_blocks(sizes: torch.Tensor, repeats, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) torch.Tensor#
+

Repeat blocks of indices. +Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements

+

continuous_indexing: Whether to keep increasing the index after each block
start_idx: Starting index
block_inc: Number to increment by after each block, either global or per block. Shape: len(sizes) - 1
repeat_inc: Number to increment by after each repetition, either global or per block

+
+
+

Examples

+

sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False +Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1] +sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True +Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5] +sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; +repeat_inc = 4 +Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13] +sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; +start_idx = 5 +Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10] +sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; +block_inc = 1 +Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7] +sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True +Return: [0 1 2 0 1 2 3 4 3 4 3 4] +sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True +Return: [0 1 0 1 5 6 5 6]

+
+ +
+
+core.models.gemnet_gp.utils.calculate_interatomic_vectors(R: torch.Tensor, id_s: torch.Tensor, id_t: torch.Tensor, offsets_st: torch.Tensor | None) tuple[torch.Tensor, torch.Tensor]#
+

Calculate the vectors connecting the given atom pairs, +considering offsets from periodic boundary conditions (PBC).

+
+
Parameters:
+
    +
  • R (Tensor, shape = (nAtoms, 3)) – Atom positions.

  • +
  • id_s (Tensor, shape = (nEdges,)) – Indices of the source atom of the edges.

  • +
  • id_t (Tensor, shape = (nEdges,)) – Indices of the target atom of the edges.

  • +
  • offsets_st (Tensor, shape = (nEdges,)) – PBC offsets of the edges. +Subtract this from the correct direction.

  • +
+
+
Returns:
+

(D_st, V_st)

+
+
D_st: Tensor, shape = (nEdges,)

Distance from atom t to s.

+
+
V_st: Tensor, shape = (nEdges,)

Unit direction from atom t to s.

+
+
+

+
+
Return type:
+

tuple

+
+
+
+ +
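A minimal sketch for a two-atom system without PBC offsets (the signature suggests offsets_st may be passed as None; the positions and indices are illustrative):

import torch
from fairchem.core.models.gemnet_gp.utils import calculate_interatomic_vectors

R = torch.tensor([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0]])   # two atoms 1 Angstrom apart
id_s = torch.tensor([0])              # source atom of the single edge
id_t = torch.tensor([1])              # target atom of the single edge
D_st, V_st = calculate_interatomic_vectors(R, id_s, id_t, offsets_st=None)
# D_st is approximately tensor([1.]); V_st is the unit direction from atom t to s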
+
+core.models.gemnet_gp.utils.inner_product_normalized(x: torch.Tensor, y: torch.Tensor) torch.Tensor#
+

Calculate the inner product between the given normalized vectors, +giving a result between -1 and 1.

+
+ +
+
+core.models.gemnet_gp.utils.mask_neighbors(neighbors: torch.Tensor, edge_mask: torch.Tensor)#
+
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_oc/gemnet_oc/index.html b/autoapi/core/models/gemnet_oc/gemnet_oc/index.html
new file mode 100644
index 000000000..24b0bbbe1
--- /dev/null
+++ b/autoapi/core/models/gemnet_oc/gemnet_oc/index.html
@@ -0,0 +1,1018 @@
core.models.gemnet_oc.gemnet_oc — FAIR Chemistry Documentation

core.models.gemnet_oc.gemnet_oc#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

GemNetOC

+
param num_atoms (int):
+

+
+

+
+
+class core.models.gemnet_oc.gemnet_oc.GemNetOC(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_quad_in: int, emb_size_quad_out: int, emb_size_aint_in: int, emb_size_aint_out: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_sbf: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, num_output_afteratom: int, num_atom_emb_layers: int = 0, num_global_out_layers: int = 2, regress_forces: bool = True, direct_forces: bool = False, use_pbc: bool = True, scale_backprop_forces: bool = False, cutoff: float = 6.0, cutoff_qint: float | None = None, cutoff_aeaint: float | None = None, cutoff_aint: float | None = None, max_neighbors: int = 50, max_neighbors_qint: int | None = None, max_neighbors_aeaint: int | None = None, max_neighbors_aint: int | None = None, enforce_max_neighbors_strictly: bool = True, rbf: dict[str, str] | None = None, rbf_spherical: dict | None = None, envelope: dict[str, str | int] | None = None, cbf: dict[str, str] | None = None, sbf: dict[str, str] | None = None, extensive: bool = True, forces_coupled: bool = False, output_init: str = 'HeOrthogonal', activation: str = 'silu', quad_interaction: bool = False, atom_edge_interaction: bool = False, edge_atom_interaction: bool = False, atom_interaction: bool = False, scale_basis: bool = False, qint_tags: list | None = None, num_elements: int = 83, otf_graph: bool = False, scale_file: str | None = None, **kwargs)#
+

Bases: fairchem.core.models.base.BaseModel

+
+
Parameters:
+
    +
  • (int) (bond_feat_dim)

  • +
  • (int)

  • +
  • num_targets (int) – Number of prediction targets.

  • +
  • num_spherical (int) – Controls maximum frequency.

  • +
  • num_radial (int) – Controls maximum frequency.

  • +
  • num_blocks (int) – Number of building blocks to be stacked.

  • +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_trip_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings +before the bilinear layer.

  • +
  • emb_size_trip_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings +after the bilinear layer.

  • +
  • emb_size_quad_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings +before the bilinear layer.

  • +
  • emb_size_quad_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings +after the bilinear layer.

  • +
  • emb_size_aint_in (int) – Embedding size in the atom interaction before the bilinear layer.

  • +
  • emb_size_aint_out (int) – Embedding size in the atom interaction after the bilinear layer.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • emb_size_sbf (int) – Embedding size of the spherical basis transformation (two angles).

  • +
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • +
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • +
  • num_concat (int) – Number of residual blocks after the concatenation.

  • +
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • +
  • num_output_afteratom (int) – Number of residual blocks in the output blocks +after adding the atom embedding.

  • +
  • num_atom_emb_layers (int) – Number of residual blocks for transforming atom embeddings.

  • +
  • num_global_out_layers (int) – Number of final residual blocks before the output.

  • +
  • regress_forces (bool) – Whether to predict forces. Default: True

  • +
  • direct_forces (bool) – If True predict forces based on aggregation of interatomic directions. +If False predict forces based on negative gradient of energy potential.

  • +
  • use_pbc (bool) – Whether to use periodic boundary conditions.

  • +
  • scale_backprop_forces (bool) – Whether to scale up the energy and then scale down the forces +to prevent NaNs and infs in backpropagated forces.

  • +
  • cutoff (float) – Embedding cutoff for interatomic connections and embeddings in Angstrom.

  • +
  • cutoff_qint (float) – Quadruplet interaction cutoff in Angstrom. +Optional. Uses cutoff per default.

  • +
  • cutoff_aeaint (float) – Edge-to-atom and atom-to-edge interaction cutoff in Angstrom. +Optional. Uses cutoff per default.

  • +
  • cutoff_aint (float) – Atom-to-atom interaction cutoff in Angstrom. +Optional. Uses maximum of all other cutoffs per default.

  • +
  • max_neighbors (int) – Maximum number of neighbors for interatomic connections and embeddings.

  • +
  • max_neighbors_qint (int) – Maximum number of quadruplet interactions per embedding. +Optional. Uses max_neighbors per default.

  • +
  • max_neighbors_aeaint (int) – Maximum number of edge-to-atom and atom-to-edge interactions per embedding. +Optional. Uses max_neighbors per default.

  • +
  • max_neighbors_aint (int) – Maximum number of atom-to-atom interactions per atom. +Optional. Uses maximum of all other neighbors per default.

  • +
  • enforce_max_neighbors_strictly (bool) – When subselecting edges based on the max_neighbors arguments, arbitrarily +select amongst degenerate edges so that exactly the correct number is kept.

  • +
  • rbf (dict) – Name and hyperparameters of the radial basis function.

  • +
  • rbf_spherical (dict) – Name and hyperparameters of the radial basis function used as part of the +circular and spherical bases. +Optional. Uses rbf per default.

  • +
  • envelope (dict) – Name and hyperparameters of the envelope function.

  • +
  • cbf (dict) – Name and hyperparameters of the circular basis function.

  • +
  • sbf (dict) – Name and hyperparameters of the spherical basis function.

  • +
  • extensive (bool) – Whether the output should be extensive (proportional to the number of atoms)

  • +
  • forces_coupled (bool) – If True, enforce that |F_st| = |F_ts|. No effect if direct_forces is False.

  • +
  • output_init (str) – Initialization method for the final dense layer.

  • +
  • activation (str) – Name of the activation function.

  • +
  • scale_file (str) – Path to the pytorch file containing the scaling factors.

  • +
  • quad_interaction (bool) – Whether to use quadruplet interactions (with dihedral angles)

  • +
  • atom_edge_interaction (bool) – Whether to use atom-to-edge interactions

  • +
  • edge_atom_interaction (bool) – Whether to use edge-to-atom interactions

  • +
  • atom_interaction (bool) – Whether to use atom-to-atom interactions

  • +
  • scale_basis (bool) – Whether to use a scaling layer in the raw basis function for better +numerical stability.

  • +
  • qint_tags (list) – Which atom tags to use quadruplet interactions for. +0=sub-surface bulk, 1=surface, 2=adsorbate atoms.

  • +
+
+
+
+
+property num_params: int#
+
+ +
+
+set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint)#
+
+ +
+
+set_max_neighbors(max_neighbors, max_neighbors_qint, max_neighbors_aeaint, max_neighbors_aint)#
+
+ +
+
+init_basis_functions(num_radial, num_spherical, rbf, rbf_spherical, envelope, cbf, sbf, scale_basis)#
+
+ +
+
+init_shared_basis_layers(num_radial, num_spherical, emb_size_rbf, emb_size_cbf, emb_size_sbf)#
+
+ +
+
+calculate_quad_angles(V_st, V_qint_st, quad_idx)#
+

Calculate angles for quadruplet-based message passing.

+
+
Parameters:
+
    +
  • V_st (Tensor, shape = (nAtoms, 3)) – Normalized directions from s to t

  • +
  • V_qint_st (Tensor, shape = (nAtoms, 3)) – Normalized directions from s to t for the quadruplet +interaction graph

  • +
  • quad_idx (dict of torch.Tensor) – Indices relevant for quadruplet interactions.

  • +
+
+
Returns:
+

    +
  • cosφ_cab (Tensor, shape = (num_triplets_inint,)) – Cosine of angle between atoms c -> a <- b.

  • +
  • cosφ_abd (Tensor, shape = (num_triplets_qint,)) – Cosine of angle between atoms a -> b -> d.

  • +
  • angle_cabd (Tensor, shape = (num_quadruplets,)) – Dihedral angle between atoms c <- a-b -> d.

  • +
+

+
+
+
+ +
+
+select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, opposite_neg) torch.Tensor#
+

Use a mask to remove values of removed edges and then +duplicate the values for the correct edge direction.

+
+
Parameters:
+
    +
  • tensor (torch.Tensor) – Values to symmetrize for the new tensor.

  • +
  • mask (torch.Tensor) – Mask defining which edges go in the correct direction.

  • +
  • reorder_idx (torch.Tensor) – Indices defining how to reorder the tensor values after +concatenating the edge values of both directions.

  • +
  • opposite_neg (bool) – Whether the edge in the opposite direction should use the +negative tensor value.

  • +
+
+
Returns:
+

tensor_ordered – A tensor with symmetrized values.

+
+
Return type:
+

torch.Tensor

+
+
+
+ +
+
+symmetrize_edges(graph, batch_idx)#
+

Symmetrize edges to ensure existence of counter-directional edges.

+

Some edges are only present in one direction in the data, +since every atom has a maximum number of neighbors. +We only use i->j edges here. So we lose some j->i edges +and add others by making it symmetric.
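The core idea, as a generic sketch rather than the method itself: append the flipped copy of every i->j edge so that a j->i counterpart always exists (the edge list here is hypothetical):

import torch

edge_index = torch.tensor([[0, 1],
                           [1, 2]])   # directed edges 0->1 and 1->2
# Flipping the two rows reverses each edge; concatenation yields both directions
symmetric = torch.cat([edge_index, edge_index.flip(0)], dim=1)
# tensor([[0, 1, 1, 2],
#         [1, 2, 0, 1]])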

+
+ +
+
+subselect_edges(data, graph, cutoff=None, max_neighbors=None)#
+

Subselect edges using a stricter cutoff and max_neighbors.

+
+ +
+
+generate_graph_dict(data, cutoff, max_neighbors)#
+

Generate a radius/nearest neighbor graph.

+
+ +
+
+subselect_graph(data, graph, cutoff, max_neighbors, cutoff_orig, max_neighbors_orig)#
+

If the new cutoff and max_neighbors is different from the original, +subselect the edges of a given graph.

+
+ +
+
+get_graphs_and_indices(data)#
+

Generate embedding and interaction graphs and indices.

+
+ +
+
+get_bases(main_graph, a2a_graph, a2ee2a_graph, qint_graph, trip_idx_e2e, trip_idx_a2e, trip_idx_e2a, quad_idx, num_atoms)#
+

Calculate and transform basis functions.

+
+ +
+
+forward(data)#
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/gemnet_oc/index.html b/autoapi/core/models/gemnet_oc/index.html
new file mode 100644
index 000000000..22251ddfb
--- /dev/null
+++ b/autoapi/core/models/gemnet_oc/index.html
@@ -0,0 +1,1049 @@
core.models.gemnet_oc — FAIR Chemistry Documentation

core.models.gemnet_oc#

+
+

Subpackages#

+ +
+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + +

GemNetOC

+
param num_atoms (int):
+

+
+

+
+
+class core.models.gemnet_oc.GemNetOC(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_quad_in: int, emb_size_quad_out: int, emb_size_aint_in: int, emb_size_aint_out: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_sbf: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, num_output_afteratom: int, num_atom_emb_layers: int = 0, num_global_out_layers: int = 2, regress_forces: bool = True, direct_forces: bool = False, use_pbc: bool = True, scale_backprop_forces: bool = False, cutoff: float = 6.0, cutoff_qint: float | None = None, cutoff_aeaint: float | None = None, cutoff_aint: float | None = None, max_neighbors: int = 50, max_neighbors_qint: int | None = None, max_neighbors_aeaint: int | None = None, max_neighbors_aint: int | None = None, enforce_max_neighbors_strictly: bool = True, rbf: dict[str, str] | None = None, rbf_spherical: dict | None = None, envelope: dict[str, str | int] | None = None, cbf: dict[str, str] | None = None, sbf: dict[str, str] | None = None, extensive: bool = True, forces_coupled: bool = False, output_init: str = 'HeOrthogonal', activation: str = 'silu', quad_interaction: bool = False, atom_edge_interaction: bool = False, edge_atom_interaction: bool = False, atom_interaction: bool = False, scale_basis: bool = False, qint_tags: list | None = None, num_elements: int = 83, otf_graph: bool = False, scale_file: str | None = None, **kwargs)#
+

Bases: fairchem.core.models.base.BaseModel

+
+
Parameters:
+
    +
  • (int) (bond_feat_dim)

  • +
  • (int)

  • +
  • num_targets (int) – Number of prediction targets.

  • +
  • num_spherical (int) – Controls maximum frequency.

  • +
  • num_radial (int) – Controls maximum frequency.

  • +
  • num_blocks (int) – Number of building blocks to be stacked.

  • +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_trip_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings +before the bilinear layer.

  • +
  • emb_size_trip_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings +after the bilinear layer.

  • +
  • emb_size_quad_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings +before the bilinear layer.

  • +
  • emb_size_quad_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings +after the bilinear layer.

  • +
  • emb_size_aint_in (int) – Embedding size in the atom interaction before the bilinear layer.

  • +
  • emb_size_aint_out (int) – Embedding size in the atom interaction after the bilinear layer.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • emb_size_sbf (int) – Embedding size of the spherical basis transformation (two angles).

  • +
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • +
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • +
  • num_concat (int) – Number of residual blocks after the concatenation.

  • +
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • +
  • num_output_afteratom (int) – Number of residual blocks in the output blocks +after adding the atom embedding.

  • +
  • num_atom_emb_layers (int) – Number of residual blocks for transforming atom embeddings.

  • +
  • num_global_out_layers (int) – Number of final residual blocks before the output.

  • +
  • regress_forces (bool) – Whether to predict forces. Default: True

  • +
  • direct_forces (bool) – If True predict forces based on aggregation of interatomic directions. +If False predict forces based on negative gradient of energy potential.

  • +
  • use_pbc (bool) – Whether to use periodic boundary conditions.

  • +
  • scale_backprop_forces (bool) – Whether to scale up the energy and then scale down the forces to prevent NaNs and infs in the backpropagated forces.

  • +
  • cutoff (float) – Embedding cutoff for interatomic connections and embeddings in Angstrom.

  • +
  • cutoff_qint (float) – Quadruplet interaction cutoff in Angstrom. +Optional. Uses cutoff per default.

  • +
  • cutoff_aeaint (float) – Edge-to-atom and atom-to-edge interaction cutoff in Angstrom. +Optional. Uses cutoff per default.

  • +
  • cutoff_aint (float) – Atom-to-atom interaction cutoff in Angstrom. +Optional. Uses maximum of all other cutoffs per default.

  • +
  • max_neighbors (int) – Maximum number of neighbors for interatomic connections and embeddings.

  • +
  • max_neighbors_qint (int) – Maximum number of quadruplet interactions per embedding. +Optional. Uses max_neighbors per default.

  • +
  • max_neighbors_aeaint (int) – Maximum number of edge-to-atom and atom-to-edge interactions per embedding. +Optional. Uses max_neighbors per default.

  • +
  • max_neighbors_aint (int) – Maximum number of atom-to-atom interactions per atom. +Optional. Uses maximum of all other neighbors per default.

  • +
  • enforce_max_neighbors_strictly (bool) – When subselecting edges based on the max_neighbors arguments, arbitrarily select among degenerate edges so that each atom ends up with exactly the requested number of neighbors.

  • +
  • rbf (dict) – Name and hyperparameters of the radial basis function.

  • +
  • rbf_spherical (dict) – Name and hyperparameters of the radial basis function used as part of the +circular and spherical bases. +Optional. Uses rbf per default.

  • +
  • envelope (dict) – Name and hyperparameters of the envelope function.

  • +
  • cbf (dict) – Name and hyperparameters of the circular basis function.

  • +
  • sbf (dict) – Name and hyperparameters of the spherical basis function.

  • +
  • extensive (bool) – Whether the output should be extensive (proportional to the number of atoms)

  • +
  • forces_coupled (bool) – If True, enforce that |F_st| = |F_ts|. No effect if direct_forces is False.

  • +
  • output_init (str) – Initialization method for the final dense layer.

  • +
  • activation (str) – Name of the activation function.

  • +
  • scale_file (str) – Path to the pytorch file containing the scaling factors.

  • +
  • quad_interaction (bool) – Whether to use quadruplet interactions (with dihedral angles)

  • +
  • atom_edge_interaction (bool) – Whether to use atom-to-edge interactions

  • +
  • edge_atom_interaction (bool) – Whether to use edge-to-atom interactions

  • +
  • atom_interaction (bool) – Whether to use atom-to-atom interactions

  • +
  • scale_basis (bool) – Whether to use a scaling layer in the raw basis function for better +numerical stability.

  • +
  • qint_tags (list) – Which atom tags to use quadruplet interactions for. +0=sub-surface bulk, 1=surface, 2=adsorbate atoms.

  • +
+
+
+
+
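A minimal construction sketch follows. It is a hedged example, not a recommended configuration: the fairchem import prefix and all hyperparameter values below are assumptions chosen only to illustrate the documented constructor arguments; in practice the model is usually built from a trainer config with model: gemnet_oc.

# Hedged sketch: constructing GemNet-OC directly with illustrative sizes.
from fairchem.core.models.gemnet_oc import GemNetOC  # import path assumed from this page

model = GemNetOC(
    num_atoms=None, bond_feat_dim=0, num_targets=1,
    num_spherical=7, num_radial=128, num_blocks=4,
    emb_size_atom=256, emb_size_edge=512,
    emb_size_trip_in=64, emb_size_trip_out=64,
    emb_size_quad_in=32, emb_size_quad_out=32,
    emb_size_aint_in=64, emb_size_aint_out=64,
    emb_size_rbf=16, emb_size_cbf=16, emb_size_sbf=32,
    num_before_skip=2, num_after_skip=2, num_concat=1, num_atom=3,
    num_output_afteratom=3,
    regress_forces=True, direct_forces=True,
    cutoff=12.0, max_neighbors=30,
    rbf={"name": "gaussian"},
    envelope={"name": "polynomial", "exponent": 5},
    cbf={"name": "spherical_harmonics"},
    sbf={"name": "legendre_outer"},
    quad_interaction=True, atom_edge_interaction=True,
    edge_atom_interaction=True, atom_interaction=True,
    qint_tags=[1, 2], otf_graph=True,
)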
+property num_params: int#
+
+ +
+
+set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint)#
+
+ +
+
+set_max_neighbors(max_neighbors, max_neighbors_qint, max_neighbors_aeaint, max_neighbors_aint)#
+
+ +
+
+init_basis_functions(num_radial, num_spherical, rbf, rbf_spherical, envelope, cbf, sbf, scale_basis)#
+
+ +
+
+init_shared_basis_layers(num_radial, num_spherical, emb_size_rbf, emb_size_cbf, emb_size_sbf)#
+
+ +
+
+calculate_quad_angles(V_st, V_qint_st, quad_idx)#
+

Calculate angles for quadruplet-based message passing.

+
+
Parameters:
+
    +
  • V_st (Tensor, shape = (nAtoms, 3)) – Normalized directions from s to t

  • +
  • V_qint_st (Tensor, shape = (nAtoms, 3)) – Normalized directions from s to t for the quadruplet +interaction graph

  • +
  • quad_idx (dict of torch.Tensor) – Indices relevant for quadruplet interactions.

  • +
+
+
Returns:
+

    +
  • cosφ_cab (Tensor, shape = (num_triplets_inint,)) – Cosine of angle between atoms c -> a <- b.

  • +
  • cosφ_abd (Tensor, shape = (num_triplets_qint,)) – Cosine of angle between atoms a -> b -> d.

  • +
  • angle_cabd (Tensor, shape = (num_quadruplets,)) – Dihedral angle between atoms c <- a-b -> d.

  • +
+

+
+
+
+ +
+
+select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, opposite_neg) torch.Tensor#
+

Use a mask to remove values of removed edges and then +duplicate the values for the correct edge direction.

+
+
Parameters:
+
    +
  • tensor (torch.Tensor) – Values to symmetrize for the new tensor.

  • +
  • mask (torch.Tensor) – Mask defining which edges go in the correct direction.

  • +
  • reorder_idx (torch.Tensor) – Indices defining how to reorder the tensor values after +concatenating the edge values of both directions.

  • +
  • opposite_neg (bool) – Whether the edge in the opposite direction should use the +negative tensor value.

  • +
+
+
Returns:
+

tensor_ordered – A tensor with symmetrized values.

+
+
Return type:
+

torch.Tensor

+
+
+
+ +
+
+symmetrize_edges(graph, batch_idx)#
+

Symmetrize edges to ensure existence of counter-directional edges.

+

Some edges are only present in one direction in the data, +since every atom has a maximum number of neighbors. +We only use i->j edges here. So we lose some j->i edges +and add others by making it symmetric.

+
+ +
+
+subselect_edges(data, graph, cutoff=None, max_neighbors=None)#
+

Subselect edges using a stricter cutoff and max_neighbors.

+
+ +
+
+generate_graph_dict(data, cutoff, max_neighbors)#
+

Generate a radius/nearest neighbor graph.

+
+ +
+
+subselect_graph(data, graph, cutoff, max_neighbors, cutoff_orig, max_neighbors_orig)#
+

If the new cutoff and max_neighbors are different from the original, subselect the edges of the given graph.

+
+ +
+
+get_graphs_and_indices(data)#
+

Generate embedding and interaction graphs and indices.

+
+ +
+
+get_bases(main_graph, a2a_graph, a2ee2a_graph, qint_graph, trip_idx_e2e, trip_idx_a2e, trip_idx_e2a, quad_idx, num_atoms)#
+

Calculate and transform basis functions.

+
+ +
+
+forward(data)#
+
+ +
+ +
+
+
diff --git a/autoapi/core/models/gemnet_oc/initializers/index.html b/autoapi/core/models/gemnet_oc/initializers/index.html
new file mode 100644

core.models.gemnet_oc.initializers#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + +

_standardize(kernel)

Makes sure that N*Var(W) = 1 and E[W] = 0

he_orthogonal_init(→ torch.Tensor)

Generate a weight matrix with variance according to He (Kaiming) initialization.

grid_init(→ torch.Tensor)

Generate a weight matrix so that each input value corresponds to one value on a regular grid between start and end.

log_grid_init(→ torch.Tensor)

Generate a weight matrix so that each input value corresponds to one value on a regular logarithmic grid between 10^start and 10^end.

get_initializer(name, **init_kwargs)

+
+
+core.models.gemnet_oc.initializers._standardize(kernel)#
+

Makes sure that N*Var(W) = 1 and E[W] = 0

+
+ +
+
+core.models.gemnet_oc.initializers.he_orthogonal_init(tensor: torch.Tensor) torch.Tensor#
+

Generate a weight matrix with variance according to He (Kaiming) initialization. Based on a random (semi-)orthogonal matrix, neural networks are expected to learn better when features are decorrelated (as stated by e.g. “Reducing overfitting in deep networks by decorrelating representations”, “Dropout: a simple way to prevent neural networks from overfitting”, “Exact solutions to the nonlinear dynamics of learning in deep linear neural networks”).

+
+ +
+
+core.models.gemnet_oc.initializers.grid_init(tensor: torch.Tensor, start: int = -1, end: int = 1) torch.Tensor#
+

Generate a weight matrix so that each input value corresponds to one value on a regular grid between start and end.

+
+ +
+
+core.models.gemnet_oc.initializers.log_grid_init(tensor: torch.Tensor, start: int = -4, end: int = 0) torch.Tensor#
+

Generate a weight matrix so that each input value corresponds to one value on a regular logarithmic grid between 10^start and 10^end.

+
+ +
+
+core.models.gemnet_oc.initializers.get_initializer(name, **init_kwargs)#
+
+ +
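A short usage sketch of the initializers above (the fairchem import prefix is an assumption; signatures follow the entries on this page):

import torch
from fairchem.core.models.gemnet_oc.initializers import he_orthogonal_init, grid_init

w1 = torch.empty(64, 128)
he_orthogonal_init(w1)           # He-scaled (semi-)orthogonal weights, filled in place and returned

w2 = torch.empty(16, 8)
grid_init(w2, start=-1, end=1)   # each input maps to one value on a regular grid in [-1, 1]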
diff --git a/autoapi/core/models/gemnet_oc/interaction_indices/index.html b/autoapi/core/models/gemnet_oc/interaction_indices/index.html
new file mode 100644

core.models.gemnet_oc.interaction_indices#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + +

get_triplets(graph, num_atoms)

Get all input edges b->a for each output edge c->a.

get_mixed_triplets(graph_in, graph_out, num_atoms[, ...])

Get all output edges (ingoing or outgoing) for each incoming edge.

get_quadruplets(main_graph, qint_graph, num_atoms)

Get all d->b for each edge c->a and connection b->a

+
+
+core.models.gemnet_oc.interaction_indices.get_triplets(graph, num_atoms: int)#
+

Get all input edges b->a for each output edge c->a. +It is possible that b=c, as long as the edges are distinct +(i.e. atoms b and c stem from different unit cells).

+
+
Parameters:
+
    +
  • graph (dict of torch.Tensor) – Contains the graph’s edge_index.

  • +
  • num_atoms (int) – Total number of atoms.

  • +
+
+
Returns:
+

+
in: torch.Tensor, shape (num_triplets,)

Indices of input edge b->a of each triplet b->a<-c

+
+
out: torch.Tensor, shape (num_triplets,)

Indices of output edge c->a of each triplet b->a<-c

+
+
out_agg: torch.Tensor, shape (num_triplets,)

Indices enumerating the intermediate edges of each output edge. +Used for creating a padded matrix and aggregating via matmul.

+
+
+

+
+
Return type:
+

Dictionary containing the entries

+
+
+
+ +
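A hedged sketch of calling get_triplets on a toy graph. The graph dict here carries only edge_index, per the parameter description above; real callers may pass additional keys, and the fairchem import prefix is assumed.

import torch
from fairchem.core.models.gemnet_oc.interaction_indices import get_triplets

edge_index = torch.tensor([[0, 1, 1, 2, 2, 0],    # source atoms
                           [1, 0, 2, 1, 0, 2]])   # target atoms
graph = {"edge_index": edge_index}

trip = get_triplets(graph, num_atoms=3)
# trip["in"], trip["out"], trip["out_agg"] index the b->a / c->a edges of each triplet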
+
+core.models.gemnet_oc.interaction_indices.get_mixed_triplets(graph_in, graph_out, num_atoms, to_outedge=False, return_adj=False, return_agg_idx=False)#
+

Get all output edges (ingoing or outgoing) for each incoming edge. +It is possible that in atom=out atom, as long as the edges are distinct +(i.e. they stem from different unit cells). In edges and out edges stem +from separate graphs (hence “mixed”) with shared atoms.

+
+
Parameters:
+
    +
  • graph_in (dict of torch.Tensor) – Contains the input graph’s edge_index and cell_offset.

  • +
  • graph_out (dict of torch.Tensor) – Contains the output graph’s edge_index and cell_offset. +Input and output graphs use the same atoms, but different edges.

  • +
  • num_atoms (int) – Total number of atoms.

  • +
  • to_outedge (bool) – Whether to map the output to the atom’s outgoing edges a->c +instead of the ingoing edges c->a.

  • +
  • return_adj (bool) – Whether to output the adjacency (incidence) matrix between output +edges and atoms adj_edges.

  • +
  • return_agg_idx (bool) – Whether to output the indices enumerating the intermediate edges +of each output edge.

  • +
+
+
Returns:
+

+
in: torch.Tensor, shape (num_triplets,)

Indices of input edges

+
+
out: torch.Tensor, shape (num_triplets,)

Indices of output edges

+
+
adj_edges: SparseTensor, shape (num_edges, num_atoms)

Adjacency (incidence) matrix between output edges and atoms, +with values specifying the input edges. +Only returned if return_adj is True.

+
+
out_agg: torch.Tensor, shape (num_triplets,)

Indices enumerating the intermediate edges of each output edge. +Used for creating a padded matrix and aggregating via matmul. +Only returned if return_agg_idx is True.

+
+
+

+
+
Return type:
+

Dictionary containing the entries

+
+
+
+ +
+
+core.models.gemnet_oc.interaction_indices.get_quadruplets(main_graph, qint_graph, num_atoms)#
+

Get all d->b for each edge c->a and connection b->a +Careful about periodic images! +Separate interaction cutoff not supported.

+
+
Parameters:
+
    +
  • main_graph (dict of torch.Tensor) – Contains the main graph’s edge_index and cell_offset. +The main graph defines which edges are embedded.

  • +
  • qint_graph (dict of torch.Tensor) – Contains the quadruplet interaction graph’s edge_index and +cell_offset. main_graph and qint_graph use the same atoms, +but different edges.

  • +
  • num_atoms (int) – Total number of atoms.

  • +
+
+
Returns:
+

+
triplet_in[‘in’]: torch.Tensor, shape (nTriplets,)

Indices of input edge d->b in triplet d->b->a.

+
+
triplet_in[‘out’]: torch.Tensor, shape (nTriplets,)

Interaction indices of output edge b->a in triplet d->b->a.

+
+
triplet_out[‘in’]: torch.Tensor, shape (nTriplets,)

Interaction indices of input edge b->a in triplet c->a<-b.

+
+
triplet_out[‘out’]: torch.Tensor, shape (nTriplets,)

Indices of output edge c->a in triplet c->a<-b.

+
+
out: torch.Tensor, shape (nQuadruplets,)

Indices of output edge c->a in quadruplet

+
+
trip_in_to_quad: torch.Tensor, shape (nQuadruplets,)

Indices to map from input triplet d->b->a +to quadruplet d->b->a<-c.

+
+
trip_out_to_quad: torch.Tensor, shape (nQuadruplets,)

Indices to map from output triplet c->a<-b +to quadruplet d->b->a<-c.

+
+
out_agg: torch.Tensor, shape (num_triplets,)

Indices enumerating the intermediate edges of each output edge. +Used for creating a padded matrix and aggregating via matmul.

+
+
+

+
+
Return type:
+

Dictionary containing the entries

+
+
+
+ +
diff --git a/autoapi/core/models/gemnet_oc/layers/atom_update_block/index.html b/autoapi/core/models/gemnet_oc/layers/atom_update_block/index.html
new file mode 100644

core.models.gemnet_oc.layers.atom_update_block#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

AtomUpdateBlock

Aggregate the message embeddings of the atoms

OutputBlock

Combines the atom update block and subsequent final dense layer.

+
+
+class core.models.gemnet_oc.layers.atom_update_block.AtomUpdateBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, activation=None)#
+

Bases: torch.nn.Module

+

Aggregate the message embeddings of the atoms

+
+
Parameters:
+
    +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis.

  • +
  • nHidden (int) – Number of residual blocks.

  • +
  • activation (callable/str) – Name of the activation function to use in the dense layers.

  • +
+
+
+
+
+get_mlp(units_in: int, units: int, nHidden: int, activation)#
+
+ +
+
+forward(h: torch.Tensor, m, basis_rad, idx_atom)#
+
+
Returns:
+

h – Atom embedding.

+
+
Return type:
+

torch.Tensor, shape=(nAtoms, emb_size_atom)

+
+
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.atom_update_block.OutputBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, nHidden_afteratom: int, activation: str | None = None, direct_forces: bool = True)#
+

Bases: AtomUpdateBlock

+

Combines the atom update block and subsequent final dense layer.

+
+
Parameters:
+
    +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis.

  • +
  • nHidden (int) – Number of residual blocks before adding the atom embedding.

  • +
  • nHidden_afteratom (int) – Number of residual blocks after adding the atom embedding.

  • +
  • activation (str) – Name of the activation function to use in the dense layers.

  • +
  • direct_forces (bool) – If true directly predict forces, i.e. without taking the gradient +of the energy potential.

  • +
+
+
+
+
+forward(h: torch.Tensor, m: torch.Tensor, basis_rad, idx_atom)#
+
+
Returns:
+

    +
  • torch.Tensor, shape=(nAtoms, emb_size_atom) – Output atom embeddings.

  • +
  • torch.Tensor, shape=(nEdges, emb_size_edge) – Output edge embeddings.

  • +
+

+
+
+
+ +
+ +
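A hedged sketch of using the atom update block with toy sizes (the fairchem import prefix and the exact meaning of idx_atom are assumptions; the constructor and forward signatures follow this page):

import torch
from fairchem.core.models.gemnet_oc.layers.atom_update_block import AtomUpdateBlock

block = AtomUpdateBlock(emb_size_atom=32, emb_size_edge=64, emb_size_rbf=8,
                        nHidden=2, activation="silu")

h = torch.randn(5, 32)                  # (nAtoms, emb_size_atom)
m = torch.randn(12, 64)                 # (nEdges, emb_size_edge)
basis_rad = torch.randn(12, 8)          # (nEdges, emb_size_rbf)
idx_atom = torch.randint(0, 5, (12,))   # atom receiving each edge message (assumed meaning)

h_new = block(h, m, basis_rad, idx_atom)   # -> (nAtoms, emb_size_atom)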
diff --git a/autoapi/core/models/gemnet_oc/layers/base_layers/index.html b/autoapi/core/models/gemnet_oc/layers/base_layers/index.html
new file mode 100644

core.models.gemnet_oc.layers.base_layers#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + +

Dense

Combines dense layer with scaling for silu activation.

ScaledSiLU

Base class for all neural network modules.

ResidualLayer

Residual block with output scaled by 1/sqrt(2).

+
+
+class core.models.gemnet_oc.layers.base_layers.Dense(in_features: int, out_features: int, bias: bool = False, activation: str | None = None)#
+

Bases: torch.nn.Module

+

Combines dense layer with scaling for silu activation.

+
+
Parameters:
+
    +
  • in_features (int) – Input embedding size.

  • +
  • out_features (int) – Output embedding size.

  • +
  • bias (bool) – True if use bias.

  • +
  • activation (str) – Name of the activation function to use.

  • +
+
+
+
+
+reset_parameters(initializer=he_orthogonal_init) None#
+
+ +
+
+forward(x)#
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.base_layers.ScaledSiLU#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x)#
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.base_layers.ResidualLayer(units: int, nLayers: int = 2, layer=Dense, **layer_kwargs)#
+

Bases: torch.nn.Module

+

Residual block with output scaled by 1/sqrt(2).

+
+
Parameters:
+
    +
  • units (int) – Input and output embedding size.

  • +
  • nLayers (int) – Number of dense layers.

  • +
  • layer (torch.nn.Module) – Class for the layers inside the residual block.

  • +
  • layer_kwargs (str) – Keyword arguments for initializing the layers.

  • +
+
+
+
+
+forward(input)#
+
+ +
+ +
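A hedged sketch combining the two building blocks above (fairchem import prefix assumed; passing activation through ResidualLayer's layer_kwargs is an assumption based on the documented signatures):

import torch
from fairchem.core.models.gemnet_oc.layers.base_layers import Dense, ResidualLayer

dense = Dense(in_features=16, out_features=32, activation="silu")
res = ResidualLayer(units=32, nLayers=2, layer=Dense, activation="silu")

x = torch.randn(4, 16)
y = res(dense(x))   # residual output is scaled by 1/sqrt(2) internally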
diff --git a/autoapi/core/models/gemnet_oc/layers/basis_utils/index.html b/autoapi/core/models/gemnet_oc/layers/basis_utils/index.html
new file mode 100644

core.models.gemnet_oc.layers.basis_utils#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

Jn(r, n)

numerical spherical bessel functions of order n

Jn_zeros(n, k)

Compute the first k zeros of the spherical bessel functions

spherical_bessel_formulas(n)

Computes the sympy formulas for the spherical bessel functions

bessel_basis(n, k)

Compute the sympy formulas for the normalized and rescaled spherical bessel

sph_harm_prefactor(l_degree, m_order)

Computes the constant pre-factor for the spherical harmonic

associated_legendre_polynomials(L_maxdegree[, ...])

Computes string formulas of the associated legendre polynomials

real_sph_harm(→ None)

Computes formula strings of the real part of the spherical harmonics

get_sph_harm_basis(L_maxdegree[, zero_m_only])

Get a function calculating the spherical harmonics basis from z and phi.

+
+
+core.models.gemnet_oc.layers.basis_utils.Jn(r: float, n: int)#
+

Numerical spherical Bessel functions of order n.

+
+ +
+
+core.models.gemnet_oc.layers.basis_utils.Jn_zeros(n: int, k: int)#
+

Compute the first k zeros of the spherical bessel functions +up to order n (excluded)

+
+ +
+
+core.models.gemnet_oc.layers.basis_utils.spherical_bessel_formulas(n: int)#
+

Computes the sympy formulas for the spherical bessel functions +up to order n (excluded)

+
+ +
+
+core.models.gemnet_oc.layers.basis_utils.bessel_basis(n: int, k: int)#
+

Compute the sympy formulas for the normalized and rescaled spherical bessel +functions up to order n (excluded) and maximum frequency k (excluded).

+
+
Returns:
+

bess_basis – Bessel basis formulas taking in a single argument x. +Has length n where each element has length k. -> In total n*k many.

+
+
Return type:
+

list

+
+
+
+ +
+
+core.models.gemnet_oc.layers.basis_utils.sph_harm_prefactor(l_degree: int, m_order: int)#
+

Computes the constant pre-factor for the spherical harmonic +of degree l and order m.

+
+
Parameters:
+
    +
  • l_degree (int) – Degree of the spherical harmonic. l >= 0

  • +
  • m_order (int) – Order of the spherical harmonic. -l <= m <= l

  • +
+
+
Returns:
+

factor

+
+
Return type:
+

float

+
+
+
+ +
+
+core.models.gemnet_oc.layers.basis_utils.associated_legendre_polynomials(L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True)#
+

Computes string formulas of the associated Legendre polynomials up to degree L (excluded).

+
+
Parameters:
+
    +
  • L_maxdegree (int) – Degree up to which to calculate the associated legendre polynomials +(degree L is excluded).

  • +
  • zero_m_only (bool) – If True only calculate the polynomials for the polynomials where m=0.

  • +
  • pos_m_only (bool) – If True only calculate the polynomials for the polynomials where m>=0. +Overwritten by zero_m_only.

  • +
+
+
Returns:
+

polynomials – Contains the sympy functions of the polynomials +(in total L many if zero_m_only is True else L^2 many).

+
+
Return type:
+

list

+
+
+
+ +
+
+core.models.gemnet_oc.layers.basis_utils.real_sph_harm(L_maxdegree: int, use_theta: bool, use_phi: bool = True, zero_m_only: bool = True) None#
+

Computes formula strings of the real part of the spherical harmonics up to degree L (excluded). Variables are either spherical coordinates phi and theta, or Cartesian coordinates x, y, z on the UNIT SPHERE.

+
+
Parameters:
+
    +
  • L_maxdegree (int) – Degree up to which to calculate the spherical harmonics +(degree L is excluded).

  • +
  • use_theta (bool) –

      +
    • True: Expects the input of the formula strings to contain theta.

    • +
    • False: Expects the input of the formula strings to contain z.

    • +
    +

  • +
  • use_phi (bool) –

      +
    • True: Expects the input of the formula strings to contain phi.

    • +
    • False: Expects the input of the formula strings to contain x and y.

    • +
    +

    Does nothing if zero_m_only is True

    +

  • +
  • zero_m_only (bool) – If True only calculate the harmonics where m=0.

  • +
+
+
Returns:
+

Y_lm_real – Formula strings of the real part of the spherical harmonics up to degree L (where degree L is not excluded). In total, L^2 spherical harmonics exist up to degree L (excluded). However, if zero_m_only is True, the total count is reduced to L.

+
+
Return type:
+

list

+
+
+
+ +
+
+core.models.gemnet_oc.layers.basis_utils.get_sph_harm_basis(L_maxdegree: int, zero_m_only: bool = True)#
+

Get a function calculating the spherical harmonics basis from z and phi.

+
+ +
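A brief hedged sketch of the symbolic helpers above (fairchem import prefix assumed; sympy is a dependency of these utilities):

from fairchem.core.models.gemnet_oc.layers.basis_utils import Jn_zeros, bessel_basis

zeros = Jn_zeros(n=3, k=4)          # first 4 zeros of the spherical Bessel functions up to order 3 (excluded)
formulas = bessel_basis(n=3, k=4)   # sympy expressions: list of length n, each with k entries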
diff --git a/autoapi/core/models/gemnet_oc/layers/efficient/index.html b/autoapi/core/models/gemnet_oc/layers/efficient/index.html
new file mode 100644

core.models.gemnet_oc.layers.efficient#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

BasisEmbedding

Embed a basis (CBF, SBF), optionally using the efficient reformulation.

EfficientInteractionBilinear

Efficient reformulation of the bilinear layer and subsequent summation.

+
+
+class core.models.gemnet_oc.layers.efficient.BasisEmbedding(num_radial: int, emb_size_interm: int, num_spherical: int | None = None)#
+

Bases: torch.nn.Module

+

Embed a basis (CBF, SBF), optionally using the efficient reformulation.

+
+
Parameters:
+
    +
  • num_radial (int) – Number of radial basis functions.

  • +
  • emb_size_interm (int) – Intermediate embedding size of triplets/quadruplets.

  • +
  • num_spherical (int) – Number of circular/spherical basis functions. +Only required if there is a circular/spherical basis.

  • +
+
+
+
+
+weight: torch.nn.Parameter#
+
+ +
+
+reset_parameters() None#
+
+ +
+
+forward(rad_basis, sph_basis=None, idx_rad_outer=None, idx_rad_inner=None, idx_sph_outer=None, idx_sph_inner=None, num_atoms=None)#
+
+
Parameters:
+
    +
  • rad_basis (torch.Tensor, shape=(num_edges, num_radial or num_orders * num_radial)) – Raw radial basis.

  • +
  • sph_basis (torch.Tensor, shape=(num_triplets or num_quadruplets, num_spherical)) – Raw spherical or circular basis.

  • +
  • idx_rad_outer (torch.Tensor, shape=(num_edges)) – Atom associated with each radial basis value. +Optional, used for efficient edge aggregation.

  • +
  • idx_rad_inner (torch.Tensor, shape=(num_edges)) – Enumerates radial basis values per atom. +Optional, used for efficient edge aggregation.

  • +
  • idx_sph_outer (torch.Tensor, shape=(num_triplets or num_quadruplets)) – Edge associated with each circular/spherical basis value. +Optional, used for efficient triplet/quadruplet aggregation.

  • +
  • idx_sph_inner (torch.Tensor, shape=(num_triplets or num_quadruplets)) – Enumerates circular/spherical basis values per edge. +Optional, used for efficient triplet/quadruplet aggregation.

  • +
  • num_atoms (int) – Total number of atoms. +Optional, used for efficient edge aggregation.

  • +
+
+
Returns:
+

    +
  • rad_W1 (torch.Tensor, shape=(num_edges, emb_size_interm, num_spherical))

  • +
  • sph (torch.Tensor, shape=(num_edges, Kmax, num_spherical)) – Kmax = maximum number of neighbors of the edges

  • +
+

+
+
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.efficient.EfficientInteractionBilinear(emb_size_in: int, emb_size_interm: int, emb_size_out: int)#
+

Bases: torch.nn.Module

+

Efficient reformulation of the bilinear layer and subsequent summation.

+
+
Parameters:
+
    +
  • emb_size_in (int) – Embedding size of input triplets/quadruplets.

  • +
  • emb_size_interm (int) – Intermediate embedding size of the basis transformation.

  • +
  • emb_size_out (int) – Embedding size of output triplets/quadruplets.

  • +
+
+
+
+
+forward(basis, m, idx_agg_outer, idx_agg_inner, idx_agg2_outer=None, idx_agg2_inner=None, agg2_out_size=None)#
+
+
Parameters:
+
    +
  • basis (Tuple (torch.Tensor, torch.Tensor)) – shapes = ((num_edges, emb_size_interm, num_spherical), (num_edges, num_spherical, Kmax)).
    First element: Radial basis multiplied with the weight matrix.
    Second element: Circular/spherical basis.

    +

  • +
  • m (torch.Tensor, shape=(num_edges, emb_size_in)) – Input edge embeddings

  • +
  • idx_agg_outer (torch.Tensor, shape=(num_triplets or num_quadruplets)) – Output edge aggregating this intermediate triplet/quadruplet edge.

  • +
  • idx_agg_inner (torch.Tensor, shape=(num_triplets or num_quadruplets)) – Enumerates intermediate edges per output edge.

  • +
  • idx_agg2_outer (torch.Tensor, shape=(num_edges)) – Output atom aggregating this edge.

  • +
  • idx_agg2_inner (torch.Tensor, shape=(num_edges)) – Enumerates edges per output atom.

  • +
  • agg2_out_size (int) – Number of output embeddings when aggregating twice. Typically +the number of atoms.

  • +
+
+
Returns:
+

m_ca – Aggregated edge/atom embeddings.

+
+
Return type:
+

torch.Tensor, shape=(num_edges, emb_size)

+
+
+
+ +
+ +
diff --git a/autoapi/core/models/gemnet_oc/layers/embedding_block/index.html b/autoapi/core/models/gemnet_oc/layers/embedding_block/index.html
new file mode 100644

core.models.gemnet_oc.layers.embedding_block#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

AtomEmbedding

Initial atom embeddings based on the atom type

EdgeEmbedding

Edge embedding based on the concatenation of atom embeddings

+
+
+class core.models.gemnet_oc.layers.embedding_block.AtomEmbedding(emb_size: int, num_elements: int)#
+

Bases: torch.nn.Module

+

Initial atom embeddings based on the atom type

+
+
Parameters:
+

emb_size (int) – Atom embeddings size

+
+
+
+
+forward(Z) torch.Tensor#
+
+
Returns:
+

h – Atom embeddings.

+
+
Return type:
+

torch.Tensor, shape=(nAtoms, emb_size)

+
+
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.embedding_block.EdgeEmbedding(atom_features: int, edge_features: int, out_features: int, activation: str | None = None)#
+

Bases: torch.nn.Module

+

Edge embedding based on the concatenation of atom embeddings +and a subsequent dense layer.

+
+
Parameters:
+
    +
  • atom_features (int) – Embedding size of the atom embedding.

  • +
  • edge_features (int) – Embedding size of the input edge embedding.

  • +
  • out_features (int) – Embedding size after the dense layer.

  • +
  • activation (str) – Activation function used in the dense layer.

  • +
+
+
+
+
+forward(h: torch.Tensor, m: torch.Tensor, edge_index) torch.Tensor#
+
+
Parameters:
+
    +
  • h (torch.Tensor, shape (num_atoms, atom_features)) – Atom embeddings.

  • +
  • m (torch.Tensor, shape (num_edges, edge_features)) – Radial basis in embedding block, +edge embedding in interaction block.

  • +
+
+
Returns:
+

m_st – Edge embeddings.

+
+
Return type:
+

torch.Tensor, shape=(nEdges, emb_size)

+
+
+
+ +
+ +
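A hedged sketch of the embedding blocks above on a toy 3-atom system (fairchem import prefix assumed; shapes follow the documented signatures):

import torch
from fairchem.core.models.gemnet_oc.layers.embedding_block import AtomEmbedding, EdgeEmbedding

atom_emb = AtomEmbedding(emb_size=64, num_elements=83)
Z = torch.tensor([1, 8, 8])                        # atomic numbers of a toy system
h = atom_emb(Z)                                    # -> (nAtoms, 64)

edge_emb = EdgeEmbedding(atom_features=64, edge_features=16,
                         out_features=64, activation="silu")
edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
m_rbf = torch.randn(4, 16)                         # radial basis per edge
m = edge_emb(h, m_rbf, edge_index)                 # -> (nEdges, 64)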
diff --git a/autoapi/core/models/gemnet_oc/layers/force_scaler/index.html b/autoapi/core/models/gemnet_oc/layers/force_scaler/index.html
new file mode 100644

core.models.gemnet_oc.layers.force_scaler#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

ForceScaler

Scales up the energy and then scales down the forces

+
+
+class core.models.gemnet_oc.layers.force_scaler.ForceScaler(init_scale: float = 2.0**8, growth_factor: float = 2.0, backoff_factor: float = 0.5, growth_interval: int = 2000, max_force_iters: int = 50, enabled: bool = True)#
+

Scales up the energy and then scales down the forces +to prevent NaNs and infs in calculations using AMP. +Inspired by torch.cuda.amp.GradScaler.

+
+
+scale(energy)#
+
+ +
+
+unscale(forces)#
+
+ +
+
+calc_forces(energy, pos)#
+
+ +
+
+calc_forces_and_update(energy, pos)#
+
+ +
+
+update() None#
+
+ +
+ +
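A hedged sketch of the force-scaling pattern: scale the energy, differentiate with respect to positions, then unscale the resulting forces (fairchem import prefix assumed; the energy expression is a stand-in for a model prediction):

import torch
from fairchem.core.models.gemnet_oc.layers.force_scaler import ForceScaler

scaler = ForceScaler()                      # defaults mirror torch.cuda.amp.GradScaler
pos = torch.randn(5, 3, requires_grad=True)
energy = (pos ** 2).sum()                   # stand-in for a model's energy prediction

forces = scaler.calc_forces(energy, pos)    # gradient-based forces, scaled/unscaled internally (assumed)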
diff --git a/autoapi/core/models/gemnet_oc/layers/index.html b/autoapi/core/models/gemnet_oc/layers/index.html
new file mode 100644

core.models.gemnet_oc.layers#

+
+

Submodules#

+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
diff --git a/autoapi/core/models/gemnet_oc/layers/interaction_block/index.html b/autoapi/core/models/gemnet_oc/layers/interaction_block/index.html
new file mode 100644

core.models.gemnet_oc.layers.interaction_block#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + +

InteractionBlock

Interaction block for GemNet-Q/dQ.

QuadrupletInteraction

Quadruplet-based message passing block.

TripletInteraction

Triplet-based message passing block.

PairInteraction

Pair-based message passing block.

+
+
+class core.models.gemnet_oc.layers.interaction_block.InteractionBlock(emb_size_atom: int, emb_size_edge: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_quad_in: int, emb_size_quad_out: int, emb_size_a2a_in: int, emb_size_a2a_out: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_sbf: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, num_atom_emb_layers: int = 0, quad_interaction: bool = False, atom_edge_interaction: bool = False, edge_atom_interaction: bool = False, atom_interaction: bool = False, activation=None)#
+

Bases: torch.nn.Module

+

Interaction block for GemNet-Q/dQ.

+
+
Parameters:
+
    +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_trip_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings +before the bilinear layer.

  • +
  • emb_size_trip_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings +after the bilinear layer.

  • +
  • emb_size_quad_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings +before the bilinear layer.

  • +
  • emb_size_quad_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings +after the bilinear layer.

  • +
  • emb_size_a2a_in (int) – Embedding size in the atom interaction before the bilinear layer.

  • +
  • emb_size_a2a_out (int) – Embedding size in the atom interaction after the bilinear layer.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • emb_size_sbf (int) – Embedding size of the spherical basis transformation (two angles).

  • +
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • +
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • +
  • num_concat (int) – Number of residual blocks after the concatenation.

  • +
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • +
  • num_atom_emb_layers (int) – Number of residual blocks for transforming atom embeddings.

  • +
  • quad_interaction (bool) – Whether to use quadruplet interactions.

  • +
  • atom_edge_interaction (bool) – Whether to use atom-to-edge interactions.

  • +
  • edge_atom_interaction (bool) – Whether to use edge-to-atom interactions.

  • +
  • atom_interaction (bool) – Whether to use atom-to-atom interactions.

  • +
  • activation (str) – Name of the activation function to use in the dense layers.

  • +
+
+
+
+
+forward(h, m, bases_qint, bases_e2e, bases_a2e, bases_e2a, basis_a2a_rad, basis_atom_update, edge_index_main, a2ee2a_graph, a2a_graph, id_swap, trip_idx_e2e, trip_idx_a2e, trip_idx_e2a, quad_idx)#
+
+
Returns:
+

    +
  • h (torch.Tensor, shape=(nEdges, emb_size_atom)) – Atom embeddings.

  • +
  • m (torch.Tensor, shape=(nEdges, emb_size_edge)) – Edge embeddings (c->a).

  • +
+

+
+
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.interaction_block.QuadrupletInteraction(emb_size_edge, emb_size_quad_in, emb_size_quad_out, emb_size_rbf, emb_size_cbf, emb_size_sbf, symmetric_mp=True, activation=None)#
+

Bases: torch.nn.Module

+

Quadruplet-based message passing block.

+
+
Parameters:
+
    +
  • emb_size_edge (int) – Embedding size of the edges.

  • +
  • emb_size_quad_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings +before the bilinear layer.

  • +
  • emb_size_quad_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings +after the bilinear layer.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • emb_size_sbf (int) – Embedding size of the spherical basis transformation (two angles).

  • +
  • symmetric_mp (bool) – Whether to use symmetric message passing and +update the edges in both directions.

  • +
  • activation (str) – Name of the activation function to use in the dense layers.

  • +
+
+
+
+
+forward(m, bases, idx, id_swap)#
+
+
Returns:
+

m – Edge embeddings (c->a).

+
+
Return type:
+

torch.Tensor, shape=(nEdges, emb_size_edge)

+
+
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.interaction_block.TripletInteraction(emb_size_in: int, emb_size_out: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_rbf: int, emb_size_cbf: int, symmetric_mp: bool = True, swap_output: bool = True, activation=None)#
+

Bases: torch.nn.Module

+

Triplet-based message passing block.

+
+
Parameters:
+
    +
  • emb_size_in (int) – Embedding size of the input embeddings.

  • +
  • emb_size_out (int) – Embedding size of the output embeddings.

  • +
  • emb_size_trip_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings +before the bilinear layer.

  • +
  • emb_size_trip_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings +after the bilinear layer.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • +
  • symmetric_mp (bool) – Whether to use symmetric message passing and +update the edges in both directions.

  • +
  • swap_output (bool) – Whether to swap the output embedding directions. +Only relevant if symmetric_mp is False.

  • +
  • activation (str) – Name of the activation function to use in the dense layers.

  • +
+
+
+
+
+forward(m, bases, idx, id_swap, expand_idx=None, idx_agg2=None, idx_agg2_inner=None, agg2_out_size=None)#
+
+
Returns:
+

m – Edge embeddings.

+
+
Return type:
+

torch.Tensor, shape=(nEdges, emb_size_edge)

+
+
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.interaction_block.PairInteraction(emb_size_atom, emb_size_pair_in, emb_size_pair_out, emb_size_rbf, activation=None)#
+

Bases: torch.nn.Module

+

Pair-based message passing block.

+
+
Parameters:
+
    +
  • emb_size_atom (int) – Embedding size of the atoms.

  • +
  • emb_size_pair_in (int) – Embedding size of the atom pairs before the bilinear layer.

  • +
  • emb_size_pair_out (int) – Embedding size of the atom pairs after the bilinear layer.

  • +
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • +
  • activation (str) – Name of the activation function to use in the dense layers.

  • +
+
+
+
+
+forward(h, rad_basis, edge_index, target_neighbor_idx)#
+
+
Returns:
+

h – Atom embeddings.

+
+
Return type:
+

torch.Tensor, shape=(num_atoms, emb_size_atom)

+
+
+
+ +
+ +
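A construction-only sketch of the main interaction block with toy sizes (fairchem import prefix and the specific values are assumptions). The full forward pass needs the graphs, bases, and index tensors produced by GemNetOC.get_graphs_and_indices and get_bases, so it is not shown here.

from fairchem.core.models.gemnet_oc.layers.interaction_block import InteractionBlock

block = InteractionBlock(
    emb_size_atom=32, emb_size_edge=64,
    emb_size_trip_in=16, emb_size_trip_out=16,
    emb_size_quad_in=8, emb_size_quad_out=8,
    emb_size_a2a_in=16, emb_size_a2a_out=16,
    emb_size_rbf=8, emb_size_cbf=8, emb_size_sbf=8,
    num_before_skip=1, num_after_skip=1, num_concat=1, num_atom=1,
    quad_interaction=True, atom_edge_interaction=True,
    edge_atom_interaction=True, atom_interaction=True,
    activation="silu",
)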
diff --git a/autoapi/core/models/gemnet_oc/layers/radial_basis/index.html b/autoapi/core/models/gemnet_oc/layers/radial_basis/index.html
new file mode 100644

core.models.gemnet_oc.layers.radial_basis#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + + + + +

PolynomialEnvelope

Polynomial envelope function that ensures a smooth cutoff.

ExponentialEnvelope

Exponential envelope function that ensures a smooth cutoff,

GaussianBasis

Base class for all neural network modules.

SphericalBesselBasis

First-order spherical Bessel basis

BernsteinBasis

Bernstein polynomial basis,

RadialBasis

+
param num_radial:
+

Number of basis functions. Controls the maximum frequency.

+
+
+

+
+
+class core.models.gemnet_oc.layers.radial_basis.PolynomialEnvelope(exponent: int)#
+

Bases: torch.nn.Module

+

Polynomial envelope function that ensures a smooth cutoff.

+
+
Parameters:
+

exponent (int) – Exponent of the envelope function.

+
+
+
+
+forward(d_scaled: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.radial_basis.ExponentialEnvelope#
+

Bases: torch.nn.Module

+

Exponential envelope function that ensures a smooth cutoff, +as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. +SpookyNet: Learning Force Fields with Electronic Degrees of Freedom +and Nonlocal Effects

+
+
+forward(d_scaled: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.radial_basis.GaussianBasis(start: float = 0.0, stop: float = 5.0, num_gaussians: int = 50, trainable: bool = False)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(dist: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.radial_basis.SphericalBesselBasis(num_radial: int, cutoff: float)#
+

Bases: torch.nn.Module

+

First-order spherical Bessel basis

+
+
Parameters:
+
    +
  • num_radial (int) – Number of basis functions. Controls the maximum frequency.

  • +
  • cutoff (float) – Cutoff distance in Angstrom.

  • +
+
+
+
+
+forward(d_scaled: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.radial_basis.BernsteinBasis(num_radial: int, pregamma_initial: float = 0.45264)#
+

Bases: torch.nn.Module

+

Bernstein polynomial basis, +as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. +SpookyNet: Learning Force Fields with Electronic Degrees of Freedom +and Nonlocal Effects

+
+
Parameters:
+
    +
  • num_radial (int) – Number of basis functions. Controls the maximum frequency.

  • +
  • pregamma_initial (float) – Initial value of exponential coefficient gamma. +Default: gamma = 0.5 * a_0**-1 = 0.94486, +inverse softplus -> pregamma = log e**gamma - 1 = 0.45264

  • +
+
+
+
+
+forward(d_scaled: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.radial_basis.RadialBasis(num_radial: int, cutoff: float, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None, scale_basis: bool = False)#
+

Bases: torch.nn.Module

+
+
Parameters:
+
    +
  • num_radial (int) – Number of basis functions. Controls the maximum frequency.

  • +
  • cutoff (float) – Cutoff distance in Angstrom.

  • +
  • rbf (dict = {"name": "gaussian"}) – Basis function and its hyperparameters.

  • +
  • envelope (dict = {"name": "polynomial", "exponent": 5}) – Envelope function and its hyperparameters.

  • +
  • scale_basis (bool) – Whether to scale the basis values for better numerical stability.

  • +
+
+
+
+
+forward(d: torch.Tensor) torch.Tensor#
+
+ +
+ +
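A hedged sketch of evaluating the radial basis on a batch of distances, using the default rbf and envelope choices documented above (fairchem import prefix assumed):

import torch
from fairchem.core.models.gemnet_oc.layers.radial_basis import RadialBasis

rbf = RadialBasis(
    num_radial=8,
    cutoff=6.0,
    rbf={"name": "gaussian"},
    envelope={"name": "polynomial", "exponent": 5},
)

d = torch.linspace(0.1, 6.0, steps=20)   # interatomic distances in Angstrom
values = rbf(d)                          # -> (20, 8), smoothly enveloped towards the cutoff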
diff --git a/autoapi/core/models/gemnet_oc/layers/spherical_basis/index.html b/autoapi/core/models/gemnet_oc/layers/spherical_basis/index.html
new file mode 100644

core.models.gemnet_oc.layers.spherical_basis#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

CircularBasisLayer

2D Fourier Bessel Basis

SphericalBasisLayer

3D Fourier Bessel Basis

+
+
+class core.models.gemnet_oc.layers.spherical_basis.CircularBasisLayer(num_spherical: int, radial_basis: core.models.gemnet_oc.layers.radial_basis.RadialBasis, cbf: dict, scale_basis: bool = False)#
+

Bases: torch.nn.Module

+

2D Fourier Bessel Basis

+
+
Parameters:
+
    +
  • num_spherical (int) – Number of basis functions. Controls the maximum frequency.

  • +
  • radial_basis (RadialBasis) – Radial basis function.

  • +
  • cbf (dict) – Name and hyperparameters of the circular basis function.

  • +
  • scale_basis (bool) – Whether to scale the basis values for better numerical stability.

  • +
+
+
+
+
+forward(D_ca, cosφ_cab)#
+
+ +
+ +
+
+class core.models.gemnet_oc.layers.spherical_basis.SphericalBasisLayer(num_spherical: int, radial_basis: core.models.gemnet_oc.layers.radial_basis.RadialBasis, sbf: dict, scale_basis: bool = False)#
+

Bases: torch.nn.Module

+

3D Fourier Bessel Basis

+
+
Parameters:
+
    +
  • num_spherical (int) – Number of basis functions. Controls the maximum frequency.

  • +
  • radial_basis (RadialBasis) – Radial basis functions.

  • +
  • sbf (dict) – Name and hyperparameters of the spherical basis function.

  • +
  • scale_basis (bool) – Whether to scale the basis values for better numerical stability.

  • +
+
+
+
+
+forward(D_ca, cosφ_cab, θ_cabd)#
+
+ +
+ +
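A hedged sketch of the circular basis layer; the cbf name and the packaging of the return value are assumptions, and the fairchem import prefix is assumed:

import torch
from fairchem.core.models.gemnet_oc.layers.radial_basis import RadialBasis
from fairchem.core.models.gemnet_oc.layers.spherical_basis import CircularBasisLayer

radial = RadialBasis(num_radial=8, cutoff=6.0)
cbf = CircularBasisLayer(num_spherical=7, radial_basis=radial,
                         cbf={"name": "spherical_harmonics"})

D_ca = torch.rand(10) * 6.0            # edge lengths in Angstrom
cos_cab = torch.rand(10) * 2.0 - 1.0   # cosines of the triplet angles
basis_out = cbf(D_ca, cos_cab)         # transformed radial and circular basis (exact packaging not shown on this page)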
diff --git a/autoapi/core/models/gemnet_oc/utils/index.html b/autoapi/core/models/gemnet_oc/utils/index.html
new file mode 100644

core.models.gemnet_oc.utils#

+

Copyright (c) Meta, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ragged_range(sizes)

Multiple concatenated ranges.

repeat_blocks(→ torch.Tensor)

Repeat blocks of indices.

masked_select_sparsetensor_flat(...)

calculate_interatomic_vectors(R, id_s, id_t, offsets_st)

Calculate the vectors connecting the given atom pairs,

inner_product_clamped(→ torch.Tensor)

Calculate the inner product between the given normalized vectors,

get_angle(→ torch.Tensor)

Calculate angles between atoms c -> a <- b.

vector_rejection(R_ab, P_n)

Project the vector R_ab onto a plane with normal vector P_n.

get_projected_angle(→ torch.Tensor)

Project the vector R_ab onto a plane with normal vector P_n,

mask_neighbors(neighbors, edge_mask)

get_neighbor_order(→ torch.Tensor)

Give a mask that filters out edges so that each atom has at most

get_inner_idx(idx, dim_size)

Assign an inner index to each element (neighbor) with the same index.

get_edge_id(edge_idx, cell_offsets, num_atoms)

+
+
+core.models.gemnet_oc.utils.ragged_range(sizes)#
+

Multiple concatenated ranges.

+

Examples

+

sizes = [1 4 2 3]
Return: [0 0 1 2 3 0 1 0 1 2]

+
+ +
+
+core.models.gemnet_oc.utils.repeat_blocks(sizes, repeats, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) torch.Tensor#
+

Repeat blocks of indices. +Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements

+

continuous_indexing: Whether to keep increasing the index after each block.
start_idx: Starting index.
block_inc: Number to increment by after each block, either global or per block.
    Shape: len(sizes) - 1
repeat_inc: Number to increment by after each repetition, either global or per block.

+
+
+

Examples

+

sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False
Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; repeat_inc = 4
Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; start_idx = 5
Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; block_inc = 1
Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7]
sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 1 2 0 1 2 3 4 3 4 3 4]
sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True
Return: [0 1 0 1 5 6 5 6]

+
+ +
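The semantics of the increment arguments are easiest to see in a naive, loop-based reference. The sketch below is an assumption for illustration (scalar block_inc and repeat_inc only), not the vectorized implementation used here:

import torch

def repeat_blocks_reference(sizes, repeats, continuous_indexing=True,
                            start_idx=0, block_inc=0, repeat_inc=0):
    # Naive reference for the examples above; scalar increments only.
    out = []
    offset = start_idx
    for block, (size, n_rep) in enumerate(zip(sizes, repeats)):
        if block > 0:
            offset += block_inc            # extra gap inserted after each block
        base = list(range(offset, offset + size))
        for rep in range(n_rep):
            out.extend(i + rep * repeat_inc for i in base)
        if continuous_indexing:
            offset += size                 # next block continues the numbering
        else:
            offset = start_idx             # every block restarts at start_idx
    return torch.tensor(out, dtype=torch.long)

# repeat_blocks_reference([1, 3, 2], [3, 2, 3])
# -> tensor([0, 0, 0, 1, 2, 3, 1, 2, 3, 4, 5, 4, 5, 4, 5])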
+
+core.models.gemnet_oc.utils.masked_select_sparsetensor_flat(src, mask) torch_sparse.SparseTensor#
+
+ +
+
+core.models.gemnet_oc.utils.calculate_interatomic_vectors(R, id_s, id_t, offsets_st)#
+

Calculate the vectors connecting the given atom pairs, +considering offsets from periodic boundary conditions (PBC).

+
+
Parameters:
+
    +
  • R (Tensor, shape = (nAtoms, 3)) – Atom positions.

  • +
  • id_s (Tensor, shape = (nEdges,)) – Indices of the source atom of the edges.

  • +
  • id_t (Tensor, shape = (nEdges,)) – Indices of the target atom of the edges.

  • +
  • offsets_st (Tensor, shape = (nEdges,)) – PBC offsets of the edges. +Subtract this from the correct direction.

  • +
+
+
Returns:
+

(D_st, V_st)

+
+
D_st: Tensor, shape = (nEdges,)

Distance from atom t to s.

+
+
V_st: Tensor, shape = (nEdges,)

Unit direction from atom t to s.

+
+
+

+
+
Return type:
+

tuple

+
+
+
+ +
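A minimal sketch of this computation follows; the sign convention for the PBC offsets is taken from the "subtract this" note above and should be verified against the source before relying on it:

import torch

def calculate_interatomic_vectors_sketch(R, id_s, id_t, offsets_st):
    # Displacement from target atom t to source atom s, corrected by the
    # periodic-boundary offset of each edge (sign convention assumed).
    V = R[id_s] - R[id_t] - offsets_st
    D = torch.linalg.norm(V, dim=-1)            # (nEdges,) distances
    V_unit = V / D.clamp(min=1e-12)[:, None]    # unit directions, guarded against /0
    return D, V_unit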
+
+core.models.gemnet_oc.utils.inner_product_clamped(x, y) torch.Tensor#
+

Calculate the inner product between the given normalized vectors, +giving a result between -1 and 1.

+
+ +
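Equivalent one-line sketch; the clamp guards any downstream acos/angle computation against round-off pushing the dot product outside [-1, 1]:

import torch

def inner_product_clamped_sketch(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Row-wise dot product of unit vectors, clamped into [-1, 1].
    return torch.sum(x * y, dim=-1).clamp(min=-1.0, max=1.0)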
+
+core.models.gemnet_oc.utils.get_angle(R_ac, R_ab) torch.Tensor#
+

Calculate angles between atoms c -> a <- b.

+
+
Parameters:
+
    +
  • R_ac (Tensor, shape = (N, 3)) – Vector from atom a to c.

  • +
  • R_ab (Tensor, shape = (N, 3)) – Vector from atom a to b.

  • +
+
+
Returns:
+

angle_cab – Angle between atoms c <- a -> b.

+
+
Return type:
+

Tensor, shape = (N,)

+
+
+
+ +
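Illustrative sketch using atan2, which is better conditioned near 0 and π than acos of the normalized dot product (an implementation detail assumed here, not quoted from the source):

import torch

def get_angle_sketch(R_ac: torch.Tensor, R_ab: torch.Tensor) -> torch.Tensor:
    # Angle at atom a between bond vectors a->c and a->b.
    cross = torch.cross(R_ac, R_ab, dim=-1).norm(dim=-1)
    dot = torch.sum(R_ac * R_ab, dim=-1)
    return torch.atan2(cross, dot)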
+
+core.models.gemnet_oc.utils.vector_rejection(R_ab, P_n)#
+

Project the vector R_ab onto a plane with normal vector P_n.

+
+
Parameters:
+
    +
  • R_ab (Tensor, shape = (N, 3)) – Vector from atom a to b.

  • +
  • P_n (Tensor, shape = (N, 3)) – Normal vector of a plane onto which to project R_ab.

  • +
+
+
Returns:
+

R_ab_proj – Projected vector (orthogonal to P_n).

+
+
Return type:
+

Tensor, shape = (N, 3)

+
+
+
+ +
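This is the standard vector rejection: remove from R_ab its component along P_n. A minimal sketch, assuming P_n is not necessarily normalized:

import torch

def vector_rejection_sketch(R_ab: torch.Tensor, P_n: torch.Tensor) -> torch.Tensor:
    # Subtract the component of R_ab along P_n, leaving the part in the
    # plane orthogonal to P_n.
    ab_dot_n = torch.sum(R_ab * P_n, dim=-1, keepdim=True)
    n_dot_n = torch.sum(P_n * P_n, dim=-1, keepdim=True)
    return R_ab - (ab_dot_n / n_dot_n) * P_n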
+
+core.models.gemnet_oc.utils.get_projected_angle(R_ab, P_n, eps: float = 0.0001) torch.Tensor#
+

Project the vector R_ab onto a plane with normal vector P_n, +then calculate the angle w.r.t. (x × P_n), +or (y × P_n) if the former would be ill-defined/numerically unstable.

+
+
Parameters:
+
    +
  • R_ab (Tensor, shape = (N, 3)) – Vector from atom a to b.

  • +
  • P_n (Tensor, shape = (N, 3)) – Normal vector of a plane onto which to project R_ab.

  • +
  • eps (float) – Norm of projection below which to use the y-axis instead of x.

  • +
+
+
Returns:
+

angle_ab – Angle on plane w.r.t. x- or y-axis.

+
+
Return type:
+

Tensor, shape = (N,)

+
+
+
+ +
+
+core.models.gemnet_oc.utils.mask_neighbors(neighbors, edge_mask)#
+
+ +
+
+core.models.gemnet_oc.utils.get_neighbor_order(num_atoms: int, index, atom_distance) torch.Tensor#
+

Give a mask that filters out edges so that each atom has at most +max_num_neighbors_threshold neighbors.

+
+ +
+
+core.models.gemnet_oc.utils.get_inner_idx(idx, dim_size)#
+

Assign an inner index to each element (neighbor) with the same index. +For example, with idx=[0 0 0 1 1 1 1 2 2] this returns [0 1 2 0 1 2 3 0 1]. +These indices allow reshaping neighbor indices into a dense matrix. +idx has to be sorted for this to work.

+
+ +
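A compact sketch of this bookkeeping (assumes idx is sorted, as required above):

import torch

def get_inner_idx_sketch(idx: torch.Tensor, dim_size: int) -> torch.Tensor:
    # Running counter within each group of equal (sorted) indices, e.g.
    # [0, 0, 0, 1, 1, 1, 1, 2, 2] -> [0, 1, 2, 0, 1, 2, 3, 0, 1].
    counts = torch.bincount(idx, minlength=dim_size)     # group sizes
    starts = torch.cumsum(counts, dim=0) - counts        # first position of each group
    return torch.arange(idx.numel(), device=idx.device) - starts[idx]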
+
+core.models.gemnet_oc.utils.get_edge_id(edge_idx, cell_offsets, num_atoms: int)#
+
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/index.html b/autoapi/core/models/index.html
new file mode 100644
index 000000000..1941d3dd3
--- /dev/null
+++ b/autoapi/core/models/index.html
@@ -0,0 +1,936 @@
core.models — FAIR Chemistry Documentation

core.models#

+
+

Subpackages#

+
+ +
+
+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Functions#

+ + + + + + +

model_name_to_local_file(→ str)

Download a pretrained checkpoint if it does not exist already

+
+
+

Attributes#

+ + + + + + +

available_pretrained_models

+
+
+core.models.available_pretrained_models#
+
+ +
+
+core.models.model_name_to_local_file(model_name: str, local_cache: str | pathlib.Path) str#
+

Download a pretrained checkpoint if it does not exist already

+
+
Parameters:
+
    +
  • model_name (str) – the model name. See available_pretrained_checkpoints.

  • +
  • local_cache (str or Path) – path to local cache directory

  • +
+
+
Returns:
+

local path to checkpoint file

+
+
Return type:
+

str

+
+
+
+ +
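Typical usage looks like the following; the model name string is a placeholder — substitute any key listed in available_pretrained_models:

from pathlib import Path

from fairchem.core.models import available_pretrained_models, model_name_to_local_file

print(available_pretrained_models)  # names accepted by model_name_to_local_file

# The name below is a placeholder; substitute one of the printed names.
checkpoint_path = model_name_to_local_file(
    "SOME-PRETRAINED-MODEL-NAME",
    local_cache=Path("~/ocp_checkpoints").expanduser(),
)
print(checkpoint_path)  # local path to the downloaded checkpoint file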
\ No newline at end of file
diff --git a/autoapi/core/models/model_registry/index.html b/autoapi/core/models/model_registry/index.html
new file mode 100644
index 000000000..f1791a384
--- /dev/null
+++ b/autoapi/core/models/model_registry/index.html
@@ -0,0 +1,828 @@
core.models.model_registry — FAIR Chemistry Documentation

core.models.model_registry#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + +

model_name_to_local_file(→ str)

Download a pretrained checkpoint if it does not exist already

+
+
+

Attributes#

+ + + + + + + + + +

MODEL_REGISTRY

available_pretrained_models

+
+
+core.models.model_registry.MODEL_REGISTRY#
+
+ +
+
+core.models.model_registry.available_pretrained_models#
+
+ +
+
+core.models.model_registry.model_name_to_local_file(model_name: str, local_cache: str | pathlib.Path) str#
+

Download a pretrained checkpoint if it does not exist already

+
+
Parameters:
+
    +
  • model_name (str) – the model name. See available_pretrained_checkpoints.

  • +
  • local_cache (str or Path) – path to local cache directory

  • +
+
+
Returns:
+

local path to checkpoint file

+
+
Return type:
+

str

+
+
+
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/painn/index.html b/autoapi/core/models/painn/index.html
new file mode 100644
index 000000000..f78c2610c
--- /dev/null
+++ b/autoapi/core/models/painn/index.html
@@ -0,0 +1,860 @@
core.models.painn — FAIR Chemistry Documentation

core.models.painn#

+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + +

PaiNN

PaiNN model based on the description in Schütt et al. (2021):

+
+
+class core.models.painn.PaiNN(num_atoms: int, bond_feat_dim: int, num_targets: int, hidden_channels: int = 512, num_layers: int = 6, num_rbf: int = 128, cutoff: float = 12.0, max_neighbors: int = 50, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None, regress_forces: bool = True, direct_forces: bool = True, use_pbc: bool = True, otf_graph: bool = True, num_elements: int = 83, scale_file: str | None = None)#
+

Bases: fairchem.core.models.base.BaseModel

+

PaiNN model based on the description in Schütt et al. (2021): +Equivariant message passing for the prediction of tensorial properties +and molecular spectra, https://arxiv.org/abs/2102.03150.

+
+
+property num_params: int#
+
+ +
+
+reset_parameters() None#
+
+ +
+
+select_symmetric_edges(tensor, mask, reorder_idx, inverse_neg) torch.Tensor#
+
+ +
+
+symmetrize_edges(edge_index, cell_offsets, neighbors, batch_idx, reorder_tensors, reorder_tensors_invneg)#
+

Symmetrize edges to ensure existence of counter-directional edges.

+

Some edges are only present in one direction in the data, +since every atom has a maximum number of neighbors. +If symmetric_edge_symmetrization is False, +we only use i->j edges here. So we lose some j->i edges +and add others by making it symmetric. +If symmetric_edge_symmetrization is True, +we always use both directions.

+
+ +
+
+generate_graph_values(data)#
+
+ +
+
+forward(data)#
+
+ +
+
+__repr__() str#
+

Return repr(self).

+
+ +
+ +
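A hedged construction example based on the signature above; the first three positional arguments are placeholder values (several fairchem models do not use num_atoms or bond_feat_dim), and whether these exact values are accepted depends on the installed version:

from fairchem.core.models.painn import PaiNN

model = PaiNN(
    num_atoms=0,          # placeholder value
    bond_feat_dim=0,      # placeholder value
    num_targets=1,        # single energy target
    hidden_channels=512,
    num_layers=6,
    num_rbf=128,
    cutoff=12.0,
    max_neighbors=50,
    regress_forces=True,
    direct_forces=True,
    use_pbc=True,
    otf_graph=True,
)
print(model.num_params, "trainable parameters")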
\ No newline at end of file
diff --git a/autoapi/core/models/painn/painn/index.html b/autoapi/core/models/painn/painn/index.html
new file mode 100644
index 000000000..f614abe5c
--- /dev/null
+++ b/autoapi/core/models/painn/painn/index.html
@@ -0,0 +1,1150 @@
core.models.painn.painn — FAIR Chemistry Documentation

core.models.painn.painn#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+

+

MIT License

+

Copyright (c) 2021 www.compscience.org

+

Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the “Software”), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions:

+

The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software.

+

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + +

PaiNN

PaiNN model based on the description in Schütt et al. (2021):

PaiNNMessage

Base class for creating message passing layers of the form

PaiNNUpdate

Base class for all neural network modules.

PaiNNOutput

Base class for all neural network modules.

GatedEquivariantBlock

Gated Equivariant Block as defined in Schütt et al. (2021):

+
+
+class core.models.painn.painn.PaiNN(num_atoms: int, bond_feat_dim: int, num_targets: int, hidden_channels: int = 512, num_layers: int = 6, num_rbf: int = 128, cutoff: float = 12.0, max_neighbors: int = 50, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None, regress_forces: bool = True, direct_forces: bool = True, use_pbc: bool = True, otf_graph: bool = True, num_elements: int = 83, scale_file: str | None = None)#
+

Bases: fairchem.core.models.base.BaseModel

+

PaiNN model based on the description in Schütt et al. (2021): +Equivariant message passing for the prediction of tensorial properties +and molecular spectra, https://arxiv.org/abs/2102.03150.

+
+
+property num_params: int#
+
+ +
+
+reset_parameters() None#
+
+ +
+
+select_symmetric_edges(tensor, mask, reorder_idx, inverse_neg) torch.Tensor#
+
+ +
+
+symmetrize_edges(edge_index, cell_offsets, neighbors, batch_idx, reorder_tensors, reorder_tensors_invneg)#
+

Symmetrize edges to ensure existence of counter-directional edges.

+

Some edges are only present in one direction in the data, +since every atom has a maximum number of neighbors. +If symmetric_edge_symmetrization is False, +we only use i->j edges here. So we lose some j->i edges +and add others by making it symmetric. +If symmetric_edge_symmetrization is True, +we always use both directions.

+
+ +
+
+generate_graph_values(data)#
+
+ +
+
+forward(data)#
+
+ +
+
+__repr__() str#
+

Return repr(self).

+
+ +
+ +
+
+class core.models.painn.painn.PaiNNMessage(hidden_channels, num_rbf)#
+

Bases: torch_geometric.nn.MessagePassing

+

Base class for creating message passing layers of the form

+
+\[\mathbf{x}_i^{\prime} = \gamma_{\mathbf{\Theta}} \left( \mathbf{x}_i, +\bigoplus_{j \in \mathcal{N}(i)} \, \phi_{\mathbf{\Theta}} +\left(\mathbf{x}_i, \mathbf{x}_j,\mathbf{e}_{j,i}\right) \right),\]
+

where \(\bigoplus\) denotes a differentiable, permutation invariant +function, e.g., sum, mean, min, max or mul, and +\(\gamma_{\mathbf{\Theta}}\) and \(\phi_{\mathbf{\Theta}}\) denote +differentiable functions such as MLPs. +See here for the accompanying tutorial.

+
+
Parameters:
+
    +
  • aggr (str or [str] or Aggregation, optional) – The aggregation scheme +to use, e.g., "add", "sum" "mean", +"min", "max" or "mul". +In addition, can be any +Aggregation module (or any string +that automatically resolves to it). +If given as a list, will make use of multiple aggregations in which +different outputs will get concatenated in the last dimension. +If set to None, the MessagePassing instantiation is +expected to implement its own aggregation logic via +aggregate(). (default: "add")

  • +
  • aggr_kwargs (Dict[str, Any], optional) – Arguments passed to the +respective aggregation function in case it gets automatically +resolved. (default: None)

  • +
  • flow (str, optional) – The flow direction of message passing +("source_to_target" or "target_to_source"). +(default: "source_to_target")

  • +
  • node_dim (int, optional) – The axis along which to propagate. +(default: -2)

  • +
  • decomposed_layers (int, optional) – The number of feature decomposition +layers, as introduced in the “Optimizing Memory Efficiency of +Graph Neural Networks on Edge Computing Platforms” paper. +Feature decomposition reduces the peak memory usage by slicing +the feature dimensions into separated feature decomposition layers +during GNN aggregation. +This method can accelerate GNN execution on CPU-based platforms +(e.g., 2-3x speedup on the +Reddit dataset) for common GNN +models such as GCN, +GraphSAGE, +GIN, etc. +However, this method is not applicable to all GNN operators +available, in particular for operators in which message computation +can not easily be decomposed, e.g. in attention-based GNNs. +The selection of the optimal value of decomposed_layers +depends both on the specific graph dataset and available hardware +resources. +A value of 2 is suitable in most cases. +Although the peak memory usage is directly associated with the +granularity of feature decomposition, the same is not necessarily +true for execution speedups. (default: 1)

  • +
+
+
+
+
+reset_parameters() None#
+

Resets all learnable parameters of the module.

+
+ +
+
+forward(x, vec, edge_index, edge_rbf, edge_vector)#
+

Runs the forward pass of the module.

+
+ +
+
+message(xh_j, vec_j, rbfh_ij, r_ij)#
+

Constructs messages from node \(j\) to node \(i\) +in analogy to \(\phi_{\mathbf{\Theta}}\) for each edge in +edge_index. +This function can take any argument as input which was initially +passed to propagate(). +Furthermore, tensors passed to propagate() can be mapped to the +respective nodes \(i\) and \(j\) by appending _i or +_j to the variable name, e.g. x_i and x_j.

+
+ +
+
+aggregate(features: tuple[torch.Tensor, torch.Tensor], index: torch.Tensor, dim_size: int) tuple[torch.Tensor, torch.Tensor]#
+

Aggregates messages from neighbors as +\(\bigoplus_{j \in \mathcal{N}(i)}\).

+

Takes in the output of message computation as first argument and any +argument which was initially passed to propagate().

+

By default, this function will delegate its call to the underlying +Aggregation module to reduce messages +as specified in __init__() by the aggr argument.

+
+ +
+
+update(inputs: tuple[torch.Tensor, torch.Tensor]) tuple[torch.Tensor, torch.Tensor]#
+

Updates node embeddings in analogy to +\(\gamma_{\mathbf{\Theta}}\) for each node +\(i \in \mathcal{V}\). +Takes in the output of aggregation as first argument and any argument +which was initially passed to propagate().

+
+ +
+ +
+
+class core.models.painn.painn.PaiNNUpdate(hidden_channels)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+reset_parameters() None#
+
+ +
+
+forward(x, vec)#
+
+ +
+ +
+
+class core.models.painn.painn.PaiNNOutput(hidden_channels)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+reset_parameters() None#
+
+ +
+
+forward(x, vec)#
+
+ +
+ +
+
+class core.models.painn.painn.GatedEquivariantBlock(hidden_channels, out_channels)#
+

Bases: torch.nn.Module

+

Gated Equivariant Block as defined in Schütt et al. (2021): +Equivariant message passing for the prediction of tensorial properties and molecular spectra

+
+
+reset_parameters() None#
+
+ +
+
+forward(x, v)#
+
+ +
+ +
+
\ No newline at end of file
diff --git a/autoapi/core/models/painn/utils/index.html b/autoapi/core/models/painn/utils/index.html
new file mode 100644
index 000000000..f4329e467
--- /dev/null
+++ b/autoapi/core/models/painn/utils/index.html
@@ -0,0 +1,824 @@
core.models.painn.utils — FAIR Chemistry Documentation

core.models.painn.utils#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

repeat_blocks(→ torch.Tensor)

Repeat blocks of indices.

get_edge_id(edge_idx, cell_offsets, num_atoms)

+
+
+core.models.painn.utils.repeat_blocks(sizes, repeats, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) torch.Tensor#
+

Repeat blocks of indices. +Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements

+

continuous_indexing: Whether to keep increasing the index after each block +start_idx: Starting index +block_inc: Number to increment by after each block,

+
+

either global or per block. Shape: len(sizes) - 1

+
+
+
repeat_inc: Number to increment by after each repetition,

either global or per block

+
+
+

Examples

+

sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False +Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1] +sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True +Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5] +sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; +repeat_inc = 4 +Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13] +sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; +start_idx = 5 +Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10] +sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; +block_inc = 1 +Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7] +sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True +Return: [0 1 2 0 1 2 3 4 3 4 3 4] +sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True +Return: [0 1 0 1 5 6 5 6]

+
+ +
+
+core.models.painn.utils.get_edge_id(edge_idx, cell_offsets, num_atoms: int)#
+
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/schnet/index.html b/autoapi/core/models/schnet/index.html
new file mode 100644
index 000000000..ed78b955b
--- /dev/null
+++ b/autoapi/core/models/schnet/index.html
@@ -0,0 +1,860 @@
core.models.schnet — FAIR Chemistry Documentation

core.models.schnet#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

SchNetWrap

Wrapper around the continuous-filter convolutional neural network SchNet from the

+
+
+class core.models.schnet.SchNetWrap(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, hidden_channels: int = 128, num_filters: int = 128, num_interactions: int = 6, num_gaussians: int = 50, cutoff: float = 10.0, readout: str = 'add')#
+

Bases: torch_geometric.nn.SchNet, fairchem.core.models.base.BaseModel

+

Wrapper around the continuous-filter convolutional neural network SchNet from the +“SchNet: A Continuous-filter Convolutional Neural Network for Modeling +Quantum Interactions”. Each layer uses an interaction +block of the form:

+
+\[\mathbf{x}^{\prime}_i = \sum_{j \in \mathcal{N}(i)} \mathbf{x}_j \odot +h_{\mathbf{\Theta}} ( \exp(-\gamma(\mathbf{e}_{j,i} - \mathbf{\mu}))),\]
+
+
Parameters:
+
    +
  • num_atoms (int) – Unused argument

  • +
  • bond_feat_dim (int) – Unused argument

  • +
  • num_targets (int) – Number of targets to predict.

  • +
  • use_pbc (bool, optional) – If set to True, account for periodic boundary conditions. +(default: True)

  • +
  • regress_forces (bool, optional) – If set to True, predict forces by differentiating +energy with respect to positions. +(default: True)

  • +
  • otf_graph (bool, optional) – If set to True, compute graph edges on the fly. +(default: False)

  • +
  • hidden_channels (int, optional) – Number of hidden channels. +(default: 128)

  • +
  • num_filters (int, optional) – Number of filters to use. +(default: 128)

  • +
  • num_interactions (int, optional) – Number of interaction blocks +(default: 6)

  • +
  • num_gaussians (int, optional) – The number of gaussians \(\mu\). +(default: 50)

  • +
  • cutoff (float, optional) – Cutoff distance for interatomic interactions. +(default: 10.0)

  • +
  • readout (string, optional) – Whether to apply "add" or +"mean" global aggregation. (default: "add")

  • +
+
+
+
+
+property num_params: int#
+
+ +
+
+_forward(data)#
+
+ +
+
+forward(data)#
+
+
Parameters:
+
    +
  • z (torch.Tensor) – Atomic number of each atom with shape +[num_atoms].

  • +
  • pos (torch.Tensor) – Coordinates of each atom with shape +[num_atoms, 3].

  • +
  • batch (torch.Tensor, optional) – Batch indices assigning each atom +to a separate molecule with shape [num_atoms]. +(default: None)

  • +
+
+
+
+ +
+ +
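Construction example restating the documented defaults; num_atoms and bond_feat_dim are documented above as unused, so zeros are passed as placeholders:

from fairchem.core.models.schnet import SchNetWrap

model = SchNetWrap(
    num_atoms=0,          # unused, placeholder
    bond_feat_dim=0,      # unused, placeholder
    num_targets=1,
    use_pbc=True,
    regress_forces=True,
    otf_graph=True,
    hidden_channels=128,
    num_filters=128,
    num_interactions=6,
    num_gaussians=50,
    cutoff=10.0,
    readout="add",
)
print(model.num_params, "parameters")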
\ No newline at end of file
diff --git a/autoapi/core/models/scn/index.html b/autoapi/core/models/scn/index.html
new file mode 100644
index 000000000..b465fabdd
--- /dev/null
+++ b/autoapi/core/models/scn/index.html
@@ -0,0 +1,906 @@
core.models.scn — FAIR Chemistry Documentation

core.models.scn#

+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + +

SphericalChannelNetwork

Spherical Channel Network

+
+
+class core.models.scn.SphericalChannelNetwork(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_num_neighbors: int = 20, cutoff: float = 8.0, max_num_elements: int = 90, num_interactions: int = 8, lmax: int = 6, mmax: int = 1, num_resolutions: int = 2, sphere_channels: int = 128, sphere_channels_reduce: int = 128, hidden_channels: int = 256, num_taps: int = -1, use_grid: bool = True, num_bands: int = 1, num_sphere_samples: int = 128, num_basis_functions: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False, direct_forces: bool = True)#
+

Bases: fairchem.core.models.base.BaseModel

+

Spherical Channel Network +Paper: Spherical Channels for Modeling Atomic Interactions

+
+
Parameters:
+
    +
  • use_pbc (bool) – Use periodic boundary conditions

  • +
  • regress_forces (bool) – Compute forces

  • +
  • otf_graph (bool) – Compute graph On The Fly (OTF)

  • +
  • max_num_neighbors (int) – Maximum number of neighbors per atom

  • +
  • cutoff (float) – Maximum distance between neighboring atoms in Angstroms

  • +
  • max_num_elements (int) – Maximum atomic number

  • +
  • num_interactions (int) – Number of layers in the GNN

  • +
  • lmax (int) – Maximum degree of the spherical harmonics (1 to 10)

  • +
  • mmax (int) – Maximum order of the spherical harmonics (0 or 1)

  • +
  • num_resolutions (int) – Number of resolutions used to compute messages; farther-away atoms have lower resolution (1 or 2)

  • +
  • sphere_channels (int) – Number of spherical channels

  • +
  • sphere_channels_reduce (int) – Number of spherical channels used during message passing (downsample or upsample)

  • +
  • hidden_channels (int) – Number of hidden units in message passing

  • +
  • num_taps (int) – Number of taps or rotations used during message passing (1 or otherwise set automatically based on mmax)

  • +
  • use_grid (bool) – Use non-linear pointwise convolution during aggregation

  • +
  • num_bands (int) – Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2)

  • +
  • num_sphere_samples (int) – Number of samples used to approximate the integration of the sphere in the output blocks

  • +
  • num_basis_functions (int) – Number of basis functions used for distance and atomic number blocks

  • +
  • distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu") – Basis function used for distances

  • +
  • basis_width_scalar (float) – Width of distance basis function

  • +
  • distance_resolution (float) – Distance between distance basis functions in Angstroms

  • +
  • show_timing_info (bool) – Show timing and memory info

  • +
+
+
+
+
+property num_params: int#
+
+ +
+
+energy_fc1: torch.nn.Linear#
+
+ +
+
+energy_fc2: torch.nn.Linear#
+
+ +
+
+energy_fc3: torch.nn.Linear#
+
+ +
+
+force_fc1: torch.nn.Linear#
+
+ +
+
+force_fc2: torch.nn.Linear#
+
+ +
+
+force_fc3: torch.nn.Linear#
+
+ +
+
+forward(data)#
+
+ +
+
+_forward_helper(data)#
+
+ +
+
+_init_edge_rot_mat(data, edge_index, edge_distance_vec)#
+
+ +
+
+_rank_edge_distances(edge_distance, edge_index, max_num_neighbors: int) torch.Tensor#
+
+ +
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/scn/sampling/index.html b/autoapi/core/models/scn/sampling/index.html
new file mode 100644
index 000000000..483325980
--- /dev/null
+++ b/autoapi/core/models/scn/sampling/index.html
@@ -0,0 +1,794 @@
core.models.scn.sampling — FAIR Chemistry Documentation

core.models.scn.sampling#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

CalcSpherePoints(→ torch.Tensor)

CalcSpherePointsRandom(→ torch.Tensor)

+
+
+core.models.scn.sampling.CalcSpherePoints(num_points: int, device: str = 'cpu') torch.Tensor#
+
+ +
+
+core.models.scn.sampling.CalcSpherePointsRandom(num_points: int, device) torch.Tensor#
+
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/scn/scn/index.html b/autoapi/core/models/scn/scn/index.html
new file mode 100644
index 000000000..d6133d9a9
--- /dev/null
+++ b/autoapi/core/models/scn/scn/index.html
@@ -0,0 +1,1055 @@
core.models.scn.scn — FAIR Chemistry Documentation

core.models.scn.scn#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + +

SphericalChannelNetwork

Spherical Channel Network

EdgeBlock

Base class for all neural network modules.

MessageBlock

Base class for all neural network modules.

DistanceBlock

Base class for all neural network modules.

+
+
+class core.models.scn.scn.SphericalChannelNetwork(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_num_neighbors: int = 20, cutoff: float = 8.0, max_num_elements: int = 90, num_interactions: int = 8, lmax: int = 6, mmax: int = 1, num_resolutions: int = 2, sphere_channels: int = 128, sphere_channels_reduce: int = 128, hidden_channels: int = 256, num_taps: int = -1, use_grid: bool = True, num_bands: int = 1, num_sphere_samples: int = 128, num_basis_functions: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False, direct_forces: bool = True)#
+

Bases: fairchem.core.models.base.BaseModel

+

Spherical Channel Network +Paper: Spherical Channels for Modeling Atomic Interactions

+
+
Parameters:
+
    +
  • use_pbc (bool) – Use periodic boundary conditions

  • +
  • regress_forces (bool) – Compute forces

  • +
  • otf_graph (bool) – Compute graph On The Fly (OTF)

  • +
  • max_num_neighbors (int) – Maximum number of neighbors per atom

  • +
  • cutoff (float) – Maximum distance between neighboring atoms in Angstroms

  • +
  • max_num_elements (int) – Maximum atomic number

  • +
  • num_interactions (int) – Number of layers in the GNN

  • +
  • lmax (int) – Maximum degree of the spherical harmonics (1 to 10)

  • +
  • mmax (int) – Maximum order of the spherical harmonics (0 or 1)

  • +
  • num_resolutions (int) – Number of resolutions used to compute messages; farther-away atoms have lower resolution (1 or 2)

  • +
  • sphere_channels (int) – Number of spherical channels

  • +
  • sphere_channels_reduce (int) – Number of spherical channels used during message passing (downsample or upsample)

  • +
  • hidden_channels (int) – Number of hidden units in message passing

  • +
  • num_taps (int) – Number of taps or rotations used during message passing (1 or otherwise set automatically based on mmax)

  • +
  • use_grid (bool) – Use non-linear pointwise convolution during aggregation

  • +
  • num_bands (int) – Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2)

  • +
  • num_sphere_samples (int) – Number of samples used to approximate the integration of the sphere in the output blocks

  • +
  • num_basis_functions (int) – Number of basis functions used for distance and atomic number blocks

  • +
  • distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu") – Basis function used for distances

  • +
  • basis_width_scalar (float) – Width of distance basis function

  • +
  • distance_resolution (float) – Distance between distance basis functions in Angstroms

  • +
  • show_timing_info (bool) – Show timing and memory info

  • +
+
+
+
+
+property num_params: int#
+
+ +
+
+energy_fc1: torch.nn.Linear#
+
+ +
+
+energy_fc2: torch.nn.Linear#
+
+ +
+
+energy_fc3: torch.nn.Linear#
+
+ +
+
+force_fc1: torch.nn.Linear#
+
+ +
+
+force_fc2: torch.nn.Linear#
+
+ +
+
+force_fc3: torch.nn.Linear#
+
+ +
+
+forward(data)#
+
+ +
+
+_forward_helper(data)#
+
+ +
+
+_init_edge_rot_mat(data, edge_index, edge_distance_vec)#
+
+ +
+
+_rank_edge_distances(edge_distance, edge_index, max_num_neighbors: int) torch.Tensor#
+
+ +
+ +
+
+class core.models.scn.scn.EdgeBlock(num_resolutions: int, sphere_channels_reduce, hidden_channels_list, cutoff_list, sphharm_list, sphere_channels, distance_expansion, max_num_elements: int, num_basis_functions: int, num_gaussians: int, use_grid: bool, act)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x, atomic_numbers, edge_distance, edge_index, cutoff_index)#
+
+ +
+ +
+
+class core.models.scn.scn.MessageBlock(sphere_channels_reduce, hidden_channels, num_basis_functions, sphharm, act)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x, x_edge, edge_index)#
+
+ +
+ +
+
+class core.models.scn.scn.DistanceBlock(in_channels, num_basis_functions: int, distance_expansion, max_num_elements: int, act)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(edge_distance, source_element, target_element)#
+
+ +
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/scn/smearing/index.html b/autoapi/core/models/scn/smearing/index.html
new file mode 100644
index 000000000..ab7f3c2a7
--- /dev/null
+++ b/autoapi/core/models/scn/smearing/index.html
@@ -0,0 +1,986 @@
core.models.scn.smearing — FAIR Chemistry Documentation

core.models.scn.smearing#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + +

GaussianSmearing

Base class for all neural network modules.

SigmoidSmearing

Base class for all neural network modules.

LinearSigmoidSmearing

Base class for all neural network modules.

SiLUSmearing

Base class for all neural network modules.

+
+
+class core.models.scn.smearing.GaussianSmearing(start: float = -5.0, stop: float = 5.0, num_gaussians: int = 50, basis_width_scalar: float = 1.0)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(dist) torch.Tensor#
+
+ +
+ +
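The generated entry above only shows the generic nn.Module boilerplate. Conceptually, Gaussian smearing expands a scalar distance into num_gaussians radial-basis features; the sketch below is an assumption about the computation inferred from the constructor arguments, not a quotation of the source:

import torch

def gaussian_smearing_sketch(dist: torch.Tensor,
                             start: float = -5.0,
                             stop: float = 5.0,
                             num_gaussians: int = 50,
                             basis_width_scalar: float = 1.0) -> torch.Tensor:
    # Expand each distance into `num_gaussians` Gaussian radial basis values.
    offsets = torch.linspace(start, stop, num_gaussians)       # Gaussian centers
    width = basis_width_scalar * (offsets[1] - offsets[0])     # shared width
    coeff = -0.5 / width**2
    diff = dist.view(-1, 1) - offsets.view(1, -1)              # (N, num_gaussians)
    return torch.exp(coeff * diff.pow(2))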
+
+class core.models.scn.smearing.SigmoidSmearing(start=-5.0, stop=5.0, num_sigmoid=50, basis_width_scalar=1.0)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(dist) torch.Tensor#
+
+ +
+ +
+
+class core.models.scn.smearing.LinearSigmoidSmearing(start: float = -5.0, stop: float = 5.0, num_sigmoid: int = 50, basis_width_scalar: float = 1.0)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(dist) torch.Tensor#
+
+ +
+ +
+
+class core.models.scn.smearing.SiLUSmearing(start: float = -5.0, stop: float = 5.0, num_output: int = 50, basis_width_scalar: float = 1.0)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(dist)#
+
+ +
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/scn/spherical_harmonics/index.html b/autoapi/core/models/scn/spherical_harmonics/index.html
new file mode 100644
index 000000000..b17c2570a
--- /dev/null
+++ b/autoapi/core/models/scn/spherical_harmonics/index.html
@@ -0,0 +1,924 @@
core.models.scn.spherical_harmonics — FAIR Chemistry Documentation

core.models.scn.spherical_harmonics#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

SphericalHarmonicsHelper

Helper functions for spherical harmonics calculations and representations

+
+
+

Functions#

+ + + + + + + + + +

wigner_D(l, alpha, beta, gamma)

_z_rot_mat(angle, l)

+
+
+

Attributes#

+ + + + + + +

_Jd

+
+
+core.models.scn.spherical_harmonics._Jd#
+
+ +
+
+class core.models.scn.spherical_harmonics.SphericalHarmonicsHelper(lmax: int, mmax: int, num_taps: int, num_bands: int)#
+

Helper functions for spherical harmonics calculations and representations

+
+
Parameters:
+
    +
  • lmax (int) – Maximum degree of the spherical harmonics

  • +
  • mmax (int) – Maximum order of the spherical harmonics

  • +
  • num_taps (int) – Number of taps or rotations (1 or otherwise set automatically based on mmax)

  • +
  • num_bands (int) – Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2)

  • +
+
+
+
+
+InitWignerDMatrix(edge_rot_mat) None#
+
+ +
+
+InitYRotMapping()#
+
+ +
+
+ToGrid(x, channels) torch.Tensor#
+
+ +
+
+FromGrid(x_grid, channels) torch.Tensor#
+
+ +
+
+CombineYRotations(x) torch.Tensor#
+
+ +
+
+Rotate(x) torch.Tensor#
+
+ +
+
+FlipGrid(grid, num_channels: int) torch.Tensor#
+
+ +
+
+RotateInv(x) torch.Tensor#
+
+ +
+
+RotateWigner(x, wigner) torch.Tensor#
+
+ +
+
+RotationMatrix(rot_x: float, rot_y: float, rot_z: float) torch.Tensor#
+
+ +
+
+RotationToWignerDMatrix(edge_rot_mat, start_lmax, end_lmax)#
+
+ +
+ +
+
+core.models.scn.spherical_harmonics.wigner_D(l, alpha, beta, gamma)#
+
+ +
+
+core.models.scn.spherical_harmonics._z_rot_mat(angle, l)#
+
+ +
\ No newline at end of file
diff --git a/autoapi/core/models/utils/activations/index.html b/autoapi/core/models/utils/activations/index.html
new file mode 100644
index 000000000..2caa49efb
--- /dev/null
+++ b/autoapi/core/models/utils/activations/index.html
@@ -0,0 +1,827 @@
core.models.utils.activations — FAIR Chemistry Documentation

core.models.utils.activations#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

Act

Base class for all neural network modules.

+
+
+class core.models.utils.activations.Act(act: str, slope: float = 0.05)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(input: torch.Tensor) torch.Tensor#
+
+ +
+ +
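Usage sketch; the set of accepted act strings is an assumption (check the module source for the full list), with "ssp" and "silu" shown here as commonly used values:

import torch

from fairchem.core.models.utils.activations import Act

act = Act("silu")        # the accepted name strings are an assumption
x = torch.randn(4, 8)
y = act(x)               # same shape as x, activation applied elementwise
print(y.shape)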
\ No newline at end of file
diff --git a/autoapi/core/models/utils/basis/index.html b/autoapi/core/models/utils/basis/index.html
new file mode 100644
index 000000000..457a7eaeb
--- /dev/null
+++ b/autoapi/core/models/utils/basis/index.html
@@ -0,0 +1,1166 @@
core.models.utils.basis — FAIR Chemistry Documentation

core.models.utils.basis#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + + + + + + + +

Sine

Base class for all neural network modules.

SIREN

Base class for all neural network modules.

SINESmearing

Base class for all neural network modules.

GaussianSmearing

Base class for all neural network modules.

FourierSmearing

Base class for all neural network modules.

Basis

Base class for all neural network modules.

SphericalSmearing

Base class for all neural network modules.

+
+
+class core.models.utils.basis.Sine(w0: float = 30.0)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x: torch.Tensor) torch.Tensor#
+
+ +
+ +
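Assuming the standard SIREN-style sine activation (sin(w0 · x)), usage is simply:

import torch

from fairchem.core.models.utils.basis import Sine

sine = Sine(w0=30.0)              # assumed to compute sin(w0 * x)
x = torch.linspace(-1.0, 1.0, 5)
print(sine(x))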
+
+class core.models.utils.basis.SIREN(layers: list[int], num_in_features: int, out_features: int, w0: float = 30.0, initializer: str | None = 'siren', c: float = 6)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(X: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.utils.basis.SINESmearing(num_in_features: int, num_freqs: int = 40, use_cosine: bool = False)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.utils.basis.GaussianSmearing(num_in_features: int, start: int = 0, end: int = 1, num_freqs: int = 50)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.utils.basis.FourierSmearing(num_in_features: int, num_freqs: int = 40, use_cosine: bool = False)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(x: torch.Tensor) torch.Tensor#
+
+ +
+ +
+
+class core.models.utils.basis.Basis(num_in_features: int, num_freqs: int = 50, basis_type: str = 'powersine', act: str = 'ssp', sph: SphericalSmearing | None = None)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+smearing: SINESmearing | FourierSmearing | GaussianSmearing | torch.nn.Sequential#
+
+ +
+
+forward(x: torch.Tensor, edge_attr_sph: torch.Tensor | None = None)#
+
+ +
+ +
+
+class core.models.utils.basis.SphericalSmearing(max_n: int = 10, option: str = 'all')#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+m: numpy.typing.NDArray[numpy.int_]#
+
+ +
+
+n: numpy.typing.NDArray[numpy.int_]#
+
+ +
+
+forward(xyz: torch.Tensor) torch.Tensor#
+
+ +
+ +
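The smearing modules documented above expand raw scalar features (for example interatomic distances) into a fixed-size basis before they are fed to a network. A minimal, hedged sketch of how they might be called follows; the fairchem.core import prefix, the (num_samples, num_in_features) input shape, and the use of normalized distances are assumptions rather than details taken from this page.

import torch

# Import path assumed from the documented module name (core.models.utils.basis).
from fairchem.core.models.utils.basis import GaussianSmearing, SINESmearing

# Assume each module takes a float tensor of shape (num_samples, num_in_features),
# e.g. edge distances normalized to the documented [start, end] range.
distances = torch.rand(32, 1)

gauss = GaussianSmearing(num_in_features=1, start=0, end=1, num_freqs=50)
sine = SINESmearing(num_in_features=1, num_freqs=40)

print(gauss(distances).shape)  # expanded Gaussian basis features
print(sine(distances).shape)   # expanded sinusoidal basis features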
+
+
\ No newline at end of file
diff --git a/autoapi/core/models/utils/index.html b/autoapi/core/models/utils/index.html
new file mode 100644
index 000000000..5a6804ca9
@@ -0,0 +1,760 @@
+core.models.utils — FAIR Chemistry Documentation

core.models.utils#

+
+

Submodules#

+ +
+
\ No newline at end of file
diff --git a/autoapi/core/modules/evaluator/index.html b/autoapi/core/modules/evaluator/index.html
new file mode 100644
index 000000000..801e6c4e1
@@ -0,0 +1,984 @@
+core.modules.evaluator — FAIR Chemistry Documentation

core.modules.evaluator#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

Evaluator

+
+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

forcesx_mae(prediction, target[, key])

forcesx_mse(prediction, target[, key])

forcesy_mae(prediction, target[, key])

forcesy_mse(prediction, target[, key])

forcesz_mae(prediction, target[, key])

forcesz_mse(prediction, target[, key])

energy_forces_within_threshold(→ dict[str, float | int])

energy_within_threshold(→ dict[str, float | int])

average_distance_within_threshold(→ dict[str, float | int])

min_diff(pred_pos, dft_pos, cell, pbc)

cosine_similarity(prediction, target[, key])

mae(→ dict[str, float | int])

mse(→ dict[str, float | int])

magnitude_error(→ dict[str, float | int])

+
+
+

Attributes#

+ + + + + + +

NONE

+
+
+core.modules.evaluator.NONE#
+
+ +
+
+class core.modules.evaluator.Evaluator(task: str | None = None, eval_metrics: dict | None = None)#
+
+
+task_metrics: ClassVar[dict[str, str]]#
+
+ +
+
+task_primary_metric: ClassVar[dict[str, str | None]]#
+
+ +
+
+eval(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], prev_metrics=None)#
+
+ +
+
+update(key, stat, metrics)#
+
+ +
+ +
+
+core.modules.evaluator.forcesx_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE)#
+
+ +
+
+core.modules.evaluator.forcesx_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE)#
+
+ +
+
+core.modules.evaluator.forcesy_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None)#
+
+ +
+
+core.modules.evaluator.forcesy_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None)#
+
+ +
+
+core.modules.evaluator.forcesz_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None)#
+
+ +
+
+core.modules.evaluator.forcesz_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None)#
+
+ +
+
+core.modules.evaluator.energy_forces_within_threshold(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) dict[str, float | int]#
+
+ +
+
+core.modules.evaluator.energy_within_threshold(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) dict[str, float | int]#
+
+ +
+
+core.modules.evaluator.average_distance_within_threshold(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) dict[str, float | int]#
+
+ +
+
+core.modules.evaluator.min_diff(pred_pos: torch.Tensor, dft_pos: torch.Tensor, cell: torch.Tensor, pbc: torch.Tensor)#
+
+ +
+
+core.modules.evaluator.cosine_similarity(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE)#
+
+ +
+
+core.modules.evaluator.mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) dict[str, float | int]#
+
+ +
+
+core.modules.evaluator.mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) dict[str, float | int]#
+
+ +
+
+core.modules.evaluator.magnitude_error(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE, p: int = 2) dict[str, float | int]#
+
+ +
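The Evaluator ties these metric functions together: given prediction and target dictionaries of tensors, eval() computes the metrics configured for a task and can accumulate them across batches via prev_metrics. A hedged sketch follows; the task name "s2ef", the fairchem.core import prefix, and the energy/forces/natoms keys are assumptions inferred from the metric names listed above.

import torch

# Import path assumed from the documented module name (core.modules.evaluator).
from fairchem.core.modules.evaluator import Evaluator

evaluator = Evaluator(task="s2ef")  # task name assumed; eval_metrics can also be passed explicitly

# Dicts of tensors; the keys are assumed to match the metrics being computed.
prediction = {
    "energy": torch.randn(4),
    "forces": torch.randn(12, 3),
    "natoms": torch.tensor([3, 3, 3, 3]),
}
target = {
    "energy": torch.randn(4),
    "forces": torch.randn(12, 3),
    "natoms": torch.tensor([3, 3, 3, 3]),
}

metrics = evaluator.eval(prediction, target)           # one batch
metrics = evaluator.eval(prediction, target, metrics)  # accumulate a second batch
print(sorted(metrics))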
+
+
\ No newline at end of file
diff --git a/autoapi/core/modules/exponential_moving_average/index.html b/autoapi/core/modules/exponential_moving_average/index.html
new file mode 100644
index 000000000..5b9a4a305
@@ -0,0 +1,897 @@
+core.modules.exponential_moving_average — FAIR Chemistry Documentation

core.modules.exponential_moving_average#

+

Copied (and improved) from: +fadel/pytorch_ema (MIT license)

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

ExponentialMovingAverage

Maintains (exponential) moving average of a set of parameters.

+
+
+class core.modules.exponential_moving_average.ExponentialMovingAverage(parameters: collections.abc.Iterable[torch.nn.Parameter], decay: float, use_num_updates: bool = False)#
+

Maintains (exponential) moving average of a set of parameters.

+
+
Parameters:
+
    +
  • parameters – Iterable of torch.nn.Parameter (typically from +model.parameters()).

  • +
  • decay – The exponential decay.

  • +
  • use_num_updates – Whether to use number of updates when computing +averages.

  • +
+
+
+
+
+_get_parameters(parameters: collections.abc.Iterable[torch.nn.Parameter] | None) collections.abc.Iterable[torch.nn.Parameter]#
+
+ +
+
+update(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) None#
+

Update currently maintained parameters.

+

Call this every time the parameters are updated, such as the result of +the optimizer.step() call.

+
+
Parameters:
+

parameters – Iterable of torch.nn.Parameter; usually the same set of +parameters used to initialize this object. If None, the +parameters with which this ExponentialMovingAverage was +initialized will be used.

+
+
+
+ +
+
+copy_to(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) None#
+

Copy current parameters into given collection of parameters.

+
+
Parameters:
+

parameters – Iterable of torch.nn.Parameter; the parameters to be +updated with the stored moving averages. If None, the +parameters with which this ExponentialMovingAverage was +initialized will be used.

+
+
+
+ +
+
+store(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) None#
+

Save the current parameters for restoring later.

+
+
Parameters:
+

parameters – Iterable of torch.nn.Parameter; the parameters to be temporarily stored. If None, the parameters with which this ExponentialMovingAverage was initialized will be used.

+
+
+
+ +
+
+restore(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) None#
+

Restore the parameters stored with the store method. +Useful to validate the model with EMA parameters without affecting the +original optimization process. Store the parameters before the +copy_to method. After validation (or model saving), use this to +restore the former parameters.

+
+
Parameters:
+

parameters – Iterable of torch.nn.Parameter; the parameters to be +updated with the stored parameters. If None, the +parameters with which this ExponentialMovingAverage was +initialized will be used.

+
+
+
+ +
+
+state_dict() dict#
+

Returns the state of the ExponentialMovingAverage as a dict.

+
+ +
+
+load_state_dict(state_dict: dict) None#
+

Loads the ExponentialMovingAverage state.

+
+
Parameters:
+

state_dict (dict) – EMA state. Should be an object returned +from a call to state_dict().

+
+
+
+ +
+ +
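Putting the methods above together, a typical pattern is: call update() after every optimizer step, then wrap validation in store()/copy_to()/restore() so the model is evaluated with the averaged weights while training continues from the raw ones. A minimal sketch (the fairchem.core import prefix and the toy model are assumptions):

import torch

# Import path assumed from the documented module name (core.modules.exponential_moving_average).
from fairchem.core.modules.exponential_moving_average import ExponentialMovingAverage

model = torch.nn.Linear(8, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)
ema = ExponentialMovingAverage(model.parameters(), decay=0.999)

for _ in range(10):
    loss = model(torch.randn(4, 8)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.update()  # refresh the shadow (averaged) parameters after each step

ema.store()    # stash the current raw parameters
ema.copy_to()  # evaluate with the averaged parameters
# ... run validation here ...
ema.restore()  # put the raw parameters back and keep training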
+
+
\ No newline at end of file
diff --git a/autoapi/core/modules/index.html b/autoapi/core/modules/index.html
new file mode 100644
index 000000000..13e8caebe
@@ -0,0 +1,783 @@
+core.modules — FAIR Chemistry Documentation

core.modules#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Subpackages#

+ +
+
+

Submodules#

+ +
+
\ No newline at end of file
diff --git a/autoapi/core/modules/loss/index.html b/autoapi/core/modules/loss/index.html
new file mode 100644
index 000000000..53bb00ec3
@@ -0,0 +1,930 @@
+core.modules.loss — FAIR Chemistry Documentation

core.modules.loss#

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + +

L2MAELoss

Base class for all neural network modules.

AtomwiseL2Loss

Base class for all neural network modules.

DDPLoss

Base class for all neural network modules.

+
+
+class core.modules.loss.L2MAELoss(reduction: str = 'mean')#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(input: torch.Tensor, target: torch.Tensor)#
+
+ +
+ +
+
+class core.modules.loss.AtomwiseL2Loss(reduction: str = 'mean')#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(input: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor)#
+
+ +
+ +
+
+class core.modules.loss.DDPLoss(loss_fn, loss_name: str = 'mae', reduction: str = 'mean')#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+forward(input: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor | None = None, batch_size: int | None = None)#
+
+ +
+ +
+
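A short sketch of how these loss modules might be used follows; the fairchem.core import prefix and the random tensors are assumptions, and DDPLoss is only constructed (not called) here because its reduction behavior is tied to distributed training.

import torch

# Import path assumed from the documented module name (core.modules.loss).
from fairchem.core.modules.loss import L2MAELoss, DDPLoss

pred_forces = torch.randn(10, 3)
true_forces = torch.randn(10, 3)

loss_fn = L2MAELoss(reduction="mean")
print(loss_fn(pred_forces, true_forces))  # aggregate force error for this toy batch

# DDPLoss wraps a base loss so that reductions stay consistent across distributed workers;
# constructed only, since calling it meaningfully assumes a distributed setup.
ddp_loss = DDPLoss(L2MAELoss(reduction="none"), loss_name="l2mae", reduction="mean")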
+
\ No newline at end of file
diff --git a/autoapi/core/modules/normalizer/index.html b/autoapi/core/modules/normalizer/index.html
new file mode 100644
index 000000000..6ec10f7c4
@@ -0,0 +1,824 @@
+core.modules.normalizer — FAIR Chemistry Documentation

core.modules.normalizer#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

Normalizer

Normalize a Tensor and restore it later.

+
+
+class core.modules.normalizer.Normalizer(tensor: torch.Tensor | None = None, mean=None, std=None, device=None)#
+

Normalize a Tensor and restore it later.

+
+
+to(device) None#
+
+ +
+
+norm(tensor: torch.Tensor) torch.Tensor#
+
+ +
+
+denorm(normed_tensor: torch.Tensor) torch.Tensor#
+
+ +
+
+state_dict()#
+
+ +
+
+load_state_dict(state_dict) None#
+
+ +
+ +
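A minimal sketch of the normalize/denormalize round trip (the fairchem.core import prefix and the toy tensor are assumptions):

import torch

# Import path assumed from the documented module name (core.modules.normalizer).
from fairchem.core.modules.normalizer import Normalizer

energies = torch.randn(100)

# Statistics are taken from the reference tensor; mean/std can also be passed explicitly.
normalizer = Normalizer(tensor=energies)

normed = normalizer.norm(energies)    # scale to normalized units
restored = normalizer.denorm(normed)  # back to the original units
state = normalizer.state_dict()       # mean/std, e.g. for saving in a checkpoint
print(torch.allclose(restored, energies, atol=1e-6))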
+
+
\ No newline at end of file
diff --git a/autoapi/core/modules/scaling/compat/index.html b/autoapi/core/modules/scaling/compat/index.html
new file mode 100644
index 000000000..2dc7879e4
@@ -0,0 +1,814 @@
+core.modules.scaling.compat — FAIR Chemistry Documentation

core.modules.scaling.compat#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

_load_scale_dict(scale_file)

Loads scale factors from either:

load_scales_compat(→ None)

+
+
+

Attributes#

+ + + + + + +

ScaleDict

+
+
+core.modules.scaling.compat.ScaleDict#
+
+ +
+
+core.modules.scaling.compat._load_scale_dict(scale_file: str | ScaleDict | None)#
+

Loads scale factors from either:
- a JSON file mapping scale factor names to scale values
- a python dictionary pickled object (loaded using torch.load) mapping scale factor names to scale values
- a dictionary mapping scale factor names to scale values

+
+ +
+
+core.modules.scaling.compat.load_scales_compat(module: torch.nn.Module, scale_file: str | ScaleDict | None) None#
+
+ +
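A hedged sketch of loading scale factors from a JSON file follows; the file name and scale-factor names are illustrative, and in real use the module passed in would be a model that actually contains ScaleFactor submodules.

import json

import torch

# Import path assumed from the documented module name (core.modules.scaling.compat).
from fairchem.core.modules.scaling.compat import load_scales_compat

# A JSON scale file simply maps scale factor names to values (names here are made up).
with open("scale_factors.json", "w") as f:
    json.dump({"int_block_1": 1.73, "int_block_2": 0.94}, f)

model = torch.nn.Linear(4, 4)  # stand-in; a real model would contain ScaleFactor modules
load_scales_compat(model, "scale_factors.json")  # also accepts a dict or a torch.load-able file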
+
+
\ No newline at end of file
diff --git a/autoapi/core/modules/scaling/fit/index.html b/autoapi/core/modules/scaling/fit/index.html
new file mode 100644
index 000000000..60be747bd
@@ -0,0 +1,801 @@
+core.modules.scaling.fit — FAIR Chemistry Documentation

core.modules.scaling.fit#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + +

_prefilled_input(→ str)

_train_batch(→ None)

main(→ None)

+
+
+core.modules.scaling.fit._prefilled_input(prompt: str, prefill: str = '') str#
+
+ +
+
+core.modules.scaling.fit._train_batch(trainer: fairchem.core.trainers.base_trainer.BaseTrainer, batch) None#
+
+ +
+
+core.modules.scaling.fit.main(*, num_batches: int = 16) None#
+
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/modules/scaling/index.html b/autoapi/core/modules/scaling/index.html
new file mode 100644
index 000000000..5cb4ecd18
@@ -0,0 +1,921 @@
+core.modules.scaling — FAIR Chemistry Documentation

core.modules.scaling#

+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + +

ScaleFactor

Base class for all neural network modules.

+
+
+class core.modules.scaling.ScaleFactor(name: str | None = None, enforce_consistency: bool = True)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+property fitted: bool#
+
+ +
+
+scale_factor: torch.Tensor#
+
+ +
+
+name: str | None#
+
+ +
+
+index_fn: IndexFn | None#
+
+ +
+
+stats: _Stats | None#
+
+ +
+
+_enforce_consistency(state_dict, prefix, _local_metadata, _strict, _missing_keys, _unexpected_keys, _error_msgs) None#
+
+ +
+
+reset_() None#
+
+ +
+
+set_(scale: float | torch.Tensor) None#
+
+ +
+
+initialize_(*, index_fn: IndexFn | None = None) None#
+
+ +
+
+fit_context_()#
+
+ +
+
+fit_()#
+
+ +
+
+_observe(x: torch.Tensor, ref: torch.Tensor | None = None) None#
+
+ +
+
+forward(x: torch.Tensor, *, ref: torch.Tensor | None = None) torch.Tensor#
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/modules/scaling/scale_factor/index.html b/autoapi/core/modules/scaling/scale_factor/index.html
new file mode 100644
index 000000000..2be58ba51
@@ -0,0 +1,997 @@
+core.modules.scaling.scale_factor — FAIR Chemistry Documentation

core.modules.scaling.scale_factor#

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

_Stats

dict() -> new empty dictionary

ScaleFactor

Base class for all neural network modules.

+
+
+

Functions#

+ + + + + + +

_check_consistency(→ None)

+
+
+

Attributes#

+ + + + + + +

IndexFn

+
+
+class core.modules.scaling.scale_factor._Stats#
+

Bases: TypedDict

+

dict() -> new empty dictionary
dict(mapping) -> new dictionary initialized from a mapping object’s (key, value) pairs
dict(iterable) -> new dictionary initialized as if via:

    d = {}
    for k, v in iterable:
        d[k] = v

dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. For example: dict(one=1, two=2)

+
+
+
+
+variance_in: float#
+
+ +
+
+variance_out: float#
+
+ +
+
+n_samples: int#
+
+ +
+ +
+
+core.modules.scaling.scale_factor.IndexFn#
+
+ +
+
+core.modules.scaling.scale_factor._check_consistency(old: torch.Tensor, new: torch.Tensor, key: str) None#
+
+ +
+
+class core.modules.scaling.scale_factor.ScaleFactor(name: str | None = None, enforce_consistency: bool = True)#
+

Bases: torch.nn.Module

+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+
+
+

Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

+
+

Note

+

As per the example above, an __init__() call to the parent class +must be made before assignment on the child.

+
+
+
Variables:
+

training (bool) – Boolean represents whether this module is in training or +evaluation mode.

+
+
+
+
+property fitted: bool#
+
+ +
+
+scale_factor: torch.Tensor#
+
+ +
+
+name: str | None#
+
+ +
+
+index_fn: IndexFn | None#
+
+ +
+
+stats: _Stats | None#
+
+ +
+
+_enforce_consistency(state_dict, prefix, _local_metadata, _strict, _missing_keys, _unexpected_keys, _error_msgs) None#
+
+ +
+
+reset_() None#
+
+ +
+
+set_(scale: float | torch.Tensor) None#
+
+ +
+
+initialize_(*, index_fn: IndexFn | None = None) None#
+
+ +
+
+fit_context_()#
+
+ +
+
+fit_()#
+
+ +
+
+_observe(x: torch.Tensor, ref: torch.Tensor | None = None) None#
+
+ +
+
+forward(x: torch.Tensor, *, ref: torch.Tensor | None = None) torch.Tensor#
+
+ +
+ +
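A sketch of where a ScaleFactor typically lives follows: it is registered inside a module and applied to an intermediate output, optionally with a reference tensor so the factor can later be fitted to match variances. The fairchem.core import prefix and the toy block are assumptions.

import torch
import torch.nn as nn

# Import path assumed from the documented module name (core.modules.scaling.scale_factor).
from fairchem.core.modules.scaling.scale_factor import ScaleFactor

class TinyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin = nn.Linear(16, 16)
        self.scale = ScaleFactor(name="tiny_block_out")  # name is illustrative

    def forward(self, x):
        out = self.lin(x)
        # ref lets the factor compare output statistics against the input while fitting
        return self.scale(out, ref=x)

block = TinyBlock()
print(block.scale.fitted)  # False until a value is set or fitted
block.scale.set_(1.0)      # set an explicit value; fitting uses fit_context_()/fit_()
print(block.scale.fitted)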
+
+
\ No newline at end of file
diff --git a/autoapi/core/modules/scaling/util/index.html b/autoapi/core/modules/scaling/util/index.html
new file mode 100644
index 000000000..50f32c0ac
@@ -0,0 +1,781 @@
+core.modules.scaling.util — FAIR Chemistry Documentation

core.modules.scaling.util#

+
+

Module Contents#

+
+

Functions#

+ + + + + + +

ensure_fitted(→ None)

+
+
+core.modules.scaling.util.ensure_fitted(module: torch.nn.Module, warn: bool = False) None#
+
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/modules/scheduler/index.html b/autoapi/core/modules/scheduler/index.html
new file mode 100644
index 000000000..899b1204d
@@ -0,0 +1,820 @@
+core.modules.scheduler — FAIR Chemistry Documentation

core.modules.scheduler#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

LRScheduler

Learning rate scheduler class for torch.optim learning rate schedulers

+
+
+class core.modules.scheduler.LRScheduler(optimizer, config)#
+

Learning rate scheduler class for torch.optim learning rate schedulers

+

Notes

+

If no learning rate scheduler is specified in the config, the default scheduler is warmup_lr_lambda (fairchem.core.common.utils) rather than no scheduler at all; this is for backward-compatibility reasons. To run without a learning rate scheduler, specify scheduler: “Null” in the optim section of the config.

+
+
Parameters:
+
    +
  • optimizer (obj) – torch optim object

  • +
  • config (dict) – Optim dict from the input config

  • +
+
+
+
+
+step(metrics=None, epoch=None) None#
+
+ +
+
+filter_kwargs(config)#
+
+ +
+
+get_lr()#
+
+ +
+ +
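A sketch of driving LRScheduler from an optim-style config dict follows; the config keys below are assumptions chosen to line up with ReduceLROnPlateau's keyword arguments, and the fairchem.core import prefix is likewise assumed.

import torch

# Import path assumed from the documented module name (core.modules.scheduler).
from fairchem.core.modules.scheduler import LRScheduler

model = torch.nn.Linear(8, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)

# Keys mirror a typical `optim` section of a config; exact names are assumptions.
optim_config = {
    "scheduler": "ReduceLROnPlateau",
    "mode": "min",
    "factor": 0.8,
    "patience": 3,
}

scheduler = LRScheduler(optimizer, optim_config)
val_loss = 0.42  # placeholder validation metric
scheduler.step(metrics=val_loss)  # plateau-style schedulers need a metric
print(scheduler.get_lr())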
+
+
\ No newline at end of file
diff --git a/autoapi/core/modules/transforms/index.html b/autoapi/core/modules/transforms/index.html
new file mode 100644
index 000000000..70d8e84a1
@@ -0,0 +1,811 @@
+core.modules.transforms — FAIR Chemistry Documentation

core.modules.transforms#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

DataTransforms

+
+
+

Functions#

+ + + + + + +

decompose_tensor(→ torch_geometric.data.Data)

+
+
+class core.modules.transforms.DataTransforms(config)#
+
+
+__call__(data_object)#
+
+ +
+ +
+
+core.modules.transforms.decompose_tensor(data_object, config) torch_geometric.data.Data#
+
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/preprocessing/atoms_to_graphs/index.html b/autoapi/core/preprocessing/atoms_to_graphs/index.html
new file mode 100644
index 000000000..2284f1e80
@@ -0,0 +1,1065 @@
+core.preprocessing.atoms_to_graphs — FAIR Chemistry Documentation

core.preprocessing.atoms_to_graphs#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

AtomsToGraphs

A class to help convert periodic atomic structures to graphs.

+
+
+

Attributes#

+ + + + + + + + + +

AseAtomsAdaptor

shell

+
+
+core.preprocessing.atoms_to_graphs.AseAtomsAdaptor#
+
+ +
+
+core.preprocessing.atoms_to_graphs.shell#
+
+ +
+
+class core.preprocessing.atoms_to_graphs.AtomsToGraphs(max_neigh: int = 200, radius: int = 6, r_energy: bool = False, r_forces: bool = False, r_distances: bool = False, r_edges: bool = True, r_fixed: bool = True, r_pbc: bool = False, r_stress: bool = False, r_data_keys: collections.abc.Sequence[str] | None = None)#
+

A class to help convert periodic atomic structures to graphs.

+

The AtomsToGraphs class takes in periodic atomic structures in the form of ASE atoms objects and converts them into graph representations for use in PyTorch. The primary purpose of this class is to determine the nearest neighbors within some radius around each individual atom, taking into account PBC, and to set the pair index and distance between atom pairs appropriately. Lastly, atomic properties and the graph information are put into a PyTorch Geometric data object for use with PyTorch.

+
+
Parameters:
+
    +
  • max_neigh (int) – Maximum number of neighbors to consider.

  • radius (int or float) – Cutoff radius in Angstroms to search for neighbors.

  • r_energy (bool) – Return the energy with other properties. Default is False, so the energy will not be returned.

  • r_forces (bool) – Return the forces with other properties. Default is False, so the forces will not be returned.

  • r_stress (bool) – Return the stress with other properties. Default is False, so the stress will not be returned.

  • r_distances (bool) – Return the distances with other properties. Default is False, so the distances will not be returned.

  • r_edges (bool) – Return interatomic edges with other properties. Default is True, so edges will be returned.

  • r_fixed (bool) – Return a binary vector with flags for fixed (1) vs free (0) atoms. Default is True, so the fixed indices will be returned.

  • r_pbc (bool) – Return the periodic boundary conditions with other properties. Default is False, so the periodic boundary conditions will not be returned.

  • r_data_keys (sequence of str, optional) – Return values corresponding to given keys in atoms.info data with other properties. Default is None, so no data will be returned as properties.
+
+
+
+
+max_neigh#
+

Maximum number of neighbors to consider.

+
+
Type:
+

int

+
+
+
+ +
+
+radius#
+

Cutoff radius in Angstroms to search for neighbors.

+
+
Type:
+

int or float

+
+
+
+ +
+
+r_energy#
+

Return the energy with other properties. Default is False, so the energy will not be returned.

+
+
Type:
+

bool

+
+
+
+ +
+
+r_forces#
+

Return the forces with other properties. Default is False, so the forces will not be returned.

+
+
Type:
+

bool

+
+
+
+ +
+
+r_stress#
+

Return the stress with other properties. Default is False, so the stress will not be returned.

+
+
Type:
+

bool

+
+
+
+ +
+
+r_distances#
+

Return the distances with other properties.

+
+
Type:
+

bool

+
+
+
+ +
+
+Default is False, so the distances will not be returned.
+
+ +
+
+r_edges#
+

Return interatomic edges with other properties. Default is True, so edges will be returned.

+
+
Type:
+

bool

+
+
+
+ +
+
+r_fixed#
+

Return a binary vector with flags for fixed (1) vs free (0) atoms.

+
+
Type:
+

bool

+
+
+
+ +
+
+Default is True, so the fixed indices will be returned.
+
+ +
+
+r_pbc#
+

Return the periodic boundary conditions with other properties.

+
+
Type:
+

bool

+
+
+
+ +
+
+Default is False, so the periodic boundary conditions will not be returned.
+
+ +
+
+r_data_keys#
+

Return values corresponding to given keys in atoms.info data with other

+
+
Type:
+

sequence of str, optional

+
+
+
+ +
+
+properties. Default is None, so no data will be returned as properties.
+
+ +
+
+_get_neighbors_pymatgen(atoms: ase.Atoms)#
+

Performs a nearest neighbor search and returns edge index, distances, and cell offsets.

+
+ +
+
+_reshape_features(c_index, n_index, n_distance, offsets)#
+

Stacks center and neighbor indices and reshapes distances; takes in np.arrays and returns torch tensors.

+
+ +
+
+convert(atoms: ase.Atoms, sid=None)#
+

Convert a single atomic structure to a graph.

+
+
Parameters:
+
    +
  • atoms (ase.atoms.Atoms) – An ASE atoms object.

  • sid (uniquely identifying object) – An identifier that can be used to track the structure in downstream tasks. Common sids used in OCP datasets include unique strings or integers.
+
+
Returns:
+

A torch geometric data object with positions, atomic_numbers, tags, and optionally energy, forces, distances, edges, and periodic boundary conditions. Optional properties can be included by setting r_property=True when constructing the class.

+
+
Return type:
+

data (torch_geometric.data.Data)

+
+
+
+ +
+
+convert_all(atoms_collection, processed_file_path: str | None = None, collate_and_save=False, disable_tqdm=False)#
+

Convert all atoms objects in a list or in an ase.db to graphs.

+
+
Parameters:
+
    +
  • atoms_collection (list of ase.atoms.Atoms or ase.db.sqlite.SQLite3Database) – Either a list of ASE atoms objects or an ASE database.

  • processed_file_path (str) – A string of the path to where the processed file will be written. Default is None.

  • collate_and_save (bool) – A boolean to collate and save or not. Default is False, so will not write a file.
+
+
Returns:
+

A list of torch geometric data objects containing molecular graph info and properties.

+
+
Return type:
+

data_list (list of torch_geometric.data.Data)

+
+
+
+ +
+ +
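A short sketch of converting an ASE structure with this class follows; the fairchem.core import prefix and the toy Cu slab are assumptions, and the keyword values simply mirror the defaults described above.

from ase.build import add_adsorbate, fcc111

# Import path assumed; the class is also re-exported by the core.preprocessing package documented below.
from fairchem.core.preprocessing import AtomsToGraphs

slab = fcc111("Cu", size=(2, 2, 3), vacuum=10.0)
add_adsorbate(slab, "O", height=1.2, position="fcc")

a2g = AtomsToGraphs(max_neigh=50, radius=6, r_energy=False, r_forces=False, r_edges=True, r_fixed=True)

data = a2g.convert(slab, sid="cu_slab_0")  # single torch_geometric.data.Data object
data_list = a2g.convert_all([slab])        # list of Data objects (optionally collated and saved)
print(data)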
+
+
\ No newline at end of file
diff --git a/autoapi/core/preprocessing/index.html b/autoapi/core/preprocessing/index.html
new file mode 100644
index 000000000..bb1b5c5d4
@@ -0,0 +1,1046 @@
+core.preprocessing — FAIR Chemistry Documentation

core.preprocessing#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + +

AtomsToGraphs

A class to help convert periodic atomic structures to graphs.

+
+
+class core.preprocessing.AtomsToGraphs(max_neigh: int = 200, radius: int = 6, r_energy: bool = False, r_forces: bool = False, r_distances: bool = False, r_edges: bool = True, r_fixed: bool = True, r_pbc: bool = False, r_stress: bool = False, r_data_keys: collections.abc.Sequence[str] | None = None)#
+

A class to help convert periodic atomic structures to graphs.

+

The AtomsToGraphs class takes in periodic atomic structures in the form of ASE atoms objects and converts them into graph representations for use in PyTorch. The primary purpose of this class is to determine the nearest neighbors within some radius around each individual atom, taking into account PBC, and to set the pair index and distance between atom pairs appropriately. Lastly, atomic properties and the graph information are put into a PyTorch Geometric data object for use with PyTorch.

+
+
Parameters:
+
    +
  • max_neigh (int) – Maximum number of neighbors to consider.

  • radius (int or float) – Cutoff radius in Angstroms to search for neighbors.

  • r_energy (bool) – Return the energy with other properties. Default is False, so the energy will not be returned.

  • r_forces (bool) – Return the forces with other properties. Default is False, so the forces will not be returned.

  • r_stress (bool) – Return the stress with other properties. Default is False, so the stress will not be returned.

  • r_distances (bool) – Return the distances with other properties. Default is False, so the distances will not be returned.

  • r_edges (bool) – Return interatomic edges with other properties. Default is True, so edges will be returned.

  • r_fixed (bool) – Return a binary vector with flags for fixed (1) vs free (0) atoms. Default is True, so the fixed indices will be returned.

  • r_pbc (bool) – Return the periodic boundary conditions with other properties. Default is False, so the periodic boundary conditions will not be returned.

  • r_data_keys (sequence of str, optional) – Return values corresponding to given keys in atoms.info data with other properties. Default is None, so no data will be returned as properties.
+
+
+
+
+max_neigh#
+

Maximum number of neighbors to consider.

+
+
Type:
+

int

+
+
+
+ +
+
+radius#
+

Cutoff radius in Angstroms to search for neighbors.

+
+
Type:
+

int or float

+
+
+
+ +
+
+r_energy#
+

Return the energy with other properties. Default is False, so the energy will not be returned.

+
+
Type:
+

bool

+
+
+
+ +
+
+r_forces#
+

Return the forces with other properties. Default is False, so the forces will not be returned.

+
+
Type:
+

bool

+
+
+
+ +
+
+r_stress#
+

Return the stress with other properties. Default is False, so the stress will not be returned.

+
+
Type:
+

bool

+
+
+
+ +
+
+r_distances#
+

Return the distances with other properties.

+
+
Type:
+

bool

+
+
+
+ +
+
+Default is False, so the distances will not be returned.
+
+ +
+
+r_edges#
+

Return interatomic edges with other properties. Default is True, so edges will be returned.

+
+
Type:
+

bool

+
+
+
+ +
+
+r_fixed#
+

Return a binary vector with flags for fixed (1) vs free (0) atoms.

+
+
Type:
+

bool

+
+
+
+ +
+
+Default is True, so the fixed indices will be returned.
+
+ +
+
+r_pbc#
+

Return the periodic boundary conditions with other properties.

+
+
Type:
+

bool

+
+
+
+ +
+
+Default is False, so the periodic boundary conditions will not be returned.
+
+ +
+
+r_data_keys#
+

Return values corresponding to given keys in atoms.info data with other

+
+
Type:
+

sequence of str, optional

+
+
+
+ +
+
+properties. Default is None, so no data will be returned as properties.
+
+ +
+
+_get_neighbors_pymatgen(atoms: ase.Atoms)#
+

Performs a nearest neighbor search and returns edge index, distances, and cell offsets.

+
+ +
+
+_reshape_features(c_index, n_index, n_distance, offsets)#
+

Stacks center and neighbor indices and reshapes distances; takes in np.arrays and returns torch tensors.

+
+ +
+
+convert(atoms: ase.Atoms, sid=None)#
+

Convert a single atomic structure to a graph.

+
+
Parameters:
+
    +
  • atoms (ase.atoms.Atoms) – An ASE atoms object.

  • sid (uniquely identifying object) – An identifier that can be used to track the structure in downstream tasks. Common sids used in OCP datasets include unique strings or integers.
+
+
Returns:
+

A torch geometric data object with positions, atomic_numbers, tags, and optionally energy, forces, distances, edges, and periodic boundary conditions. Optional properties can be included by setting r_property=True when constructing the class.

+
+
Return type:
+

data (torch_geometric.data.Data)

+
+
+
+ +
+
+convert_all(atoms_collection, processed_file_path: str | None = None, collate_and_save=False, disable_tqdm=False)#
+

Convert all atoms objects in a list or in an ase.db to graphs.

+
+
Parameters:
+
    +
  • atoms_collection (list of ase.atoms.Atoms or ase.db.sqlite.SQLite3Database) – Either a list of ASE atoms objects or an ASE database.

  • processed_file_path (str) – A string of the path to where the processed file will be written. Default is None.

  • collate_and_save (bool) – A boolean to collate and save or not. Default is False, so will not write a file.
+
+
Returns:
+

A list of torch geometric data objects containing molecular graph info and properties.

+
+
Return type:
+

data_list (list of torch_geometric.data.Data)

+
+
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/scripts/download_data/index.html b/autoapi/core/scripts/download_data/index.html
new file mode 100644
index 000000000..c22c43563
@@ -0,0 +1,870 @@
+core.scripts.download_data — FAIR Chemistry Documentation

core.scripts.download_data#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + +

get_data(→ None)

uncompress_data(→ str)

preprocess_data(→ None)

verify_count(→ None)

cleanup(→ None)

+
+
+

Attributes#

+ + + + + + + + + + + + + + + +

DOWNLOAD_LINKS_s2ef

DOWNLOAD_LINKS_is2re

S2EF_COUNTS

parser

+
+ +
+ +
+ +
+ +
+
+core.scripts.download_data.S2EF_COUNTS#
+
+ +
+
+core.scripts.download_data.get_data(datadir: str, task: str, split: str | None, del_intmd_files: bool) None#
+
+ +
+
+core.scripts.download_data.uncompress_data(compressed_dir: str) str#
+
+ +
+
+core.scripts.download_data.preprocess_data(uncompressed_dir: str, output_path: str) None#
+
+ +
+
+core.scripts.download_data.verify_count(output_path: str, task: str, split: str) None#
+
+ +
+
+core.scripts.download_data.cleanup(filename: str, dirname: str) None#
+
+ +
+
+core.scripts.download_data.parser#
+
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/scripts/gif_maker_parallelized/index.html b/autoapi/core/scripts/gif_maker_parallelized/index.html
new file mode 100644
index 000000000..8c4071ecf
@@ -0,0 +1,827 @@
+core.scripts.gif_maker_parallelized — FAIR Chemistry Documentation

core.scripts.gif_maker_parallelized#

+

Script to generate gifs from traj

+

Note: This is just a quick way to generate gifs and visualizations from a trajectory; there are many parameters and settings in the code that people can vary to make the visualizations better. We have chosen these settings as they seem to work fine for most of our systems.

+

Requirements:

+

povray
ffmpeg
ase==3.21

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + +

pov_from_atoms(→ None)

parallelize_generation(→ None)

get_parser(→ argparse.ArgumentParser)

+
+
+

Attributes#

+ + + + + + +

parser

+
+
+core.scripts.gif_maker_parallelized.pov_from_atoms(mp_args) None#
+
+ +
+
+core.scripts.gif_maker_parallelized.parallelize_generation(traj_path, out_path: str, n_procs) None#
+
+ +
+
+core.scripts.gif_maker_parallelized.get_parser() argparse.ArgumentParser#
+
+ +
+
+core.scripts.gif_maker_parallelized.parser: argparse.ArgumentParser#
+
+ +
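A hedged sketch of calling the module directly follows; the trajectory path and output directory are illustrative, the fairchem.core import prefix is assumed, and povray/ffmpeg must be installed as noted above.

# Import path assumed from the documented module name (core.scripts.gif_maker_parallelized).
from fairchem.core.scripts.gif_maker_parallelized import parallelize_generation

# Render frames from an ASE trajectory in parallel; file names are illustrative.
parallelize_generation("relaxation.traj", out_path="gif_frames/", n_procs=4)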
+
+
\ No newline at end of file
diff --git a/autoapi/core/scripts/hpo/index.html b/autoapi/core/scripts/hpo/index.html
new file mode 100644
index 000000000..938e9ac19
@@ -0,0 +1,763 @@
+core.scripts.hpo — FAIR Chemistry Documentation

core.scripts.hpo#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Submodules#

+ +
+
\ No newline at end of file
diff --git a/autoapi/core/scripts/hpo/run_tune/index.html b/autoapi/core/scripts/hpo/run_tune/index.html
new file mode 100644
index 000000000..a12d5705a
@@ -0,0 +1,791 @@
+core.scripts.hpo.run_tune — FAIR Chemistry Documentation

core.scripts.hpo.run_tune#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

ocp_trainable(→ None)

main(→ None)

+
+
+core.scripts.hpo.run_tune.ocp_trainable(config, checkpoint_dir=None) None#
+
+ +
+
+core.scripts.hpo.run_tune.main() None#
+
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/scripts/hpo/run_tune_pbt/index.html b/autoapi/core/scripts/hpo/run_tune_pbt/index.html
new file mode 100644
index 000000000..f4345d59b
@@ -0,0 +1,791 @@
+core.scripts.hpo.run_tune_pbt — FAIR Chemistry Documentation

core.scripts.hpo.run_tune_pbt#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

ocp_trainable(→ None)

main(→ None)

+
+
+core.scripts.hpo.run_tune_pbt.ocp_trainable(config, checkpoint_dir=None) None#
+
+ +
+
+core.scripts.hpo.run_tune_pbt.main() None#
+
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/scripts/index.html b/autoapi/core/scripts/index.html
new file mode 100644
index 000000000..1ceb57019
@@ -0,0 +1,783 @@
+core.scripts — FAIR Chemistry Documentation

core.scripts#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Subpackages#

+ +
+
+

Submodules#

+ +
+
\ No newline at end of file
diff --git a/autoapi/core/scripts/make_challenge_submission_file/index.html b/autoapi/core/scripts/make_challenge_submission_file/index.html
new file mode 100644
index 000000000..b3352a4f5
@@ -0,0 +1,825 @@
+core.scripts.make_challenge_submission_file — FAIR Chemistry Documentation

core.scripts.make_challenge_submission_file#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+

ONLY for use in the NeurIPS 2021 Open Catalyst Challenge. For all other submissions +please use make_submission_file.py.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + +

write_is2re_relaxations(→ None)

write_predictions(→ None)

main(→ None)

+
+
+

Attributes#

+ + + + + + +

parser

+
+
+core.scripts.make_challenge_submission_file.write_is2re_relaxations(path: str, filename: str, hybrid) None#
+
+ +
+
+core.scripts.make_challenge_submission_file.write_predictions(path: str, filename: str) None#
+
+ +
+
+core.scripts.make_challenge_submission_file.main(args: argparse.Namespace) None#
+
+ +
+
+core.scripts.make_challenge_submission_file.parser#
+
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/scripts/make_lmdb_sizes/index.html b/autoapi/core/scripts/make_lmdb_sizes/index.html
new file mode 100644
index 000000000..39f952b5b
@@ -0,0 +1,812 @@
+core.scripts.make_lmdb_sizes — FAIR Chemistry Documentation

core.scripts.make_lmdb_sizes#

+

This script provides the functionality to generate metadata.npz files necessary +for load_balancing the DataLoader.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

get_data(index)

main(→ None)

+
+
+

Attributes#

+ + + + + + +

parser

+
+
+core.scripts.make_lmdb_sizes.get_data(index)#
+
+ +
+
+core.scripts.make_lmdb_sizes.main(args) None#
+
+ +
+
+core.scripts.make_lmdb_sizes.parser#
+
+ +
+
+
\ No newline at end of file
diff --git a/autoapi/core/scripts/make_submission_file/index.html b/autoapi/core/scripts/make_submission_file/index.html
new file mode 100644
index 000000000..bd15fbc84
@@ -0,0 +1,833 @@
+core.scripts.make_submission_file — FAIR Chemistry Documentation

core.scripts.make_submission_file#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + +

write_is2re_relaxations(→ None)

write_predictions(→ None)

main(→ None)

+
+
+

Attributes#

+ + + + + + + + + +

SPLITS

parser

+
+
+core.scripts.make_submission_file.SPLITS#
+
+ +
+
+core.scripts.make_submission_file.write_is2re_relaxations(args) None#
+
+ +
+
+core.scripts.make_submission_file.write_predictions(args) None#
+
+ +
+
+core.scripts.make_submission_file.main(args: argparse.Namespace) None#
+
+ +
+
+core.scripts.make_submission_file.parser#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/scripts/preprocess_ef/index.html b/autoapi/core/scripts/preprocess_ef/index.html new file mode 100644 index 000000000..a26ddb0fb --- /dev/null +++ b/autoapi/core/scripts/preprocess_ef/index.html @@ -0,0 +1,822 @@ + + + + + + + + + + + core.scripts.preprocess_ef — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.scripts.preprocess_ef

+ +
+ +
+
+ + + + +
+ +
+

core.scripts.preprocess_ef#

+

Creates LMDB files with extracted graph features from provided *.extxyz files +for the S2EF task.

+
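As a rough illustration of the surrounding I/O, the sketch below reads *.extxyz frames with ASE and writes one pickled record per frame into an LMDB file. The real script extracts graph features before writing; pickling raw Atoms objects here is a placeholder assumption, and the file names are hypothetical.

# Illustrative sketch only: extxyz frames in, one LMDB record per frame out.
# Pickling raw Atoms objects stands in for the script's graph-feature extraction.
import pickle

import ase.io
import lmdb

frames = ase.io.read("system.extxyz", index=":")        # hypothetical input file
env = lmdb.open("data.lmdb", map_size=2**30, subdir=False)
with env.begin(write=True) as txn:
    for i, atoms in enumerate(frames):
        txn.put(str(i).encode("ascii"), pickle.dumps(atoms, protocol=-1))
env.close()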
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + +

write_images_to_lmdb(mp_arg)

main(→ None)

get_parser(→ argparse.ArgumentParser)

+
+
+

Attributes#

+ + + + + + +

parser

+
+
+core.scripts.preprocess_ef.write_images_to_lmdb(mp_arg)#
+
+ +
+
+core.scripts.preprocess_ef.main(args: argparse.Namespace) None#
+
+ +
+
+core.scripts.preprocess_ef.get_parser() argparse.ArgumentParser#
+
+ +
+
+core.scripts.preprocess_ef.parser: argparse.ArgumentParser#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/scripts/preprocess_relaxed/index.html b/autoapi/core/scripts/preprocess_relaxed/index.html new file mode 100644 index 000000000..5ac496ffc --- /dev/null +++ b/autoapi/core/scripts/preprocess_relaxed/index.html @@ -0,0 +1,812 @@ + + + + + + + + + + + core.scripts.preprocess_relaxed — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.scripts.preprocess_relaxed

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.scripts.preprocess_relaxed#

+

Creates LMDB files with extracted graph features from provided *.extxyz files +for the S2EF task.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

write_images_to_lmdb(→ None)

main(→ None)

+
+
+

Attributes#

+ + + + + + +

parser

+
+
+core.scripts.preprocess_relaxed.write_images_to_lmdb(mp_arg) None#
+
+ +
+
+core.scripts.preprocess_relaxed.main(args, split) None#
+
+ +
+
+core.scripts.preprocess_relaxed.parser#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/scripts/uncompress/index.html b/autoapi/core/scripts/uncompress/index.html new file mode 100644 index 000000000..9630c6d7a --- /dev/null +++ b/autoapi/core/scripts/uncompress/index.html @@ -0,0 +1,832 @@ + + + + + + + + + + + core.scripts.uncompress — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.scripts.uncompress

+ +
+ +
+
+ + + + +
+ +
+

core.scripts.uncompress#

+

Uncompresses downloaded S2EF datasets to be used by the LMDB preprocessing +script - preprocess_ef.py

+
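A minimal sketch of what read_lzma-style decompression can look like using only the standard library; the file names are placeholders and this is not the module's exact implementation.

# Stream-decompress an .xz archive with the standard library (sketch only).
import lzma
import shutil

def decompress_xz(inpfile: str, outfile: str) -> None:
    # Read compressed bytes and copy the decompressed stream to the output file.
    with lzma.open(inpfile, "rb") as src, open(outfile, "wb") as dst:
        shutil.copyfileobj(src, dst)

decompress_xz("s2ef_train.extxyz.xz", "s2ef_train.extxyz")   # placeholder names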
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + +

read_lzma(→ None)

decompress_list_of_files(→ None)

get_parser(→ argparse.ArgumentParser)

main(→ None)

+
+
+

Attributes#

+ + + + + + +

parser

+
+
+core.scripts.uncompress.read_lzma(inpfile: str, outfile: str) None#
+
+ +
+
+core.scripts.uncompress.decompress_list_of_files(ip_op_pair: tuple[str, str]) None#
+
+ +
+
+core.scripts.uncompress.get_parser() argparse.ArgumentParser#
+
+ +
+
+core.scripts.uncompress.main(args: argparse.Namespace) None#
+
+ +
+
+core.scripts.uncompress.parser: argparse.ArgumentParser#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tasks/index.html b/autoapi/core/tasks/index.html new file mode 100644 index 000000000..11f27d232 --- /dev/null +++ b/autoapi/core/tasks/index.html @@ -0,0 +1,876 @@ + + + + + + + + + + + core.tasks — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.tasks#

+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + +

PredictTask

RelaxationTask

TrainTask

ValidateTask

+
+
+class core.tasks.PredictTask(config)#
+

Bases: BaseTask

+
+
+run() None#
+
+ +
+ +
+
+class core.tasks.RelaxationTask(config)#
+

Bases: BaseTask

+
+
+run() None#
+
+ +
+ +
+
+class core.tasks.TrainTask(config)#
+

Bases: BaseTask

+
+
+_process_error(e: RuntimeError) None#
+
+ +
+
+run() None#
+
+ +
+ +
+
+class core.tasks.ValidateTask(config)#
+

Bases: BaseTask

+
+
+run() None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tasks/task/index.html b/autoapi/core/tasks/task/index.html new file mode 100644 index 000000000..15ea65127 --- /dev/null +++ b/autoapi/core/tasks/task/index.html @@ -0,0 +1,897 @@ + + + + + + + + + + + core.tasks.task — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.tasks.task#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + +

BaseTask

TrainTask

PredictTask

ValidateTask

RelaxationTask

+
+
+class core.tasks.task.BaseTask(config)#
+
+
+setup(trainer) None#
+
+ +
+
+abstract run()#
+
+ +
+ +
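The classes on this page share one pattern: setup() binds a trainer and each subclass implements run(). The sketch below mirrors that documented interface; it is an illustration of the pattern, not the package's implementation, and the trainer method call is only an example.

# Sketch of the documented task pattern: setup() attaches a trainer,
# subclasses implement run(). Not the package's actual implementation.
from abc import ABC, abstractmethod

class SketchBaseTask(ABC):
    def __init__(self, config) -> None:
        self.config = config

    def setup(self, trainer) -> None:
        self.trainer = trainer

    @abstractmethod
    def run(self) -> None: ...

class SketchTrainTask(SketchBaseTask):
    def run(self) -> None:
        # Delegate to the bound trainer; the call mirrors the documented
        # train(disable_eval_tqdm) signature elsewhere in these docs.
        self.trainer.train(disable_eval_tqdm=False)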
+
+class core.tasks.task.TrainTask(config)#
+

Bases: BaseTask

+
+
+_process_error(e: RuntimeError) None#
+
+ +
+
+run() None#
+
+ +
+ +
+
+class core.tasks.task.PredictTask(config)#
+

Bases: BaseTask

+
+
+run() None#
+
+ +
+ +
+
+class core.tasks.task.ValidateTask(config)#
+

Bases: BaseTask

+
+
+run() None#
+
+ +
+ +
+
+class core.tasks.task.RelaxationTask(config)#
+

Bases: BaseTask

+
+
+run() None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/common/test_ase_calculator/index.html b/autoapi/core/tests/common/test_ase_calculator/index.html new file mode 100644 index 000000000..e272140f4 --- /dev/null +++ b/autoapi/core/tests/common/test_ase_calculator/index.html @@ -0,0 +1,806 @@ + + + + + + + + + + + core.tests.common.test_ase_calculator — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.tests.common.test_ase_calculator

+ +
+ +
+
+ + + + +
+ +
+

core.tests.common.test_ase_calculator#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + +

atoms(→ ase.Atoms)

checkpoint_path(request, tmp_path)

test_calculator_setup(checkpoint_path)

test_relaxation_final_energy(→ None)

test_random_seed_final_energy(atoms, tmp_path)

+
+
+core.tests.common.test_ase_calculator.atoms() ase.Atoms#
+
+ +
+
+core.tests.common.test_ase_calculator.checkpoint_path(request, tmp_path)#
+
+ +
+
+core.tests.common.test_ase_calculator.test_calculator_setup(checkpoint_path)#
+
+ +
+
+core.tests.common.test_ase_calculator.test_relaxation_final_energy(atoms, tmp_path, snapshot) None#
+
+ +
+
+core.tests.common.test_ase_calculator.test_random_seed_final_energy(atoms, tmp_path)#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/common/test_data_parallel_batch_sampler/index.html b/autoapi/core/tests/common/test_data_parallel_batch_sampler/index.html new file mode 100644 index 000000000..98898fd63 --- /dev/null +++ b/autoapi/core/tests/common/test_data_parallel_batch_sampler/index.html @@ -0,0 +1,962 @@ + + + + + + + + + + + core.tests.common.test_data_parallel_batch_sampler — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.tests.common.test_data_parallel_batch_sampler#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

_temp_file(name)

valid_path_dataset()

invalid_path_dataset()

invalid_dataset()

test_lowercase(→ None)

test_invalid_mode(→ None)

test_invalid_dataset(→ None)

test_invalid_path_dataset(→ None)

test_valid_dataset(→ None)

test_disabled(→ None)

test_single_node(→ None)

test_stateful_distributed_sampler_noshuffle(→ None)

test_stateful_distributed_sampler_vs_distributed_sampler(→ None)

test_stateful_distributed_sampler(→ None)

test_stateful_distributed_sampler_numreplicas(→ None)

test_stateful_distributed_sampler_numreplicas_drop_last(→ None)

+
+
+

Attributes#

+ + + + + + + + + + + + + + + +

DATA

SIZE_ATOMS

SIZE_NEIGHBORS

T_co

+
+
+core.tests.common.test_data_parallel_batch_sampler.DATA = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.SIZE_ATOMS = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.SIZE_NEIGHBORS = [4, 4, 4, 4, 4, 4, 4, 4, 4, 4]#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.T_co#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler._temp_file(name: str)#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.valid_path_dataset()#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.invalid_path_dataset()#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.invalid_dataset()#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.test_lowercase(invalid_dataset) None#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.test_invalid_mode(invalid_dataset) None#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.test_invalid_dataset(invalid_dataset) None#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.test_invalid_path_dataset(invalid_path_dataset) None#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.test_valid_dataset(valid_path_dataset) None#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.test_disabled(valid_path_dataset) None#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.test_single_node(valid_path_dataset) None#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_noshuffle(valid_path_dataset) None#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_vs_distributed_sampler(valid_path_dataset) None#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler(valid_path_dataset) None#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_numreplicas(valid_path_dataset) None#
+
+ +
+
+core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_numreplicas_drop_last(valid_path_dataset) None#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/common/test_yaml_loader/index.html b/autoapi/core/tests/common/test_yaml_loader/index.html new file mode 100644 index 000000000..2b97d4c29 --- /dev/null +++ b/autoapi/core/tests/common/test_yaml_loader/index.html @@ -0,0 +1,793 @@ + + + + + + + + + + + core.tests.common.test_yaml_loader — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.tests.common.test_yaml_loader

+ +
+ +
+
+ + + + +
+ +
+

core.tests.common.test_yaml_loader#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + +

invalid_yaml_config()

valid_yaml_config()

test_invalid_config(invalid_yaml_config)

test_valid_config(valid_yaml_config)

+
+
+core.tests.common.test_yaml_loader.invalid_yaml_config()#
+
+ +
+
+core.tests.common.test_yaml_loader.valid_yaml_config()#
+
+ +
+
+core.tests.common.test_yaml_loader.test_invalid_config(invalid_yaml_config)#
+
+ +
+
+core.tests.common.test_yaml_loader.test_valid_config(valid_yaml_config)#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/conftest/index.html b/autoapi/core/tests/conftest/index.html new file mode 100644 index 000000000..f0fdda302 --- /dev/null +++ b/autoapi/core/tests/conftest/index.html @@ -0,0 +1,917 @@ + + + + + + + + + + + core.tests.conftest — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.tests.conftest#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + +

Approx

Wrapper object for approximately compared numpy arrays.

_ApproxNumpyFormatter

ApproxExtension

By default, syrupy uses the __repr__ of the expected (snapshot) and actual values

+
+
+

Functions#

+ + + + + + + + + +

_try_parse_approx(→ Approx | None)

Parse the string representation of an Approx object.

snapshot(snapshot)

+
+
+

Attributes#

+ + + + + + + + + +

DEFAULT_RTOL

DEFAULT_ATOL

+
+
+core.tests.conftest.DEFAULT_RTOL = 0.001#
+
+ +
+
+core.tests.conftest.DEFAULT_ATOL = 0.001#
+
+ +
+
+class core.tests.conftest.Approx(data: numpy.ndarray | list, *, rtol: float | None = None, atol: float | None = None)#
+

Wrapper object for approximately compared numpy arrays.

+
+
+__repr__() str#
+

Return repr(self).

+
+ +
+ +
+
+class core.tests.conftest._ApproxNumpyFormatter(data)#
+
+
+__repr__() str#
+

Return repr(self).

+
+ +
+ +
+
+core.tests.conftest._try_parse_approx(data: syrupy.types.SerializableData) Approx | None#
+

Parse the string representation of an Approx object. +We can just use eval here, since we know the string is safe.

+
+ +
+
+class core.tests.conftest.ApproxExtension#
+

Bases: syrupy.extensions.amber.AmberSnapshotExtension

+

By default, syrupy uses the __repr__ of the expected (snapshot) and actual values +to serialize them into strings. Then, it compares the strings to see if they match.

+

However, this behavior is not ideal for comparing floats/ndarrays. For example, +if we have a snapshot with a float value of 0.1, and the actual value is 0.10000000000000001, +then the strings will not match, even though the values are effectively equal.

+

To work around this, we override the serialize method to serialize the expected value +into a special representation. Then, we override the matches function (which originally does a +simple string comparison) to parse the expected and actual values into numpy arrays. +Finally, we compare the arrays using np.allclose.

+
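The toy comparison below shows why the string route fails while np.allclose succeeds, using the default tolerances defined above (DEFAULT_RTOL = DEFAULT_ATOL = 0.001); the arrays are made up for illustration.

# Repr comparison is brittle under tiny numerical drift; np.allclose with the
# module's default tolerances (0.001) treats the arrays as equal.
import numpy as np

expected = np.array([0.1, 0.2, 0.3])
actual = expected + 1e-5                                      # small drift

print(repr(expected) == repr(actual))                         # False
print(np.allclose(actual, expected, rtol=0.001, atol=0.001))  # True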
+
+matches(*, serialized_data: syrupy.types.SerializableData, snapshot_data: syrupy.types.SerializableData) bool#
+
+ +
+
+serialize(data, **kwargs)#
+
+ +
+ +
+
+core.tests.conftest.snapshot(snapshot)#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/datasets/test_ase_datasets/index.html b/autoapi/core/tests/datasets/test_ase_datasets/index.html new file mode 100644 index 000000000..d24e6148b --- /dev/null +++ b/autoapi/core/tests/datasets/test_ase_datasets/index.html @@ -0,0 +1,852 @@ + + + + + + + + + + + core.tests.datasets.test_ase_datasets — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.tests.datasets.test_ase_datasets#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + +

ase_dataset(request, tmp_path_factory)

test_ase_dataset(ase_dataset)

test_ase_read_dataset(→ None)

test_ase_metadata_guesser(→ None)

test_db_add_delete(→ None)

test_ase_multiread_dataset(→ None)

test_empty_dataset(tmp_path)

+
+
+

Attributes#

+ + + + + + + + + +

structures

calc

+
+
+core.tests.datasets.test_ase_datasets.structures#
+
+ +
+
+core.tests.datasets.test_ase_datasets.calc#
+
+ +
+
+core.tests.datasets.test_ase_datasets.ase_dataset(request, tmp_path_factory)#
+
+ +
+
+core.tests.datasets.test_ase_datasets.test_ase_dataset(ase_dataset)#
+
+ +
+
+core.tests.datasets.test_ase_datasets.test_ase_read_dataset(tmp_path) None#
+
+ +
+
+core.tests.datasets.test_ase_datasets.test_ase_metadata_guesser(ase_dataset) None#
+
+ +
+
+core.tests.datasets.test_ase_datasets.test_db_add_delete(tmp_path) None#
+
+ +
+
+core.tests.datasets.test_ase_datasets.test_ase_multiread_dataset(tmp_path) None#
+
+ +
+
+core.tests.datasets.test_ase_datasets.test_empty_dataset(tmp_path)#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/datasets/test_ase_lmdb/index.html b/autoapi/core/tests/datasets/test_ase_lmdb/index.html new file mode 100644 index 000000000..1124c4360 --- /dev/null +++ b/autoapi/core/tests/datasets/test_ase_lmdb/index.html @@ -0,0 +1,892 @@ + + + + + + + + + + + core.tests.datasets.test_ase_lmdb — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.tests.datasets.test_ase_lmdb#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

generate_random_structure()

ase_lmbd_path(tmp_path_factory)

test_aselmdb_write(→ None)

test_aselmdb_count(→ None)

test_aselmdb_delete(→ None)

test_aselmdb_randomreads(→ None)

test_aselmdb_constraintread(→ None)

test_update_keyvalue_pair(→ None)

test_update_atoms(→ None)

test_metadata(→ None)

+
+
+

Attributes#

+ + + + + + + + + + + + +

N_WRITES

N_READS

test_structures

+
+
+core.tests.datasets.test_ase_lmdb.N_WRITES = 100#
+
+ +
+
+core.tests.datasets.test_ase_lmdb.N_READS = 200#
+
+ +
+
+core.tests.datasets.test_ase_lmdb.test_structures#
+
+ +
+
+core.tests.datasets.test_ase_lmdb.generate_random_structure()#
+
+ +
+
+core.tests.datasets.test_ase_lmdb.ase_lmbd_path(tmp_path_factory)#
+
+ +
+
+core.tests.datasets.test_ase_lmdb.test_aselmdb_write(ase_lmbd_path) None#
+
+ +
+
+core.tests.datasets.test_ase_lmdb.test_aselmdb_count(ase_lmbd_path) None#
+
+ +
+
+core.tests.datasets.test_ase_lmdb.test_aselmdb_delete(ase_lmbd_path) None#
+
+ +
+
+core.tests.datasets.test_ase_lmdb.test_aselmdb_randomreads(ase_lmbd_path) None#
+
+ +
+
+core.tests.datasets.test_ase_lmdb.test_aselmdb_constraintread(ase_lmbd_path) None#
+
+ +
+
+core.tests.datasets.test_ase_lmdb.test_update_keyvalue_pair(ase_lmbd_path) None#
+
+ +
+
+core.tests.datasets.test_ase_lmdb.test_update_atoms(ase_lmbd_path) None#
+
+ +
+
+core.tests.datasets.test_ase_lmdb.test_metadata(ase_lmbd_path) None#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/datasets/test_utils/index.html b/autoapi/core/tests/datasets/test_utils/index.html new file mode 100644 index 000000000..b2de18995 --- /dev/null +++ b/autoapi/core/tests/datasets/test_utils/index.html @@ -0,0 +1,773 @@ + + + + + + + + + + + core.tests.datasets.test_utils — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.tests.datasets.test_utils

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.tests.datasets.test_utils#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

pyg_data()

test_rename_data_object_keys(pyg_data)

+
+
+core.tests.datasets.test_utils.pyg_data()#
+
+ +
+
+core.tests.datasets.test_utils.test_rename_data_object_keys(pyg_data)#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/evaluator/test_evaluator/index.html b/autoapi/core/tests/evaluator/test_evaluator/index.html new file mode 100644 index 000000000..2ffd80b68 --- /dev/null +++ b/autoapi/core/tests/evaluator/test_evaluator/index.html @@ -0,0 +1,886 @@ + + + + + + + + + + + core.tests.evaluator.test_evaluator — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.tests.evaluator.test_evaluator#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + +

TestMetrics

TestS2EFEval

TestIS2RSEval

TestIS2REEval

+
+
+

Functions#

+ + + + + + + + + + + + +

load_evaluator_s2ef(→ None)

load_evaluator_is2rs(→ None)

load_evaluator_is2re(→ None)

+
+
+core.tests.evaluator.test_evaluator.load_evaluator_s2ef(request) None#
+
+ +
+
+core.tests.evaluator.test_evaluator.load_evaluator_is2rs(request) None#
+
+ +
+
+core.tests.evaluator.test_evaluator.load_evaluator_is2re(request) None#
+
+ +
+
+class core.tests.evaluator.test_evaluator.TestMetrics#
+
+
+test_cosine_similarity() None#
+
+ +
+
+test_magnitude_error() None#
+
+ +
+ +
+
+class core.tests.evaluator.test_evaluator.TestS2EFEval#
+
+
+test_metrics_exist() None#
+
+ +
+ +
+
+class core.tests.evaluator.test_evaluator.TestIS2RSEval#
+
+
+test_metrics_exist() None#
+
+ +
+ +
+
+class core.tests.evaluator.test_evaluator.TestIS2REEval#
+
+
+test_metrics_exist() None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/index.html b/autoapi/core/tests/index.html new file mode 100644 index 000000000..009d6aed4 --- /dev/null +++ b/autoapi/core/tests/index.html @@ -0,0 +1,777 @@ + + + + + + + + + + + core.tests — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.tests

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.tests#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Subpackages#

+ +
+
+

Submodules#

+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/models/test_dimenetpp/index.html b/autoapi/core/tests/models/test_dimenetpp/index.html new file mode 100644 index 000000000..977dc395a --- /dev/null +++ b/autoapi/core/tests/models/test_dimenetpp/index.html @@ -0,0 +1,813 @@ + + + + + + + + + + + core.tests.models.test_dimenetpp — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.tests.models.test_dimenetpp

+ +
+ +
+
+ + + + +
+ +
+

core.tests.models.test_dimenetpp#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestDimeNet

+
+
+

Functions#

+ + + + + + + + + +

load_data(→ None)

load_model(→ None)

+
+
+core.tests.models.test_dimenetpp.load_data(request) None#
+
+ +
+
+core.tests.models.test_dimenetpp.load_model(request) None#
+
+ +
+
+class core.tests.models.test_dimenetpp.TestDimeNet#
+
+
+test_rotation_invariance() None#
+
+ +
+
+test_energy_force_shape(snapshot) None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/models/test_equiformer_v2/index.html b/autoapi/core/tests/models/test_equiformer_v2/index.html new file mode 100644 index 000000000..aa9c85e3f --- /dev/null +++ b/autoapi/core/tests/models/test_equiformer_v2/index.html @@ -0,0 +1,827 @@ + + + + + + + + + + + core.tests.models.test_equiformer_v2 — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.tests.models.test_equiformer_v2#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + + + + +

TestEquiformerV2

TestMPrimaryLPrimary

+
+
+

Functions#

+ + + + + + + + + +

load_data(request)

load_model(request)

+
+
+core.tests.models.test_equiformer_v2.load_data(request)#
+
+ +
+
+core.tests.models.test_equiformer_v2.load_model(request)#
+
+ +
+
+class core.tests.models.test_equiformer_v2.TestEquiformerV2#
+
+
+test_energy_force_shape(snapshot)#
+
+ +
+ +
+
+class core.tests.models.test_equiformer_v2.TestMPrimaryLPrimary#
+
+
+test_mprimary_lprimary_mappings()#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/models/test_escn/index.html b/autoapi/core/tests/models/test_escn/index.html new file mode 100644 index 000000000..786af84f8 --- /dev/null +++ b/autoapi/core/tests/models/test_escn/index.html @@ -0,0 +1,774 @@ + + + + + + + + + + + core.tests.models.test_escn — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.tests.models.test_escn

+ +
+ +
+
+ + + + +
+ +
+

core.tests.models.test_escn#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestMPrimaryLPrimary

+
+
+class core.tests.models.test_escn.TestMPrimaryLPrimary#
+
+
+test_mprimary_lprimary_mappings()#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/models/test_gemnet/index.html b/autoapi/core/tests/models/test_gemnet/index.html new file mode 100644 index 000000000..951131622 --- /dev/null +++ b/autoapi/core/tests/models/test_gemnet/index.html @@ -0,0 +1,813 @@ + + + + + + + + + + + core.tests.models.test_gemnet — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.tests.models.test_gemnet

+ +
+ +
+
+ + + + +
+ +
+

core.tests.models.test_gemnet#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestGemNetT

+
+
+

Functions#

+ + + + + + + + + +

load_data(→ None)

load_model(→ None)

+
+
+core.tests.models.test_gemnet.load_data(request) None#
+
+ +
+
+core.tests.models.test_gemnet.load_model(request) None#
+
+ +
+
+class core.tests.models.test_gemnet.TestGemNetT#
+
+
+test_rotation_invariance() None#
+
+ +
+
+test_energy_force_shape(snapshot) None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/models/test_gemnet_oc/index.html b/autoapi/core/tests/models/test_gemnet_oc/index.html new file mode 100644 index 000000000..bc8a15596 --- /dev/null +++ b/autoapi/core/tests/models/test_gemnet_oc/index.html @@ -0,0 +1,813 @@ + + + + + + + + + + + core.tests.models.test_gemnet_oc — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.tests.models.test_gemnet_oc

+ +
+ +
+
+ + + + +
+ +
+

core.tests.models.test_gemnet_oc#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestGemNetOC

+
+
+

Functions#

+ + + + + + + + + +

load_data(→ None)

load_model(→ None)

+
+
+core.tests.models.test_gemnet_oc.load_data(request) None#
+
+ +
+
+core.tests.models.test_gemnet_oc.load_model(request) None#
+
+ +
+
+class core.tests.models.test_gemnet_oc.TestGemNetOC#
+
+
+test_rotation_invariance() None#
+
+ +
+
+test_energy_force_shape(snapshot) None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/models/test_gemnet_oc_scaling_mismatch/index.html b/autoapi/core/tests/models/test_gemnet_oc_scaling_mismatch/index.html new file mode 100644 index 000000000..c13739185 --- /dev/null +++ b/autoapi/core/tests/models/test_gemnet_oc_scaling_mismatch/index.html @@ -0,0 +1,798 @@ + + + + + + + + + + + core.tests.models.test_gemnet_oc_scaling_mismatch — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.tests.models.test_gemnet_oc_scaling_mismatch#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestGemNetOC

+
+
+class core.tests.models.test_gemnet_oc_scaling_mismatch.TestGemNetOC#
+
+
+test_no_scaling_mismatch() None#
+
+ +
+
+test_scaling_mismatch() None#
+
+ +
+
+test_no_file_exists() None#
+
+ +
+
+test_not_fitted() None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/models/test_schnet/index.html b/autoapi/core/tests/models/test_schnet/index.html new file mode 100644 index 000000000..dfe928970 --- /dev/null +++ b/autoapi/core/tests/models/test_schnet/index.html @@ -0,0 +1,813 @@ + + + + + + + + + + + core.tests.models.test_schnet — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.tests.models.test_schnet

+ +
+ +
+
+ + + + +
+ +
+

core.tests.models.test_schnet#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestSchNet

+
+
+

Functions#

+ + + + + + + + + +

load_data(→ None)

load_model(→ None)

+
+
+core.tests.models.test_schnet.load_data(request) None#
+
+ +
+
+core.tests.models.test_schnet.load_model(request) None#
+
+ +
+
+class core.tests.models.test_schnet.TestSchNet#
+
+
+test_rotation_invariance() None#
+
+ +
+
+test_energy_force_shape(snapshot) None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/preprocessing/index.html b/autoapi/core/tests/preprocessing/index.html new file mode 100644 index 000000000..e3c238a77 --- /dev/null +++ b/autoapi/core/tests/preprocessing/index.html @@ -0,0 +1,764 @@ + + + + + + + + + + + core.tests.preprocessing — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.tests.preprocessing

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.tests.preprocessing#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Submodules#

+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/preprocessing/test_atoms_to_graphs/index.html b/autoapi/core/tests/preprocessing/test_atoms_to_graphs/index.html new file mode 100644 index 000000000..7d437a2b2 --- /dev/null +++ b/autoapi/core/tests/preprocessing/test_atoms_to_graphs/index.html @@ -0,0 +1,828 @@ + + + + + + + + + + + core.tests.preprocessing.test_atoms_to_graphs — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.tests.preprocessing.test_atoms_to_graphs#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestAtomsToGraphs

+
+
+

Functions#

+ + + + + + +

atoms_to_graphs_internals(→ None)

+
+
+core.tests.preprocessing.test_atoms_to_graphs.atoms_to_graphs_internals(request) None#
+
+ +
+
+class core.tests.preprocessing.test_atoms_to_graphs.TestAtomsToGraphs#
+
+
+test_gen_neighbors_pymatgen() None#
+
+ +
+
+test_convert() None#
+
+ +
+
+test_convert_all() None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/preprocessing/test_pbc/index.html b/autoapi/core/tests/preprocessing/test_pbc/index.html new file mode 100644 index 000000000..c4e2f2fed --- /dev/null +++ b/autoapi/core/tests/preprocessing/test_pbc/index.html @@ -0,0 +1,814 @@ + + + + + + + + + + + core.tests.preprocessing.test_pbc — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

core.tests.preprocessing.test_pbc

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

core.tests.preprocessing.test_pbc#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestPBC

+
+
+

Functions#

+ + + + + + +

load_data(→ None)

+
+
+core.tests.preprocessing.test_pbc.load_data(request) None#
+
+ +
+
+class core.tests.preprocessing.test_pbc.TestPBC#
+
+
+test_pbc_distances() None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/tests/preprocessing/test_radius_graph_pbc/index.html b/autoapi/core/tests/preprocessing/test_radius_graph_pbc/index.html new file mode 100644 index 000000000..950ff9cae --- /dev/null +++ b/autoapi/core/tests/preprocessing/test_radius_graph_pbc/index.html @@ -0,0 +1,838 @@ + + + + + + + + + + + core.tests.preprocessing.test_radius_graph_pbc — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.tests.preprocessing.test_radius_graph_pbc#

+

Copyright (c) Facebook, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestRadiusGraphPBC

+
+
+

Functions#

+ + + + + + + + + +

load_data(→ None)

check_features_match(→ bool)

+
+
+core.tests.preprocessing.test_radius_graph_pbc.load_data(request) None#
+
+ +
+
+core.tests.preprocessing.test_radius_graph_pbc.check_features_match(edge_index_1, cell_offsets_1, edge_index_2, cell_offsets_2) bool#
+
+ +
+
+class core.tests.preprocessing.test_radius_graph_pbc.TestRadiusGraphPBC#
+
+
+test_radius_graph_pbc() None#
+
+ +
+
+test_bulk() None#
+
+ +
+
+test_molecule() None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/trainers/base_trainer/index.html b/autoapi/core/trainers/base_trainer/index.html new file mode 100644 index 000000000..5bd846473 --- /dev/null +++ b/autoapi/core/trainers/base_trainer/index.html @@ -0,0 +1,939 @@ + + + + + + + + + + + core.trainers.base_trainer — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.trainers.base_trainer#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

BaseTrainer

Helper class that provides a standard way to create an ABC using

+
+
+class core.trainers.base_trainer.BaseTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier: str, timestamp_id: str | None = None, run_dir: str | None = None, is_debug: bool = False, print_every: int = 100, seed: int | None = None, logger: str = 'wandb', local_rank: int = 0, amp: bool = False, cpu: bool = False, name: str = 'ocp', slurm=None, noddp: bool = False)#
+

Bases: abc.ABC

+

Helper class that provides a standard way to create an ABC using +inheritance.

+
+
+property _unwrapped_model#
+
+ +
+
+abstract train(disable_eval_tqdm: bool = False) None#
+

Run model training iterations.

+
+ +
+
+static _get_timestamp(device: torch.device, suffix: str | None) str#
+
+ +
+
+load() None#
+
+ +
+
+set_seed(seed) None#
+
+ +
+
+load_seed_from_config() None#
+
+ +
+
+load_logger() None#
+
+ +
+
+get_sampler(dataset, batch_size: int, shuffle: bool) fairchem.core.common.data_parallel.BalancedBatchSampler#
+
+ +
+
+get_dataloader(dataset, sampler) torch.utils.data.DataLoader#
+
+ +
+
+load_datasets() None#
+
+ +
+
+load_task()#
+
+ +
+
+load_model() None#
+
+ +
+
+load_checkpoint(checkpoint_path: str, checkpoint: dict | None = None) None#
+
+ +
+
+load_loss() None#
+
+ +
+
+load_optimizer() None#
+
+ +
+
+load_extras() None#
+
+ +
+
+save(metrics=None, checkpoint_file: str = 'checkpoint.pt', training_state: bool = True) str | None#
+
+ +
+
+update_best(primary_metric, val_metrics, disable_eval_tqdm: bool = True) None#
+
+ +
+
+validate(split: str = 'val', disable_tqdm: bool = False)#
+
+ +
+
+_backward(loss) None#
+
+ +
+
+save_results(predictions: dict[str, numpy.typing.NDArray], results_file: str | None, keys: collections.abc.Sequence[str] | None = None) None#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/trainers/index.html b/autoapi/core/trainers/index.html new file mode 100644 index 000000000..b5ce698e5 --- /dev/null +++ b/autoapi/core/trainers/index.html @@ -0,0 +1,1044 @@ + + + + + + + + + + + core.trainers — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.trainers#

+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + + + + +

BaseTrainer

Helper class that provides a standard way to create an ABC using

OCPTrainer

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to

+
+
+class core.trainers.BaseTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier: str, timestamp_id: str | None = None, run_dir: str | None = None, is_debug: bool = False, print_every: int = 100, seed: int | None = None, logger: str = 'wandb', local_rank: int = 0, amp: bool = False, cpu: bool = False, name: str = 'ocp', slurm=None, noddp: bool = False)#
+

Bases: abc.ABC

+

Helper class that provides a standard way to create an ABC using +inheritance.

+
+
+property _unwrapped_model#
+
+ +
+
+abstract train(disable_eval_tqdm: bool = False) None#
+

Run model training iterations.

+
+ +
+
+static _get_timestamp(device: torch.device, suffix: str | None) str#
+
+ +
+
+load() None#
+
+ +
+
+set_seed(seed) None#
+
+ +
+
+load_seed_from_config() None#
+
+ +
+
+load_logger() None#
+
+ +
+
+get_sampler(dataset, batch_size: int, shuffle: bool) fairchem.core.common.data_parallel.BalancedBatchSampler#
+
+ +
+
+get_dataloader(dataset, sampler) torch.utils.data.DataLoader#
+
+ +
+
+load_datasets() None#
+
+ +
+
+load_task()#
+
+ +
+
+load_model() None#
+
+ +
+
+load_checkpoint(checkpoint_path: str, checkpoint: dict | None = None) None#
+
+ +
+
+load_loss() None#
+
+ +
+
+load_optimizer() None#
+
+ +
+
+load_extras() None#
+
+ +
+
+save(metrics=None, checkpoint_file: str = 'checkpoint.pt', training_state: bool = True) str | None#
+
+ +
+
+update_best(primary_metric, val_metrics, disable_eval_tqdm: bool = True) None#
+
+ +
+
+validate(split: str = 'val', disable_tqdm: bool = False)#
+
+ +
+
+_backward(loss) None#
+
+ +
+
+save_results(predictions: dict[str, numpy.typing.NDArray], results_file: str | None, keys: collections.abc.Sequence[str] | None = None) None#
+
+ +
+ +
+
+class core.trainers.OCPTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp')#
+

Bases: fairchem.core.trainers.base_trainer.BaseTrainer

+

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to +Relaxed State (IS2RS) tasks.

+
+

Note

+

Examples of configurations for task, model, dataset and optimizer +can be found in configs/ocp_s2ef +and configs/ocp_is2rs.

+
+
+
Parameters:
+
    +
  • task (dict) – Task configuration.

  • +
  • model (dict) – Model configuration.

  • +
  • outputs (dict) – Output property configuration.

  • +
  • dataset (dict) – Dataset configuration. The dataset needs to be a SinglePointLMDB dataset.

  • +
  • optimizer (dict) – Optimizer configuration.

  • +
  • loss_fns (dict) – Loss function configuration.

  • +
  • eval_metrics (dict) – Evaluation metrics configuration.

  • +
  • identifier (str) – Experiment identifier that is appended to log directory.

  • +
  • run_dir (str, optional) – Path to the run directory where logs are to be saved. +(default: None)

  • +
  • is_debug (bool, optional) – Run in debug mode. +(default: False)

  • +
  • print_every (int, optional) – Frequency of printing logs. +(default: 100)

  • +
  • seed (int, optional) – Random number seed. +(default: None)

  • +
  • logger (str, optional) – Type of logger to be used. +(default: wandb)

  • +
  • local_rank (int, optional) – Local rank of the process, only applicable for distributed training. +(default: 0)

  • +
  • amp (bool, optional) – Run using automatic mixed precision. +(default: False)

  • +
  • slurm (dict) – Slurm configuration. Currently just for keeping track. +(default: {})

  • +
  • noddp (bool, optional) – Run model without DDP.

  • +
+
+
+
+
+train(disable_eval_tqdm: bool = False) None#
+

Run model training iterations.

+
+ +
+
+_forward(batch)#
+
+ +
+
+_compute_loss(out, batch)#
+
+ +
+
+_compute_metrics(out, batch, evaluator, metrics=None)#
+
+ +
+
+predict(data_loader, per_image: bool = True, results_file: str | None = None, disable_tqdm: bool = False)#
+
+ +
+
+run_relaxations(split='val')#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/core/trainers/ocp_trainer/index.html b/autoapi/core/trainers/ocp_trainer/index.html new file mode 100644 index 000000000..c41f0a690 --- /dev/null +++ b/autoapi/core/trainers/ocp_trainer/index.html @@ -0,0 +1,871 @@ + + + + + + + + + + + core.trainers.ocp_trainer — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

core.trainers.ocp_trainer#

+

Copyright (c) Meta, Inc. and its affiliates.

+

This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree.

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

OCPTrainer

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to

+
+
+class core.trainers.ocp_trainer.OCPTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp')#
+

Bases: fairchem.core.trainers.base_trainer.BaseTrainer

+

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to +Relaxed State (IS2RS) tasks.

+
+

Note

+

Examples of configurations for task, model, dataset and optimizer +can be found in configs/ocp_s2ef +and configs/ocp_is2rs.

+
+
+
Parameters:
+
    +
  • task (dict) – Task configuration.

  • +
  • model (dict) – Model configuration.

  • +
  • outputs (dict) – Output property configuration.

  • +
  • dataset (dict) – Dataset configuration. The dataset needs to be a SinglePointLMDB dataset.

  • +
  • optimizer (dict) – Optimizer configuration.

  • +
  • loss_fns (dict) – Loss function configuration.

  • +
  • eval_metrics (dict) – Evaluation metrics configuration.

  • +
  • identifier (str) – Experiment identifier that is appended to log directory.

  • +
  • run_dir (str, optional) – Path to the run directory where logs are to be saved. +(default: None)

  • +
  • is_debug (bool, optional) – Run in debug mode. +(default: False)

  • +
  • print_every (int, optional) – Frequency of printing logs. +(default: 100)

  • +
  • seed (int, optional) – Random number seed. +(default: None)

  • +
  • logger (str, optional) – Type of logger to be used. +(default: wandb)

  • +
  • local_rank (int, optional) – Local rank of the process, only applicable for distributed training. +(default: 0)

  • +
  • amp (bool, optional) – Run using automatic mixed precision. +(default: False)

  • +
  • slurm (dict) – Slurm configuration. Currently just for keeping track. +(default: {})

  • +
  • noddp (bool, optional) – Run model without DDP.

  • +
+
+
+
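Once an OCPTrainer has been constructed from the configuration dictionaries listed above (their exact contents depend on the task and are not reproduced here), a typical call sequence uses only the methods documented on this page, as in the hedged sketch below; the data loader and results file name are placeholders.

# Hedged usage sketch: only methods documented on this page are called;
# building `trainer` and `data_loader` is assumed to happen elsewhere.
def run_training(trainer, data_loader) -> None:
    trainer.train(disable_eval_tqdm=True)
    trainer.validate(split="val", disable_tqdm=True)
    trainer.predict(data_loader, per_image=True, results_file="predictions")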
+
+train(disable_eval_tqdm: bool = False) None#
+

Run model training iterations.

+
+ +
+
+_forward(batch)#
+
+ +
+
+_compute_loss(out, batch)#
+
+ +
+
+_compute_metrics(out, batch, evaluator, metrics=None)#
+
+ +
+
+predict(data_loader, per_image: bool = True, results_file: str | None = None, disable_tqdm: bool = False)#
+
+ +
+
+run_relaxations(split='val')#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/index.html b/autoapi/data/index.html new file mode 100644 index 000000000..0ad7e4e0b --- /dev/null +++ b/autoapi/data/index.html @@ -0,0 +1,791 @@ + + + + + + + + + + + data — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data#

+
+

Subpackages#

+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/core/adsorbate/index.html b/autoapi/data/oc/core/adsorbate/index.html new file mode 100644 index 000000000..2ff3ea9fd --- /dev/null +++ b/autoapi/data/oc/core/adsorbate/index.html @@ -0,0 +1,865 @@ + + + + + + + + + + + data.oc.core.adsorbate — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.core.adsorbate#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

Adsorbate

Initializes an adsorbate object in one of 4 ways:

+
+
+

Functions#

+ + + + + + +

randomly_rotate_adsorbate(adsorbate_atoms[, mode, ...])

+
+
+class data.oc.core.adsorbate.Adsorbate(adsorbate_atoms: ase.Atoms = None, adsorbate_id_from_db: int = None, adsorbate_smiles_from_db: str = None, adsorbate_db_path: str = ADSORBATES_PKL_PATH, adsorbate_db: Dict[int, Tuple[Any, Ellipsis]] = None, adsorbate_binding_indices: list = None)#
+

Initializes an adsorbate object in one of 4 ways: +- Directly pass in an ase.Atoms object.

+
+

For this, you should also provide the index of the binding atom.

+
+
    +
  • Pass in index of adsorbate to select from adsorbate database.

  • +
  • Pass in the SMILES string of the adsorbate to select from the database.

  • +
  • Randomly sample an adsorbate from the adsorbate database.

  • +
+
+
Parameters:
+
    +
  • adsorbate_atoms (ase.Atoms) – Adsorbate structure.

  • +
  • adsorbate_id_from_db (int) – Index of adsorbate to select.

  • +
  • adsorbate_smiles_from_db (str) – A SMILES string of the desired adsorbate.

  • +
  • adsorbate_db_path (str) – Path to adsorbate database.

  • +
  • adsorbate_binding_indices (list) – The index/indices of the adsorbate atoms which are expected to bind.

  • +
+
+
+
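A hedged sketch of the four initialization routes described above. The import path follows the fairchem.data.oc.core cross-references used on these pages, and the database index and SMILES string are placeholders.

# Sketch of the four documented initialization routes; the index and SMILES
# values are placeholders, not recommendations.
from ase.build import molecule
from fairchem.data.oc.core import Adsorbate   # path assumed from cross-references

# 1) Directly from an ase.Atoms object, with the binding atom index supplied.
from_atoms = Adsorbate(adsorbate_atoms=molecule("CO"), adsorbate_binding_indices=[0])

# 2) By database index, 3) by SMILES string, 4) sampled at random.
from_index = Adsorbate(adsorbate_id_from_db=0)
from_smiles = Adsorbate(adsorbate_smiles_from_db="*CO")
randomly_chosen = Adsorbate()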
+
+__len__()#
+
+ +
+
+__str__()#
+

Return str(self).

+
+ +
+
+__repr__()#
+

Return repr(self).

+
+ +
+
+_get_adsorbate_from_random(adsorbate_db)#
+
+ +
+
+_load_adsorbate(adsorbate: Tuple[Any, Ellipsis]) None#
+

Saves the fields from an adsorbate stored in a database. Fields added +after the first revision are conditionally added for backwards +compatibility with older database files.

+
+ +
+ +
+
+data.oc.core.adsorbate.randomly_rotate_adsorbate(adsorbate_atoms: ase.Atoms, mode: str = 'random', binding_idx: int = None)#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/core/adsorbate_slab_config/index.html b/autoapi/data/oc/core/adsorbate_slab_config/index.html new file mode 100644 index 000000000..a351d4abf --- /dev/null +++ b/autoapi/data/oc/core/adsorbate_slab_config/index.html @@ -0,0 +1,1059 @@ + + + + + + + + + + + data.oc.core.adsorbate_slab_config — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.core.adsorbate_slab_config#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

AdsorbateSlabConfig

Initializes a list of adsorbate-catalyst systems for a given Adsorbate and Slab.

+
+
+

Functions#

+ + + + + + + + + + + + + + + +

get_random_sites_on_triangle(vertices[, num_sites])

Sample num_sites random sites uniformly on a given 3D triangle.

custom_tile_atoms(atoms)

Tile the atoms so that the center tile has the indices and positions of the

get_interstitial_distances(adsorbate_slab_config)

Check to see if there is any atomic overlap between surface atoms

there_is_overlap(adsorbate_slab_config)

Check to see if there is any atomic overlap between surface atoms

+
+
+class data.oc.core.adsorbate_slab_config.AdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbate: fairchem.data.oc.core.Adsorbate, num_sites: int = 100, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1, mode: str = 'random')#
+

Initializes a list of adsorbate-catalyst systems for a given Adsorbate and Slab.

+
+
Parameters:
+
    +
  • slab (Slab) – Slab object.

  • +
  • adsorbate (Adsorbate) – Adsorbate object.

  • +
  • num_sites (int) – Number of sites to sample.

  • +
  • num_augmentations_per_site (int) – Number of augmentations of the adsorbate per site. Total number of +generated structures will be num_sites * num_augmentations_per_site.

  • +
  • interstitial_gap (float) – Minimum distance in Angstroms between adsorbate and slab atoms.

  • +
  • mode (str) –

    “random”, “heuristic”, or “random_site_heuristic_placement”. +This affects surface site sampling and adsorbate placement on each site.

    +

    In “random”, we do a Delaunay triangulation of the surface atoms, then +sample sites uniformly at random within each triangle. When placing the +adsorbate, we randomly rotate it along xyz, and place it such that the +center of mass is at the site.

    +

    In “heuristic”, we use Pymatgen’s AdsorbateSiteFinder to find the most +energetically favorable sites, i.e., ontop, bridge, or hollow sites. +When placing the adsorbate, we randomly rotate it along z with only +slight rotation along x and y, and place it such that the binding atom +is at the site.

    +

    In “random_site_heuristic_placement”, we do a Delaunay triangulation of +the surface atoms, then sample sites uniformly at random within each +triangle. When placing the adsorbate, we randomly rotate it along z with +only slight rotation along x and y, and place it such that the binding +atom is at the site.

    +

    In all cases, the adsorbate is placed at the closest position of no +overlap with the slab plus interstitial_gap along the surface normal.

    +

  • +
+
+
+
+
+get_binding_sites(num_sites: int)#
+

Returns up to num_sites sites given the surface atoms’ positions.

+
+ +
+
+place_adsorbate_on_site(adsorbate: fairchem.data.oc.core.Adsorbate, site: numpy.ndarray, interstitial_gap: float = 0.1)#
+

Place the adsorbate at the given binding site.

+
+ +
+
+place_adsorbate_on_sites(sites: list, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1)#
+

Place the adsorbate at the given binding sites.

+
+ +
+
+_get_scaled_normal(adsorbate_c: ase.Atoms, slab_c: ase.Atoms, site: numpy.ndarray, unit_normal: numpy.ndarray, interstitial_gap: float = 0.1)#
+

Get the scaled normal that gives a proximate configuration without atomic +overlap by:

+
+
    +
  1. Projecting the adsorbate and surface atoms onto the surface plane.

  2. +
  3. +
    Identify all adsorbate atom - surface atom combinations for which

an intersection when translating along the normal would occur. +This is where the distance between the projected points is less than +r_surface_atom + r_adsorbate_atom

    +
    +
    +
  4. +
  5. +
    Explicitly solve for the scaled normal at which the distance between

    surface atom and adsorbate atom = r_surface_atom + r_adsorbate_atom + +interstitial_gap. This exploits the superposition of vectors and the +distance formula, so it requires root finding.

    +
    +
    +
  6. +
+
+

Assumes that the adsorbate’s binding atom or center-of-mass (depending +on mode) is already placed at the site.

+
+
Parameters:
+
    +
  • adsorbate_c (ase.Atoms) – A copy of the adsorbate with coordinates at the site

  • +
  • slab_c (ase.Atoms) – A copy of the slab

  • +
  • site (np.ndarray) – the coordinate of the site

  • +
  • adsorbate_atoms (ase.Atoms) – the translated adsorbate

  • +
  • unit_normal (np.ndarray) – the unit vector normal to the surface

  • +
  • interstitial_gap (float) – the desired distance between the covalent radii of the +closest surface and adsorbate atom

  • +
+
+
Returns:
+

the magnitude of the normal vector for placement

+
+
Return type:
+

(float)

+
+
+
+ +
+
+_find_combos_to_check(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray, interstitial_gap: float)#
+

Find the pairs of surface and adsorbate atoms that would have an intersection event +while traversing the normal vector. For each pair, return pertinent information for +finding the point of intersection. +:param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site +:type adsorbate_c2: ase.Atoms +:param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered

+
+

about the site

+
+
+
Parameters:
+
    +
  • unit_normal (np.ndarray) – the unit vector normal to the surface

  • +
  • interstitial_gap (float) – the desired distance between the covalent radii of the +closest surface and adsorbate atom

  • +
+
+
Returns:
+

+
each entry in the list corresponds to one pair to check. With the
+
following information:

[(adsorbate_idx, slab_idx), r_adsorbate_atom + r_slab_atom, slab_atom_position]

+
+
+
+
+

+
+
Return type:
+

(list[lists])

+
+
+
+ +
+
+_get_projected_points(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray)#
+

Find the x and y coordinates of each atom projected onto the surface plane. +:param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site +:type adsorbate_c2: ase.Atoms +:param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered

+
+

about the site

+
+
+
Parameters:
+

unit_normal (np.ndarray) – the unit vector normal to the surface

+
+
Returns:
+

{“ads”: [[x1, y1], [x2, y2], …], “slab”: [[x1, y1], [x2, y2], …],}

+
+
Return type:
+

(dict)

+
+
+
+ +
+
+get_metadata_dict(ind)#
+

Returns a dict containing the atoms object and metadata for +one specified config, used for writing to files.

+
+ +
+ +
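A hedged end-to-end sketch of how AdsorbateSlabConfig might be driven, using only the constructors and classmethods documented on these pages; the fairchem.data.oc.core import path, the database index 0, and the exact contents of the metadata dict are assumptions:

from fairchem.data.oc.core import Adsorbate, AdsorbateSlabConfig, Bulk, Slab  # assumed import path

bulk = Bulk(bulk_src_id_from_db="mp-30")          # "mp-30" is the example src id from the Bulk docstring
slab = Slab.from_bulk_get_random_slab(bulk=bulk)  # classmethod documented on the Slab pages
adsorbate = Adsorbate(adsorbate_id_from_db=0)     # hypothetical database index

configs = AdsorbateSlabConfig(
    slab,
    adsorbate,
    num_sites=10,
    num_augmentations_per_site=1,
    interstitial_gap=0.1,
    mode="random_site_heuristic_placement",
)

# Atoms object plus metadata for the first generated configuration.
first = configs.get_metadata_dict(0)
print(sorted(first))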
+
+data.oc.core.adsorbate_slab_config.get_random_sites_on_triangle(vertices: numpy.ndarray, num_sites: int = 10)#
+

Sample num_sites random sites uniformly on a given 3D triangle. +Following Sec. 4.2 from https://www.cs.princeton.edu/~funk/tog02.pdf.

+
+ +
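For reference, Sec. 4.2 of the cited paper samples a triangle uniformly with a closed-form expression in two uniform random numbers. The standalone numpy sketch below illustrates that formula only; it is not the library's implementation:

import numpy as np

def sample_triangle(vertices: np.ndarray, num_sites: int = 10, rng=None) -> np.ndarray:
    """Uniformly sample points on the 3D triangle spanned by `vertices` (shape (3, 3))."""
    rng = np.random.default_rng() if rng is None else rng
    a, b, c = vertices
    r1, r2 = rng.random(num_sites), rng.random(num_sites)
    s = np.sqrt(r1)[:, None]
    # P = (1 - sqrt(r1)) * A + sqrt(r1) * (1 - r2) * B + sqrt(r1) * r2 * C
    return (1 - s) * a + s * (1 - r2[:, None]) * b + s * r2[:, None] * c

print(sample_triangle(np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]), num_sites=5))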
+
+data.oc.core.adsorbate_slab_config.custom_tile_atoms(atoms: ase.Atoms)#
+

Tile the atoms so that the center tile has the indices and positions of the +untiled structure.

+
+
Parameters:
+

atoms (ase.Atoms) – the atoms object to be tiled

+
+
Returns:
+

+
the tiled atoms which has been repeated 3 times in

the x and y directions but maintains the original indices on the central +unit cell.

+
+
+

+
+
Return type:
+

(ase.Atoms)

+
+
+
+ +
+
+data.oc.core.adsorbate_slab_config.get_interstitial_distances(adsorbate_slab_config: ase.Atoms)#
+

Check to see if there is any atomic overlap between surface atoms +and adsorbate atoms.

+
+
Parameters:
+

adsorbate_slab_configuration (ase.Atoms) – a slab atoms object with an +adsorbate placed

+
+
Returns:
+

True if there is atomic overlap, otherwise False

+
+
Return type:
+

(bool)

+
+
+
+ +
+
+data.oc.core.adsorbate_slab_config.there_is_overlap(adsorbate_slab_config: ase.Atoms)#
+

Check to see if there is any atomic overlap between surface atoms +and adsorbate atoms.

+
+
Parameters:
+

adsorbate_slab_configuration (ase.Atoms) – a slab atoms object with an +adsorbate placed

+
+
Returns:
+

True if there is atomic overlap, otherwise False

+
+
Return type:
+

(bool)

+
+
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/core/bulk/index.html b/autoapi/data/oc/core/bulk/index.html new file mode 100644 index 000000000..da7710085 --- /dev/null +++ b/autoapi/data/oc/core/bulk/index.html @@ -0,0 +1,861 @@ + + + + + + + + + + + data.oc.core.bulk — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.core.bulk#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

Bulk

Initializes a bulk object in one of 4 ways:

+
+
+class data.oc.core.bulk.Bulk(bulk_atoms: ase.Atoms = None, bulk_id_from_db: int = None, bulk_src_id_from_db: str = None, bulk_db_path: str = BULK_PKL_PATH, bulk_db: List[Dict[str, Any]] = None)#
+

Initializes a bulk object in one of 4 ways: +- Directly pass in an ase.Atoms object. +- Pass in index of bulk to select from bulk database. +- Pass in the src_id of the bulk to select from the bulk database. +- Randomly sample a bulk from bulk database if no other option is passed.

+
+
Parameters:
+
    +
  • bulk_atoms (ase.Atoms) – Bulk structure.

  • +
  • bulk_id_from_db (int) – Index of bulk in database pkl to select.

  • +
  • bulk_src_id_from_db (int) – Src id of bulk to select (e.g. “mp-30”).

  • +
  • bulk_db_path (str) – Path to bulk database.

  • +
  • bulk_db (List[Dict[str, Any]]) – Already-loaded database.

  • +
+
+
+
+
+_get_bulk_from_random(bulk_db)#
+
+ +
+
+set_source_dataset_id(src_id: str)#
+
+ +
+
+set_bulk_id_from_db(bulk_id_from_db: int)#
+
+ +
+
+get_slabs(max_miller=2, precomputed_slabs_dir=None)#
+

Returns a list of possible slabs for this bulk instance.

+
+ +
+
+__len__()#
+
+ +
+
+__str__()#
+

Return str(self).

+
+ +
+
+__repr__()#
+

Return repr(self).

+
+ +
+
+__eq__(other) bool#
+

Return self==value.

+
+ +
+ +
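A short, hedged usage sketch of the Bulk constructor options and get_slabs, assuming the fairchem.data.oc.core import path used in the type hints elsewhere on these pages:

from fairchem.data.oc.core import Bulk  # assumed import path

bulk = Bulk(bulk_src_id_from_db="mp-30")   # select by src id, per the docstring example
# Alternatives: Bulk(bulk_id_from_db=0), Bulk(bulk_atoms=my_atoms), or Bulk() for a random sample.

slabs = bulk.get_slabs(max_miller=2)       # enumerate candidate slabs for this bulk
print(len(bulk), len(slabs))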
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/core/index.html b/autoapi/data/oc/core/index.html new file mode 100644 index 000000000..a73281e34 --- /dev/null +++ b/autoapi/data/oc/core/index.html @@ -0,0 +1,1327 @@ + + + + + + + + + + + data.oc.core — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.core#

+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + + + + + + + + + + + + + +

Bulk

Initializes a bulk object in one of 4 ways:

Slab

Initializes a slab object, i.e. a particular slab tiled along xyz, in

Adsorbate

Initializes an adsorbate object in one of 4 ways:

AdsorbateSlabConfig

Initializes a list of adsorbate-catalyst systems for a given Adsorbate and Slab.

MultipleAdsorbateSlabConfig

Class to represent a slab with multiple adsorbates on it. This class only

+
+
+class data.oc.core.Bulk(bulk_atoms: ase.Atoms = None, bulk_id_from_db: int = None, bulk_src_id_from_db: str = None, bulk_db_path: str = BULK_PKL_PATH, bulk_db: List[Dict[str, Any]] = None)#
+

Initializes a bulk object in one of 4 ways: +- Directly pass in an ase.Atoms object. +- Pass in index of bulk to select from bulk database. +- Pass in the src_id of the bulk to select from the bulk database. +- Randomly sample a bulk from bulk database if no other option is passed.

+
+
Parameters:
+
    +
  • bulk_atoms (ase.Atoms) – Bulk structure.

  • +
  • bulk_id_from_db (int) – Index of bulk in database pkl to select.

  • +
  • bulk_src_id_from_db (int) – Src id of bulk to select (e.g. “mp-30”).

  • +
  • bulk_db_path (str) – Path to bulk database.

  • +
  • bulk_db (List[Dict[str, Any]]) – Already-loaded database.

  • +
+
+
+
+
+_get_bulk_from_random(bulk_db)#
+
+ +
+
+set_source_dataset_id(src_id: str)#
+
+ +
+
+set_bulk_id_from_db(bulk_id_from_db: int)#
+
+ +
+
+get_slabs(max_miller=2, precomputed_slabs_dir=None)#
+

Returns a list of possible slabs for this bulk instance.

+
+ +
+
+__len__()#
+
+ +
+
+__str__()#
+

Return str(self).

+
+ +
+
+__repr__()#
+

Return repr(self).

+
+ +
+
+__eq__(other) bool#
+

Return self==value.

+
+ +
+ +
+
+class data.oc.core.Slab(bulk=None, slab_atoms: ase.Atoms = None, millers: tuple = None, shift: float = None, top: bool = None, oriented_bulk: pymatgen.core.structure.Structure = None, min_ab: float = 0.8)#
+

Initializes a slab object, i.e. a particular slab tiled along xyz, in +one of 2 ways: +- Pass in a Bulk object and a slab 5-tuple containing +(atoms, miller, shift, top, oriented bulk). +- Pass in a Bulk object and randomly sample a slab.

+
+
Parameters:
+
    +
  • bulk (Bulk) – Corresponding Bulk object.

  • +
  • slab_atoms (ase.Atoms) – Slab atoms, tiled and tagged

  • +
  • millers (tuple) – Miller indices of slab.

  • +
  • shift (float) – Shift of slab.

  • +
  • top (bool) – Whether slab is top or bottom.

  • +
  • min_ab (float) – To confirm that the tiled structure spans this distance

  • +
+
+
+
+
+classmethod from_bulk_get_random_slab(bulk=None, max_miller=2, min_ab=8.0, save_path=None)#
+
+ +
+
+classmethod from_bulk_get_specific_millers(specific_millers, bulk=None, min_ab=8.0, save_path=None)#
+
+ +
+
+classmethod from_bulk_get_all_slabs(bulk=None, max_miller=2, min_ab=8.0, save_path=None)#
+
+ +
+
+classmethod from_precomputed_slabs_pkl(bulk=None, precomputed_slabs_pkl=None, max_miller=2, min_ab=8.0)#
+
+ +
+
+classmethod from_atoms(atoms: ase.Atoms = None, bulk=None, **kwargs)#
+
+ +
+
+has_surface_tagged()#
+
+ +
+
+get_metadata_dict()#
+
+ +
+
+__len__()#
+
+ +
+
+__str__()#
+

Return str(self).

+
+ +
+
+__repr__()#
+

Return repr(self).

+
+ +
+
+__eq__(other)#
+

Return self==value.

+
+ +
+ +
+
+class data.oc.core.Adsorbate(adsorbate_atoms: ase.Atoms = None, adsorbate_id_from_db: int = None, adsorbate_smiles_from_db: str = None, adsorbate_db_path: str = ADSORBATES_PKL_PATH, adsorbate_db: Dict[int, Tuple[Any, Ellipsis]] = None, adsorbate_binding_indices: list = None)#
+

Initializes an adsorbate object in one of 4 ways: +- Directly pass in an ase.Atoms object.

+
+

For this, you should also provide the index of the binding atom.

+
+
    +
  • Pass in index of adsorbate to select from adsorbate database.

  • +
  • Pass in the SMILES string of the adsorbate to select from the database.

  • +
  • Randomly sample an adsorbate from the adsorbate database.

  • +
+
+
Parameters:
+
    +
  • adsorbate_atoms (ase.Atoms) – Adsorbate structure.

  • +
  • adsorbate_id_from_db (int) – Index of adsorbate to select.

  • +
  • adsorbate_smiles_from_db (str) – A SMILES string of the desired adsorbate.

  • +
  • adsorbate_db_path (str) – Path to adsorbate database.

  • +
  • adsorbate_binding_indices (list) – The index/indices of the adsorbate atoms which are expected to bind.

  • +
+
+
+
+
+__len__()#
+
+ +
+
+__str__()#
+

Return str(self).

+
+ +
+
+__repr__()#
+

Return repr(self).

+
+ +
+
+_get_adsorbate_from_random(adsorbate_db)#
+
+ +
+
+_load_adsorbate(adsorbate: Tuple[Any, Ellipsis]) None#
+

Saves the fields from an adsorbate stored in a database. Fields added +after the first revision are conditionally added for backwards +compatibility with older database files.

+
+ +
+ +
+
+class data.oc.core.AdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbate: fairchem.data.oc.core.Adsorbate, num_sites: int = 100, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1, mode: str = 'random')#
+

Initializes a list of adsorbate-catalyst systems for a given Adsorbate and Slab.

+
+
Parameters:
+
    +
  • slab (Slab) – Slab object.

  • +
  • adsorbate (Adsorbate) – Adsorbate object.

  • +
  • num_sites (int) – Number of sites to sample.

  • +
  • num_augmentations_per_site (int) – Number of augmentations of the adsorbate per site. Total number of +generated structures will be num_sites * num_augmentations_per_site.

  • +
  • interstitial_gap (float) – Minimum distance in Angstroms between adsorbate and slab atoms.

  • +
  • mode (str) –

    “random”, “heuristic”, or “random_site_heuristic_placement”. +This affects surface site sampling and adsorbate placement on each site.

    +

    In “random”, we do a Delaunay triangulation of the surface atoms, then +sample sites uniformly at random within each triangle. When placing the +adsorbate, we randomly rotate it along xyz, and place it such that the +center of mass is at the site.

    +

    In “heuristic”, we use Pymatgen’s AdsorbateSiteFinder to find the most +energetically favorable sites, i.e., ontop, bridge, or hollow sites. +When placing the adsorbate, we randomly rotate it along z with only +slight rotation along x and y, and place it such that the binding atom +is at the site.

    +

    In “random_site_heuristic_placement”, we do a Delaunay triangulation of +the surface atoms, then sample sites uniformly at random within each +triangle. When placing the adsorbate, we randomly rotate it along z with +only slight rotation along x and y, and place it such that the binding +atom is at the site.

    +

    In all cases, the adsorbate is placed at the closest position of no +overlap with the slab plus interstitial_gap along the surface normal.

    +

  • +
+
+
+
+
+get_binding_sites(num_sites: int)#
+

Returns up to num_sites sites given the surface atoms’ positions.

+
+ +
+
+place_adsorbate_on_site(adsorbate: fairchem.data.oc.core.Adsorbate, site: numpy.ndarray, interstitial_gap: float = 0.1)#
+

Place the adsorbate at the given binding site.

+
+ +
+
+place_adsorbate_on_sites(sites: list, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1)#
+

Place the adsorbate at the given binding sites.

+
+ +
+
+_get_scaled_normal(adsorbate_c: ase.Atoms, slab_c: ase.Atoms, site: numpy.ndarray, unit_normal: numpy.ndarray, interstitial_gap: float = 0.1)#
+

Get the scaled normal that gives a proximate configuration without atomic +overlap by:

+
+
    +
  1. Projecting the adsorbate and surface atoms onto the surface plane.

  2. +
  3. +
    Identify all adsorbate atom - surface atom combinations for which

an intersection when translating along the normal would occur. +This is where the distance between the projected points is less than +r_surface_atom + r_adsorbate_atom

    +
    +
    +
  4. +
  5. +
    Explicitly solve for the scaled normal at which the distance between

    surface atom and adsorbate atom = r_surface_atom + r_adsorbate_atom + +interstitial_gap. This exploits the superposition of vectors and the +distance formula, so it requires root finding.

    +
    +
    +
  6. +
+
+

Assumes that the adsorbate’s binding atom or center-of-mass (depending +on mode) is already placed at the site.

+
+
Parameters:
+
    +
  • adsorbate_c (ase.Atoms) – A copy of the adsorbate with coordinates at the site

  • +
  • slab_c (ase.Atoms) – A copy of the slab

  • +
  • site (np.ndarray) – the coordinate of the site

  • +
  • adsorbate_atoms (ase.Atoms) – the translated adsorbate

  • +
  • unit_normal (np.ndarray) – the unit vector normal to the surface

  • +
  • interstitial_gap (float) – the desired distance between the covalent radii of the +closest surface and adsorbate atom

  • +
+
+
Returns:
+

the magnitude of the normal vector for placement

+
+
Return type:
+

(float)

+
+
+
+ +
+
+_find_combos_to_check(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray, interstitial_gap: float)#
+

Find the pairs of surface and adsorbate atoms that would have an intersection event +while traversing the normal vector. For each pair, return pertinent information for +finding the point of intersection. +:param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site +:type adsorbate_c2: ase.Atoms +:param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered

+
+

about the site

+
+
+
Parameters:
+
    +
  • unit_normal (np.ndarray) – the unit vector normal to the surface

  • +
  • interstitial_gap (float) – the desired distance between the covalent radii of the +closest surface and adsorbate atom

  • +
+
+
Returns:
+

+
each entry in the list corresponds to one pair to check. With the
+
following information:

[(adsorbate_idx, slab_idx), r_adsorbate_atom + r_slab_atom, slab_atom_position]

+
+
+
+
+

+
+
Return type:
+

(list[lists])

+
+
+
+ +
+
+_get_projected_points(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray)#
+

Find the x and y coordinates of each atom projected onto the surface plane. +:param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site +:type adsorbate_c2: ase.Atoms +:param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered

+
+

about the site

+
+
+
Parameters:
+

unit_normal (np.ndarray) – the unit vector normal to the surface

+
+
Returns:
+

{“ads”: [[x1, y1], [x2, y2], …], “slab”: [[x1, y1], [x2, y2], …],}

+
+
Return type:
+

(dict)

+
+
+
+ +
+
+get_metadata_dict(ind)#
+

Returns a dict containing the atoms object and metadata for +one specified config, used for writing to files.

+
+ +
+ +
+
+class data.oc.core.MultipleAdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbates: List[fairchem.data.oc.core.Adsorbate], num_sites: int = 100, num_configurations: int = 1, interstitial_gap: float = 0.1, mode: str = 'random_site_heuristic_placement')#
+

Bases: fairchem.data.oc.core.AdsorbateSlabConfig

+

Class to represent a slab with multiple adsorbates on it. This class only +returns a fixed combination of adsorbates placed on the surface because, unlike +the single-adsorbate enumeration done by AdsorbateSlabConfig, enumerating all +possible multi-adsorbate placements gets combinatorially large.

+
+
Parameters:
+
    +
  • slab (Slab) – Slab object.

  • +
  • adsorbates (List[Adsorbate]) – List of adsorbate objects to place on the slab.

  • +
  • num_sites (int) – Number of sites to sample.

  • +
  • num_configurations (int) – Number of configurations to generate per slab+adsorbate(s) combination. +This corresponds to selecting different site combinations to place +the adsorbates on.

  • +
  • interstitial_gap (float) – Minimum distance, in Angstroms, between adsorbate and slab atoms as +well as the inter-adsorbate distance.

  • +
  • mode (str) –

    “random”, “heuristic”, or “random_site_heuristic_placement”. +This affects surface site sampling and adsorbate placement on each site.

    +

    In “random”, we do a Delaunay triangulation of the surface atoms, then +sample sites uniformly at random within each triangle. When placing the +adsorbate, we randomly rotate it along xyz, and place it such that the +center of mass is at the site.

    +

    In “heuristic”, we use Pymatgen’s AdsorbateSiteFinder to find the most +energetically favorable sites, i.e., ontop, bridge, or hollow sites. +When placing the adsorbate, we randomly rotate it along z with only +slight rotation along x and y, and place it such that the binding atom +is at the site.

    +

    In “random_site_heuristic_placement”, we do a Delaunay triangulation of +the surface atoms, then sample sites uniformly at random within each +triangle. When placing the adsorbate, we randomly rotate it along z with +only slight rotation along x and y, and place it such that the binding +atom is at the site.

    +

    In all cases, the adsorbate is placed at the closest position of no +overlap with the slab plus interstitial_gap along the surface normal.

    +

  • +
+
+
+
+
+place_adsorbates_on_sites(sites: list, num_configurations: int = 1, interstitial_gap: float = 0.1)#
+

Place the adsorbate at the given binding sites.

+

This method generates a fixed number of configurations where sites are +selected to ensure that adsorbate binding indices are at least a fair +distance away from each other (covalent radii + interstitial gap). +While this helps prevent adsorbate overlap, it does not guarantee it, +since non-binding adsorbate atoms can overlap if the right combination +of angles is sampled.

+
+ +
+
+get_metadata_dict(ind)#
+

Returns a dict containing the atoms object and metadata for +one specified config, used for writing to files.

+
+ +
+ +
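A hedged sketch of placing two database adsorbates on one slab with MultipleAdsorbateSlabConfig, using only the documented constructor arguments; the import path and the database indices are assumptions:

from fairchem.data.oc.core import Adsorbate, Bulk, MultipleAdsorbateSlabConfig, Slab  # assumed path

bulk = Bulk(bulk_src_id_from_db="mp-30")
slab = Slab.from_bulk_get_random_slab(bulk=bulk)
adsorbates = [Adsorbate(adsorbate_id_from_db=i) for i in (0, 1)]  # two hypothetical database entries

multi = MultipleAdsorbateSlabConfig(
    slab,
    adsorbates,
    num_sites=100,
    num_configurations=2,
    interstitial_gap=0.1,
    mode="random_site_heuristic_placement",
)
print(multi.get_metadata_dict(0))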
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/core/multi_adsorbate_slab_config/index.html b/autoapi/data/oc/core/multi_adsorbate_slab_config/index.html new file mode 100644 index 000000000..9de8c134c --- /dev/null +++ b/autoapi/data/oc/core/multi_adsorbate_slab_config/index.html @@ -0,0 +1,869 @@ + + + + + + + + + + + data.oc.core.multi_adsorbate_slab_config — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.core.multi_adsorbate_slab_config#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

MultipleAdsorbateSlabConfig

Class to represent a slab with multiple adsorbates on it. This class only

+
+
+

Functions#

+ + + + + + +

update_distance_map(prev_distance_map, site_idx, ...)

Given a new site and the adsorbate we plan on placing there,

+
+
+class data.oc.core.multi_adsorbate_slab_config.MultipleAdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbates: List[fairchem.data.oc.core.Adsorbate], num_sites: int = 100, num_configurations: int = 1, interstitial_gap: float = 0.1, mode: str = 'random_site_heuristic_placement')#
+

Bases: fairchem.data.oc.core.AdsorbateSlabConfig

+

Class to represent a slab with multiple adsorbates on it. This class only +returns a fixed combination of adsorbates placed on the surface because, unlike +the single-adsorbate enumeration done by AdsorbateSlabConfig, enumerating all +possible multi-adsorbate placements gets combinatorially large.

+
+
Parameters:
+
    +
  • slab (Slab) – Slab object.

  • +
  • adsorbates (List[Adsorbate]) – List of adsorbate objects to place on the slab.

  • +
  • num_sites (int) – Number of sites to sample.

  • +
  • num_configurations (int) – Number of configurations to generate per slab+adsorbate(s) combination. +This corresponds to selecting different site combinations to place +the adsorbates on.

  • +
  • interstitial_gap (float) – Minimum distance, in Angstroms, between adsorbate and slab atoms as +well as the inter-adsorbate distance.

  • +
  • mode (str) –

    “random”, “heuristic”, or “random_site_heuristic_placement”. +This affects surface site sampling and adsorbate placement on each site.

    +

    In “random”, we do a Delaunay triangulation of the surface atoms, then +sample sites uniformly at random within each triangle. When placing the +adsorbate, we randomly rotate it along xyz, and place it such that the +center of mass is at the site.

    +

    In “heuristic”, we use Pymatgen’s AdsorbateSiteFinder to find the most +energetically favorable sites, i.e., ontop, bridge, or hollow sites. +When placing the adsorbate, we randomly rotate it along z with only +slight rotation along x and y, and place it such that the binding atom +is at the site.

    +

    In “random_site_heuristic_placement”, we do a Delaunay triangulation of +the surface atoms, then sample sites uniformly at random within each +triangle. When placing the adsorbate, we randomly rotate it along z with +only slight rotation along x and y, and place it such that the binding +atom is at the site.

    +

    In all cases, the adsorbate is placed at the closest position of no +overlap with the slab plus interstitial_gap along the surface normal.

    +

  • +
+
+
+
+
+place_adsorbates_on_sites(sites: list, num_configurations: int = 1, interstitial_gap: float = 0.1)#
+

Place the adsorbate at the given binding sites.

+

This method generates a fixed number of configurations where sites are +selected to ensure that adsorbate binding indices are at least a fair +distance away from each other (covalent radii + interstitial gap). +While this helps prevent adsorbate overlap, it does not guarantee it, +since non-binding adsorbate atoms can overlap if the right combination +of angles is sampled.

+
+ +
+
+get_metadata_dict(ind)#
+

Returns a dict containing the atoms object and metadata for +one specified config, used for writing to files.

+
+ +
+ +
+
+data.oc.core.multi_adsorbate_slab_config.update_distance_map(prev_distance_map, site_idx, adsorbate, pseudo_atoms)#
+

Given a new site and the adsorbate we plan on placing there, +update the distance mapping to reflect the new distances from sites to nearest adsorbates. +We incorporate the covalent radii of the placed adsorbate binding atom in our distance +calculation to prevent atom overlap.

+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/core/slab/index.html b/autoapi/data/oc/core/slab/index.html new file mode 100644 index 000000000..6fe2c2814 --- /dev/null +++ b/autoapi/data/oc/core/slab/index.html @@ -0,0 +1,1221 @@ + + + + + + + + + + + data.oc.core.slab — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.core.slab#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

Slab

Initializes a slab object, i.e. a particular slab tiled along xyz, in

+
+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

tile_and_tag_atoms(unit_slab_struct, bulk_atoms[, min_ab])

This function combines the next three functions that tile, tag,

set_fixed_atom_constraints(atoms)

This function fixes sub-surface atoms of a surface. Also works on systems

tag_surface_atoms([slab_atoms, bulk_atoms])

Sets the tags of an ase.Atoms object. Any atom that we consider a "bulk"

tile_atoms(atoms[, min_ab])

This function will repeat an atoms structure in the direction of the a and b

find_surface_atoms_by_height(surface_atoms)

As discussed in the docstring for find_surface_atoms_with_voronoi,

find_surface_atoms_with_voronoi_given_height(...)

Labels atoms as surface or bulk atoms according to their coordination

calculate_center_of_mass(struct)

Calculates the center of mass of the slab.

calculate_coordination_of_bulk_atoms(bulk_atoms)

Finds all unique atoms in a bulk structure and then determines their

compute_slabs([bulk_atoms, max_miller, specific_millers])

Enumerates all the symmetrically distinct slabs of a bulk structure.

flip_struct(struct)

Flips an atoms object upside down. Normally used to flip slabs.

is_structure_invertible(struct)

This function figures out whether or not a Structure

standardize_bulk(atoms)

There are many ways to define a bulk unit cell. If you change the unit

+
+
+class data.oc.core.slab.Slab(bulk=None, slab_atoms: ase.Atoms = None, millers: tuple = None, shift: float = None, top: bool = None, oriented_bulk: pymatgen.core.structure.Structure = None, min_ab: float = 0.8)#
+

Initializes a slab object, i.e. a particular slab tiled along xyz, in +one of 2 ways: +- Pass in a Bulk object and a slab 5-tuple containing +(atoms, miller, shift, top, oriented bulk). +- Pass in a Bulk object and randomly sample a slab.

+
+
Parameters:
+
    +
  • bulk (Bulk) – Corresponding Bulk object.

  • +
  • slab_atoms (ase.Atoms) – Slab atoms, tiled and tagged

  • +
  • millers (tuple) – Miller indices of slab.

  • +
  • shift (float) – Shift of slab.

  • +
  • top (bool) – Whether slab is top or bottom.

  • +
  • min_ab (float) – To confirm that the tiled structure spans this distance

  • +
+
+
+
+
+classmethod from_bulk_get_random_slab(bulk=None, max_miller=2, min_ab=8.0, save_path=None)#
+
+ +
+
+classmethod from_bulk_get_specific_millers(specific_millers, bulk=None, min_ab=8.0, save_path=None)#
+
+ +
+
+classmethod from_bulk_get_all_slabs(bulk=None, max_miller=2, min_ab=8.0, save_path=None)#
+
+ +
+
+classmethod from_precomputed_slabs_pkl(bulk=None, precomputed_slabs_pkl=None, max_miller=2, min_ab=8.0)#
+
+ +
+
+classmethod from_atoms(atoms: ase.Atoms = None, bulk=None, **kwargs)#
+
+ +
+
+has_surface_tagged()#
+
+ +
+
+get_metadata_dict()#
+
+ +
+
+__len__()#
+
+ +
+
+__str__()#
+

Return str(self).

+
+ +
+
+__repr__()#
+

Return repr(self).

+
+ +
+
+__eq__(other)#
+

Return self==value.

+
+ +
+ +
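A hedged sketch of the Slab classmethods listed above (import path assumed; whether from_bulk_get_specific_millers returns one Slab or a list is not stated on this page, so the example only prints the result):

from fairchem.data.oc.core import Bulk, Slab  # assumed import path

bulk = Bulk(bulk_src_id_from_db="mp-30")

# Slab(s) for one specific set of Miller indices ...
slabs_111 = Slab.from_bulk_get_specific_millers((1, 1, 1), bulk=bulk)
print(slabs_111)

# ... or a random slab up to a maximum Miller index.
random_slab = Slab.from_bulk_get_random_slab(bulk=bulk, max_miller=2)
print(random_slab.has_surface_tagged())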
+
+data.oc.core.slab.tile_and_tag_atoms(unit_slab_struct: pymatgen.core.structure.Structure, bulk_atoms: ase.Atoms, min_ab: float = 8)#
+

This function combines the next three functions that tile, tag, +and constrain the atoms.

+
+
Parameters:
+
    +
  • unit_slab_struct (Structure) – The untiled slab structure

  • +
  • bulk_atoms (ase.Atoms) – Atoms of the corresponding bulk structure, used for tagging

  • +
  • min_ab (float) – The minimum distance in x and y spanned by the tiled structure.

  • +
+
+
Returns:
+

atoms_tiled – A copy of the slab atoms that is tiled, tagged, and constrained

+
+
Return type:
+

ase.Atoms

+
+
+
+ +
+
+data.oc.core.slab.set_fixed_atom_constraints(atoms)#
+

This function fixes sub-surface atoms of a surface. Also works on systems +that have surface + adsorbate(s), as long as the bulk atoms are tagged with +0, surface atoms are tagged with 1, and the adsorbate atoms are tagged +with 2 or above.

+

This is used for both surface atoms and the combined surface+adsorbate.

+
+
Parameters:
+

atoms (ase.Atoms) – Atoms object of the slab or slab+adsorbate system, with bulk atoms +tagged as 0, surface atoms tagged as 1, and adsorbate atoms tagged +as 2 or above.

+
+
Returns:
+

atoms – A deep copy of the atoms argument, but where the appropriate +atoms are constrained.

+
+
Return type:
+

ase.Atoms

+
+
+
+ +
+
+data.oc.core.slab.tag_surface_atoms(slab_atoms: ase.Atoms = None, bulk_atoms: ase.Atoms = None)#
+

Sets the tags of an ase.Atoms object. Any atom that we consider a “bulk” +atom will have a tag of 0, and any atom that we consider a “surface” atom +will have a tag of 1. We use a combination of Voronoi neighbor algorithms +(adapted from pymatgen.core.surface.Slab.get_surface_sites; see +https://pymatgen.org/pymatgen.core.surface.html) and a distance cutoff.

+
+
Parameters:
+
    +
  • slab_atoms (ase.Atoms) – The slab where you are trying to find surface sites.

  • +
  • bulk_atoms (ase.Atoms) – The bulk structure that the surface was cut from.

  • +
+
+
Returns:
+

slab_atoms – A copy of the slab atoms with the surface atoms tagged as 1.

+
+
Return type:
+

ase.Atoms

+
+
+
+ +
+
+data.oc.core.slab.tile_atoms(atoms: ase.Atoms, min_ab: float = 8)#
+

This function will repeat an atoms structure in the direction of the a and b +lattice vectors such that they are at least as wide as the min_ab constant.

+
+
Parameters:
+
    +
  • atoms (ase.Atoms) – The structure to tile.

  • +
  • min_ab (float) – The minimum distance in x and y spanned by the tiled structure.

  • +
+
+
Returns:
+

atoms_tiled – The tiled structure.

+
+
Return type:
+

ase.Atoms

+
+
+
+ +
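A minimal standalone sketch of the repeat-count logic this description implies, written with ase for illustration only; it is not necessarily the library's implementation:

import numpy as np
from ase.build import fcc111

def tile_to_min_ab(atoms, min_ab=8.0):
    """Repeat `atoms` along the a and b lattice vectors until each spans at least `min_ab` Angstroms."""
    a_len, b_len, _ = np.linalg.norm(atoms.cell.array, axis=1)
    na = int(np.ceil(min_ab / a_len))
    nb = int(np.ceil(min_ab / b_len))
    return atoms.repeat((na, nb, 1))

slab = fcc111("Cu", size=(1, 1, 3), vacuum=10.0)
tiled = tile_to_min_ab(slab)
print(len(slab), len(tiled))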
+
+data.oc.core.slab.find_surface_atoms_by_height(surface_atoms)#
+

As discussed in the docstring for find_surface_atoms_with_voronoi, +sometimes we might accidentally tag a surface atom as a bulk atom if there +are multiple coordination environments for that atom type within the bulk. +One heuristic that we use to address this is to simply figure out if an +atom is close to the surface. This function will figure that out.

+

Specifically: We consider an atom a surface atom if it is within 2 +Angstroms of the highest atom in the z-direction (or more accurately, the +direction of the 3rd unit cell vector).

+
+
Parameters:
+

surface_atoms (ase.Atoms)

+
+
Returns:
+

tags – A list that contains the indices of the surface atoms.

+
+
Return type:
+

list

+
+
+
+ +
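The 2 Angstrom rule described above can be written down compactly; the following standalone numpy/ase sketch only illustrates that heuristic and is not the library's implementation:

import numpy as np
from ase.build import fcc111

def surface_indices_by_height(atoms, cutoff=2.0):
    """Indices of atoms within `cutoff` Angstroms of the highest atom along the 3rd cell vector."""
    c = atoms.cell.array[2]
    unit_c = c / np.linalg.norm(c)
    heights = atoms.positions @ unit_c
    return np.where(heights >= heights.max() - cutoff)[0]

slab = fcc111("Cu", size=(2, 2, 4), vacuum=10.0)
print(surface_indices_by_height(slab))   # expected: the four top-layer atoms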
+
+data.oc.core.slab.find_surface_atoms_with_voronoi_given_height(bulk_atoms, slab_atoms, height_tags)#
+

Labels atoms as surface or bulk atoms according to their coordination +relative to their bulk structure. If an atom’s coordination is less than it +normally is in a bulk, then we consider it a surface atom. We calculate the +coordination using pymatgen’s Voronoi algorithms.

+

Note that if a single element has different sites within a bulk and these +sites have different coordinations, then we consider slab atoms +“under-coordinated” only if they are less coordinated than the most +undercoordinated bulk atom. For example: Say we have a bulk with two Cu +sites. One site has a coordination of 12 and another a coordination of 9. +If a slab atom has a coordination of 10, we will consider it a bulk atom.

+
+
Parameters:
+
    +
  • bulk_atoms (ase.Atoms) – The bulk structure that the surface was cut from.

  • +
  • slab_atoms (ase.Atoms) – The slab structure.

  • +
  • height_tags (list) – The tags determined by the find_surface_atoms_by_height algo.

  • +
+
+
Returns:
+

tags – A list of 0s and 1s whose indices align with the atoms in +slab_atoms. 0s indicate a bulk atom and 1 indicates a surface atom.

+
+
Return type:
+

list

+
+
+
+ +
+
+data.oc.core.slab.calculate_center_of_mass(struct)#
+

Calculates the center of mass of the slab.

+
+ +
+
+data.oc.core.slab.calculate_coordination_of_bulk_atoms(bulk_atoms)#
+

Finds all unique atoms in a bulk structure and then determines their +coordination number. Then parses these coordination numbers into a +dictionary whose keys are the elements of the atoms and whose values are +their possible coordination numbers. +For example: bulk_cns = {‘Pt’: {3., 12.}, ‘Pd’: {12.}}

+
+
Parameters:
+

bulk_atoms (ase.Atoms) – The bulk structure.

+
+
Returns:
+

bulk_cn_dict – A dictionary whose keys are the elements of the atoms and whose values +are their possible coordination numbers.

+
+
Return type:
+

dict

+
+
+
+ +
+
+data.oc.core.slab.compute_slabs(bulk_atoms: ase.Atoms = None, max_miller: int = 2, specific_millers: list = None)#
+

Enumerates all the symmetrically distinct slabs of a bulk structure. +It will not enumerate slabs with Miller indices above the +max_miller argument. Note that we also look at the bottoms of slabs +if they are distinct from the top. If they are distinct, we flip the +surface so the bottom is pointing upwards.

+
+
Parameters:
+
    +
  • bulk_atoms (ase.Atoms) – The bulk structure.

  • +
  • max_miller (int) – The maximum Miller index of the slabs to enumerate. Increasing this +argument will increase the number of slabs, and the slabs will generally +become larger.

  • +
  • specific_millers (list) – A list of Miller indices that you want to enumerate. If this argument +is not None, then the max_miller argument is ignored.

  • +
+
+
Returns:
+

all_slabs_info – A list of 5-tuples containing pymatgen structure objects for enumerated +slabs, the Miller indices, floats for the shifts, booleans for top, and +the oriented bulk structure.

+
+
Return type:
+

list

+
+
+
+ +
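For orientation, pymatgen exposes a related enumeration utility. The sketch below shows symmetrically distinct slab generation with pymatgen directly; it is an assumption for illustration, not the internals of compute_slabs:

from pymatgen.core import Lattice, Structure
from pymatgen.core.surface import generate_all_slabs

# Conventional fcc Cu cell (4 atoms).
cu = Structure(Lattice.cubic(3.6), ["Cu"] * 4,
               [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]])
slabs = generate_all_slabs(cu, max_index=2, min_slab_size=7.0, min_vacuum_size=10.0)
print(len(slabs), sorted({slab.miller_index for slab in slabs}))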
+
+data.oc.core.slab.flip_struct(struct: pymatgen.core.structure.Structure)#
+

Flips an atoms object upside down. Normally used to flip slabs.

+
+
Parameters:
+

struct (Structure) – pymatgen structure object of the surface you want to flip

+
+
Returns:
+

flipped_struct – pymatgen structure object of the flipped surface.

+
+
Return type:
+

Structure

+
+
+
+ +
+
+data.oc.core.slab.is_structure_invertible(struct: pymatgen.core.structure.Structure)#
+

This function figures out whether or not a Structure +object is symmetric in the z-direction. In this function, the affine matrix is a rotation +matrix that is multiplied with the XYZ positions of the crystal. If the z,z +component of that is negative, it means a symmetry operation exists; it could +be a mirror operation, or one that involves multiple rotations/etc. +Regardless, it means that the top becomes the bottom and vice-versa, and the +structure is symmetric, i.e. structure_XYZ = structure_XYZ*M.

+

In short: If this function returns False, then the input structure can +be flipped in the z-direction to create a new structure.

+
+
Parameters:
+

struct (Structure) – pymatgen structure object of the slab.

+
+
Returns:
+

    +
  • A boolean indicating whether or not your Structure object is

  • +
  • symmetric in z-direction (i.e. symmetric with respect to x-y plane).

  • +
+

+
+
+
+ +
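The check described above can be approximated directly with pymatgen's symmetry operations. This sketch follows the wording of the docstring (look for an operation with a negative z,z rotation component) and is an assumption, not the source code:

from pymatgen.core import Lattice, Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer

def has_z_inverting_symmetry(struct: Structure, symprec: float = 0.1) -> bool:
    """True if some symmetry operation has a negative z,z rotation component (top maps to bottom)."""
    ops = SpacegroupAnalyzer(struct, symprec=symprec).get_symmetry_operations()
    return any(op.rotation_matrix[2][2] < 0 for op in ops)

cu = Structure(Lattice.cubic(3.6), ["Cu"], [[0.0, 0.0, 0.0]])
print(has_z_inverting_symmetry(cu))  # an fcc bulk has such operations, so this prints True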
+
+data.oc.core.slab.standardize_bulk(atoms: ase.Atoms)#
+

There are many ways to define a bulk unit cell. If you change the unit +cell itself but also change the locations of the atoms within the unit +cell, you can effectively get the same bulk structure. To address this, +there is a standardization method used to reduce the degrees of freedom +such that each unit cell only has one “true” configuration. This +function will align a unit cell you give it to fit within this +standardization.

+
+
Parameters:
+

atoms (ase.Atoms) – ase.Atoms object of the bulk you want to standardize.

+
+
Returns:
+

standardized_struct – pymatgen structure object of the standardized bulk.

+
+
Return type:
+

Structure

+
+
+
+ +
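One common way to obtain such a canonical setting is pymatgen's conventional standard structure; the ase-to-pymatgen conversion below is only an illustration of the idea, not necessarily what standardize_bulk does:

from ase.build import bulk
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer

atoms = bulk("Cu", "fcc", a=3.6)                               # a primitive fcc cell (1 atom)
struct = AseAtomsAdaptor.get_structure(atoms)                  # ase.Atoms -> pymatgen Structure
standardized = SpacegroupAnalyzer(struct).get_conventional_standard_structure()
print(len(struct), len(standardized))                          # 1 primitive site -> 4 conventional sites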
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/databases/index.html b/autoapi/data/oc/databases/index.html new file mode 100644 index 000000000..3402d84fb --- /dev/null +++ b/autoapi/data/oc/databases/index.html @@ -0,0 +1,769 @@ + + + + + + + + + + + data.oc.databases — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.databases

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.oc.databases#

+
+

Subpackages#

+ +
+
+

Submodules#

+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/databases/pkls/index.html b/autoapi/data/oc/databases/pkls/index.html new file mode 100644 index 000000000..0110727f9 --- /dev/null +++ b/autoapi/data/oc/databases/pkls/index.html @@ -0,0 +1,772 @@ + + + + + + + + + + + data.oc.databases.pkls — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.databases.pkls

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.oc.databases.pkls#

+
+

Package Contents#

+
+
+data.oc.databases.pkls.BULK_PKL_PATH#
+
+ +
+
+data.oc.databases.pkls.ADSORBATES_PKL_PATH#
+
+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/databases/update/index.html b/autoapi/data/oc/databases/update/index.html new file mode 100644 index 000000000..ada366dc0 --- /dev/null +++ b/autoapi/data/oc/databases/update/index.html @@ -0,0 +1,813 @@ + + + + + + + + + + + data.oc.databases.update — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.databases.update

+ +
+ +
+
+ + + + +
+ +
+

data.oc.databases.update#

+

Script for updating ase pkl and db files from v3.19 to v3.21. +Run it with ase v3.19.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + +

pbc_patch(self)

set_pbc_patch(self, pbc)

update_pkls()

update_dbs()

+
+
+data.oc.databases.update.pbc_patch(self)#
+
+ +
+
+data.oc.databases.update.set_pbc_patch(self, pbc)#
+
+ +
+
+data.oc.databases.update.update_pkls()#
+
+ +
+
+data.oc.databases.update.update_dbs()#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/experimental/get_energies/index.html b/autoapi/data/oc/experimental/get_energies/index.html new file mode 100644 index 000000000..dbac47e8d --- /dev/null +++ b/autoapi/data/oc/experimental/get_energies/index.html @@ -0,0 +1,792 @@ + + + + + + + + + + + data.oc.experimental.get_energies — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.experimental.get_energies

+ +
+ +
+
+ + + + +
+ +
+

data.oc.experimental.get_energies#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

extract_file(zipname, file_to_unzip, extract_to)

process_func(indices, dirlist, ans)

+
+
+

Attributes#

+ + + + + + +

input_folder

+
+
+data.oc.experimental.get_energies.extract_file(zipname, file_to_unzip, extract_to)#
+
+ +
+
+data.oc.experimental.get_energies.process_func(indices, dirlist, ans)#
+
+ +
+
+data.oc.experimental.get_energies.input_folder = 'temp_download/'#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/experimental/merge_traj/index.html b/autoapi/data/oc/experimental/merge_traj/index.html new file mode 100644 index 000000000..5fc622a73 --- /dev/null +++ b/autoapi/data/oc/experimental/merge_traj/index.html @@ -0,0 +1,775 @@ + + + + + + + + + + + data.oc.experimental.merge_traj — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.experimental.merge_traj

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.oc.experimental.merge_traj#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

extract_file(zipname, file_to_unzip, extract_to)

main()

Given a directory containing adsorbate subdirectories, loops through all

+
+
+data.oc.experimental.merge_traj.extract_file(zipname, file_to_unzip, extract_to)#
+
+ +
+
+data.oc.experimental.merge_traj.main()#
+

Given a directory containing adsorbate subdirectories, loops through all +runs and merges intermediate checkpoints into a single, full trajectory.

+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/experimental/perturb_systems/index.html b/autoapi/data/oc/experimental/perturb_systems/index.html new file mode 100644 index 000000000..28bb94ed8 --- /dev/null +++ b/autoapi/data/oc/experimental/perturb_systems/index.html @@ -0,0 +1,766 @@ + + + + + + + + + + + data.oc.experimental.perturb_systems — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.experimental.perturb_systems

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.oc.experimental.perturb_systems#

+
+

Module Contents#

+
+

Functions#

+ + + + + + +

main()

Rattles every image along a relaxation pathway at 5 different variances.

+
+
+data.oc.experimental.perturb_systems.main()#
+

Rattles every image along a relaxation pathway at 5 different variances. +Rattled images are then put in their own directory along with the input +files necessary to run VASP calculations.

+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/experimental/rattle_test/index.html b/autoapi/data/oc/experimental/rattle_test/index.html new file mode 100644 index 000000000..417ca925a --- /dev/null +++ b/autoapi/data/oc/experimental/rattle_test/index.html @@ -0,0 +1,765 @@ + + + + + + + + + + + data.oc.experimental.rattle_test — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.experimental.rattle_test

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.oc.experimental.rattle_test#

+
+

Module Contents#

+
+

Functions#

+ + + + + + +

main()

Checks whether ASE's rattle modifies fixed atoms.

+
+
+data.oc.experimental.rattle_test.main()#
+

Checks whether ASE’s rattle modifies fixed atoms.

+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/experimental/utils/index.html b/autoapi/data/oc/experimental/utils/index.html new file mode 100644 index 000000000..aa1bd0484 --- /dev/null +++ b/autoapi/data/oc/experimental/utils/index.html @@ -0,0 +1,800 @@ + + + + + + + + + + + data.oc.experimental.utils — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.experimental.utils

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.oc.experimental.utils#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + +

v0_check(full_traj, initial)

Checks whether the initial structure as gathered from the POSCAR input file

restart_bug_check(full_traj)

Observed that some of the trajectories had a strange identically cyclical

plot_traj(traj, fname)

Plots the energy profile of a given trajectory

+
+
+data.oc.experimental.utils.v0_check(full_traj, initial)#
+

Checks whether the initial structure as gathered from the POSCAR input file +is in agreement with the initial image of the full trajectory. If not, the +trajectory comes from the V0 dataset, which failed to save intermediate +checkpoints.

+

Args +full_traj (list of Atoms objects): Calculated full trajectory. +initial (Atoms object): Starting image provided by POSCAR.

+
+ +
+
+data.oc.experimental.utils.restart_bug_check(full_traj)#
+

Observed that some of the trajectories had a strange identically cyclical +behavior - suggesting that a checkpoint was restarted from an earlier +checkpoint rather than the latest. Checks whether the trajectory provided +falls within that bug.

+

Args +full_traj (list of Atoms objects): Calculated full trajectory.

+
+ +
+
+data.oc.experimental.utils.plot_traj(traj, fname)#
+

Plots the energy profile of a given trajectory

+

Args +traj (list of Atoms objects): Full trajectory to be plotted +fname (str): Filename to be used as title and save figure as.

+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/index.html b/autoapi/data/oc/index.html new file mode 100644 index 000000000..ef3d651c2 --- /dev/null +++ b/autoapi/data/oc/index.html @@ -0,0 +1,795 @@ + + + + + + + + + + + data.oc — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.oc#

+
+

Subpackages#

+ +
+
+

Submodules#

+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/scripts/precompute_sample_structures/index.html b/autoapi/data/oc/scripts/precompute_sample_structures/index.html new file mode 100644 index 000000000..7689e9fd5 --- /dev/null +++ b/autoapi/data/oc/scripts/precompute_sample_structures/index.html @@ -0,0 +1,937 @@ + + + + + + + + + + + data.oc.scripts.precompute_sample_structures — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.scripts.precompute_sample_structures#

+

This submodule contains the scripts that we used to sample the adsorption +structures.

+

Note that some of these scripts were taken from +[GASpy](ulissigroup/GASpy) with permission of the author.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + +

enumerate_surfaces_for_saving(bulk_atoms[, max_miller])

Enumerate all the symmetrically distinct surfaces of a bulk structure. It

standardize_bulk(atoms)

There are many ways to define a bulk unit cell. If you change the unit cell

is_structure_invertible(structure)

This function figures out whether or not a pymatgen.Structure object has

flip_struct(struct)

Flips an atoms object upside down. Normally used to flip surfaces.

precompute_enumerate_surface(bulk_database, ...)

+
+
+

Attributes#

+ + + + + + + + + + + + + + + + + + +

__authors__

__email__

BULK_PKL

MAX_MILLER

s

+
+
+data.oc.scripts.precompute_sample_structures.__authors__ = ['Kevin Tran', 'Aini Palizhati', 'Siddharth Goyal', 'Zachary Ulissi']#
+
+ +
+
+data.oc.scripts.precompute_sample_structures.__email__ = ['ktran@andrew.cmu.edu']#
+
+ +
+
+data.oc.scripts.precompute_sample_structures.BULK_PKL = '/fill/this/in/with/path/to/bulk/pkl/file'#
+
+ +
+
+data.oc.scripts.precompute_sample_structures.MAX_MILLER = 2#
+
+ +
+
+data.oc.scripts.precompute_sample_structures.enumerate_surfaces_for_saving(bulk_atoms, max_miller=MAX_MILLER)#
+

Enumerate all the symmetrically distinct surfaces of a bulk structure. It +will not enumerate surfaces with Miller indices above the max_miller +argument. Note that we also look at the bottoms of surfaces if they are +distinct from the top. If they are distinct, we flip the surface so the bottom +is pointing upwards.

+
+
Parameters:
+
    +
  • bulk_atoms (ase.Atoms) – The bulk structure you want to enumerate surfaces from.

  • +
  • max_miller (int) – The maximum Miller index of the surfaces you are willing to enumerate. Increasing this argument will +increase the number of surfaces, but the surfaces will +generally become larger.

  • +
+
+
Returns:
+

+
pymatgen.Structure

objects for surfaces we have enumerated, the Miller +indices, floats for the shifts, and Booleans for “top”.

+
+
+

+
+
Return type:
+

all_slabs_info A list of 4-tuples containing

+
+
+
+ +
+
+data.oc.scripts.precompute_sample_structures.standardize_bulk(atoms)#
+

There are many ways to define a bulk unit cell. If you change the unit cell +itself but also change the locations of the atoms within the unit cell, you +can get effectively the same bulk structure. To address this, there is a +standardization method used to reduce the degrees of freedom such that each +unit cell only has one “true” configuration. This function will align a +unit cell you give it to fit within this standardization.

+
+
Arg:

atoms ase.Atoms object of the bulk you want to standardize

+
+
+
+
Returns:
+

standardized_struct pymatgen.Structure of the standardized bulk

+
+
+
+ +
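For illustration only (assumes ASE and pymatgen are installed; the variable names are made up): feeding any equivalent description of the same bulk should map onto the one standardized setting.

>>> from ase.build import bulk
>>> atoms = bulk("Pt", "fcc", a=3.92, cubic=True)        # a conventional cubic cell of the same material
>>> standardized = standardize_bulk(atoms)               # -> pymatgen.Structure in the standardized setting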
+
+data.oc.scripts.precompute_sample_structures.is_structure_invertible(structure)#
+

This function figures out whether or not a pymatgen.Structure object has the relevant symmetry. In this function, the affine matrix is a rotation matrix that is multiplied with the XYZ positions of the crystal. If its z,z component is negative, a symmetry operation exists; it could be a mirror operation, or one that involves multiple rotations, etc. Regardless, it means that the top becomes the bottom and vice versa, and the structure is symmetric, i.e. structure_XYZ = structure_XYZ * M.

+

In short: If this function returns False, then the input structure can +be flipped in the z-direction to create a new structure.

+
+
Arg:

structure A pymatgen.Structure object.

+
+
Returns

A boolean indicating whether or not the structure is symmetric in the z-direction (i.e. symmetric with respect to the x-y plane).

+
+
+
+ +
+
+data.oc.scripts.precompute_sample_structures.flip_struct(struct)#
+

Flips an atoms object upside down. Normally used to flip surfaces.

+
+
Arg:

atoms pymatgen.Structure object

+
+
+
+
Returns:
+

+
flipped_struct – The same structure that was fed in as an argument, but flipped upside down.

+
+
+

+
+
+
+ +
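A hedged sketch of how the two helpers above are typically combined (consistent with the docstrings, but not copied from the source); slab_struct is assumed to be a pymatgen.Structure for one enumerated slab.

surfaces_to_save = [(slab_struct, True)]                 # the as-enumerated (top) termination
if not is_structure_invertible(slab_struct):             # top and bottom are distinct...
    surfaces_to_save.append((flip_struct(slab_struct), False))   # ...so also keep the flipped (bottom) one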
+
+data.oc.scripts.precompute_sample_structures.precompute_enumerate_surface(bulk_database, bulk_index, opfile)#
+
+ +
+
+data.oc.scripts.precompute_sample_structures.s#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/structure_generator/index.html b/autoapi/data/oc/structure_generator/index.html new file mode 100644 index 000000000..39377615b --- /dev/null +++ b/autoapi/data/oc/structure_generator/index.html @@ -0,0 +1,915 @@ + + + + + + + + + + + data.oc.structure_generator — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.structure_generator#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

StructureGenerator

A class that creates adsorbate/bulk/slab objects given specified indices,

+
+
+

Functions#

+ + + + + + + + + + + + + + + +

write_surface(args, slab, bulk_index, surface_index)

Writes vasp inputs and metadata for a specified slab

parse_args()

precompute_slabs(bulk_ind)

run_placements(inputs)

+
+
+

Attributes#

+ + + + + + +

args

+
+
+class data.oc.structure_generator.StructureGenerator(args, bulk_index, surface_index, adsorbate_index)#
+

A class that creates adsorbate/bulk/slab objects given specified indices, +and writes vasp input files and metadata for multiple placements of the adsorbate +on the slab. You can choose random, heuristic, or both types of placements.

+

The output directory structure will have the following nested structure, +where “files” represents the vasp input files and the metadata.pkl:

+
+
+
outputdir/
    bulk0/
        surface0/
            surface/files
            ads0/
                heur0/files
                heur1/files
                rand0/files
                …
            ads1/
                …
        surface1/
            …
    bulk1/
        …
+

Precomputed surfaces will be calculated and saved out if they don’t +already exist in the provided directory.

+
+
Parameters:
+
    +
  • args (argparse.Namespace) – Contains all command line args

  • +
  • bulk_index (int) – Index of the bulk within the bulk db

  • +
  • surface_index (int) – Index of the surface in the list of all possible surfaces

  • +
  • adsorbate_index (int) – Index of the adsorbate within the adsorbate db

  • +
+
+
+
+
+run()#
+

Create adsorbate/bulk/surface objects, generate adslab placements, +and write to files.

+
+ +
+
+_write_adslabs(adslab_obj, mode_str)#
+

Write one set of adslabs (called separately for random and heuristic placements).

+
+ +
+ +
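A minimal, hypothetical driver (it assumes parse_args() supplies the database paths, output directory, and placement options the class expects; the zero indices are only illustrative):

args = parse_args()                       # command-line args: bulk/adsorbate dbs, output dir, placement options
generator = StructureGenerator(args, bulk_index=0, surface_index=0, adsorbate_index=0)
generator.run()                           # writes VASP inputs + metadata.pkl into the nested directory tree above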
+
+data.oc.structure_generator.write_surface(args, slab, bulk_index, surface_index)#
+

Writes vasp inputs and metadata for a specified slab

+
+ +
+
+data.oc.structure_generator.parse_args()#
+
+ +
+
+data.oc.structure_generator.precompute_slabs(bulk_ind)#
+
+ +
+
+data.oc.structure_generator.run_placements(inputs)#
+
+ +
+
+data.oc.structure_generator.args#
+
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/tests/index.html b/autoapi/data/oc/tests/index.html new file mode 100644 index 000000000..816d90a0d --- /dev/null +++ b/autoapi/data/oc/tests/index.html @@ -0,0 +1,764 @@ + + + + + + + + + + + data.oc.tests — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.tests

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.oc.tests#

+
+

Submodules#

+ +
+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/tests/old_tests/check_energy_and_forces/index.html b/autoapi/data/oc/tests/old_tests/check_energy_and_forces/index.html new file mode 100644 index 000000000..0acf4ea6d --- /dev/null +++ b/autoapi/data/oc/tests/old_tests/check_energy_and_forces/index.html @@ -0,0 +1,851 @@ + + + + + + + + + + + data.oc.tests.old_tests.check_energy_and_forces — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.tests.old_tests.check_energy_and_forces#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + +

check_relaxed_forces(sid, path, thres)

Check all forces in the final frame of adslab is less than a threshold.

check_adsorption_energy(sid, path, ref_energy, ...)

check_DFT_energy(sid, path[, e_tol])

Given a relaxation trajectory, check to see if 1. final energy is less than the initial

check_positions_across_frames_are_different(sid, path)

Given a relaxation trajectory, make sure positions for two consecutive

read_pkl(fname)

run_checks(args)

create_parser()

+
+
+

Attributes#

+ + + + + + +

parser

+
+
+data.oc.tests.old_tests.check_energy_and_forces.check_relaxed_forces(sid, path, thres)#
+

Check all forces in the final frame of adslab is less than a threshold.

+
+ +
+
+data.oc.tests.old_tests.check_energy_and_forces.check_adsorption_energy(sid, path, ref_energy, adsorption_energy)#
+
+ +
+
+data.oc.tests.old_tests.check_energy_and_forces.check_DFT_energy(sid, path, e_tol=0.05)#
+

Given a relaxation trajectory, check that: 1) the final energy is less than the initial energy (raise an error if not); 2) the energy decreases throughout the trajectory (small spikes are okay); and 3) if check 2 fails, whether it is just a matter of the tolerance being too strict, by considering only the first quarter of the trajectory and sampling every 10th frame to check for an _almost_ monotonic decrease in energies. If any frame(i+1) energy is higher than frame(i) energy, flag it and plot the trajectory.

+
+ +
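The “almost monotonic decrease” fallback described above can be pictured with a small standalone sketch (plain Python, not the project's implementation); energies is assumed to be a list of per-frame DFT energies.

def energies_nearly_decrease(energies, e_tol=0.05):
    # Fallback check sketched in the docstring above: look only at the first
    # quarter of the trajectory, sampled every 10th frame, and tolerate spikes
    # smaller than e_tol between consecutive sampled frames.
    sampled = energies[: max(1, len(energies) // 4)][::10]
    return all(later <= earlier + e_tol for earlier, later in zip(sampled, sampled[1:]))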
+
+data.oc.tests.old_tests.check_energy_and_forces.check_positions_across_frames_are_different(sid, path)#
+

Given a relaxation trajectory, make sure positions for two consecutive +frames are not identical.

+
+ +
+
+data.oc.tests.old_tests.check_energy_and_forces.read_pkl(fname)#
+
+ +
+
+data.oc.tests.old_tests.check_energy_and_forces.run_checks(args)#
+
+ +
+
+data.oc.tests.old_tests.check_energy_and_forces.create_parser()#
+
+ +
+
+data.oc.tests.old_tests.check_energy_and_forces.parser#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/tests/old_tests/check_inputs/index.html b/autoapi/data/oc/tests/old_tests/check_inputs/index.html new file mode 100644 index 000000000..b19ac6e51 --- /dev/null +++ b/autoapi/data/oc/tests/old_tests/check_inputs/index.html @@ -0,0 +1,879 @@ + + + + + + + + + + + data.oc.tests.old_tests.check_inputs — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.tests.old_tests.check_inputs

+ +
+ +
+
+ + + + +
+ +
+

data.oc.tests.old_tests.check_inputs#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + +

obtain_metadata(input_dir, split)

Get the metadata provided input directory and split of data.

create_df(metadata_lst[, df_name])

Create a df from metadata to used check_dataset.py file

adslabs_are_unique(df[, unique_by])

Test if there are duplicate adslabs given a df. If the input is another

check_commonelems(df, split1, split2[, check])

Given a df containing all the metadata of the calculations, check to see if there are

is_adsorbate_placed_correct(adslab_input, atoms_tag)

Make sure all adsorbate atoms are connected after placement.

_get_connectivity(atoms)

Generate the connectivity of an atoms obj.

+
+
+data.oc.tests.old_tests.check_inputs.obtain_metadata(input_dir, split)#
+

Get the metadata for a provided input directory and split of the data.
:param input_dir: Path to the input directory.
:type input_dir: str
:param split: Which split the data belongs to, e.g. ‘val_ood_cat/ads/both’ or ‘test_ood_cat/ads/both’.
:type split: str

+
+
Returns:
+

+
metadata (tuple) adslab properties.

Ex: (‘mp-126’, (1,1,1), 0.025, True, ‘*OH’, (0,0,0), ‘val_ood_ads’)

+
+
+

+
+
+
+ +
+
+data.oc.tests.old_tests.check_inputs.create_df(metadata_lst, df_name=None)#
+

Create a df from the metadata, to be used by the check_dataset.py file.
:param metadata_lst: A list of adslab properties in tuple form. Each tuple should contain (mpid, Miller index, shift, top, adsorbate SMILES string, adsorption Cartesian coordinates tuple, and which split the data belongs to).
Ex: (‘mp-126’, (1,1,1), 0.025, True, ‘*OH’, (0,0,0), ‘val_ood_ads’)

+
+
+
Parameters:
+

metadata_lst – Each tuple should contain (mpid, Miller index, shift, top, adsorbate SMILES string, adsorption Cartesian coordinates tuple, and which split the data belongs to). Ex: (‘mp-126’, (1,1,1), 0.025, True, ‘*OH’, (0,0,0), ‘val_ood_ads’)

+
+
Returns:
+

df A pandas DataFrame

+
+
+
+ +
+
+data.oc.tests.old_tests.check_inputs.adslabs_are_unique(df, unique_by=['mpid', 'miller', 'shift', 'top', 'adsorbate', 'adsorption_site'])#
+

Test if there are duplicate adslabs given a df. If the input is in another format, convert it to a df first.
:param df: A pd.DataFrame containing metadata of the adslabs being checked.
:param unique_by: df column names that are used to detect duplicates. The default list is the set of fingerprints that represent a unique adslab.

+
+ +
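The duplicate check boils down to plain pandas on the fingerprint columns listed in the default unique_by argument; a self-contained sketch with an illustrative, made-up row:

import pandas as pd

df = pd.DataFrame(
    [("mp-126", (1, 1, 1), 0.025, True, "*OH", (0, 0, 0), "val_ood_ads")],
    columns=["mpid", "miller", "shift", "top", "adsorbate", "adsorption_site", "split_tag"],
)
fingerprint = ["mpid", "miller", "shift", "top", "adsorbate", "adsorption_site"]
assert not df.duplicated(subset=fingerprint).any()   # no two rows share the same adslab fingerprint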
+
+data.oc.tests.old_tests.check_inputs.check_commonelems(df, split1, split2, check='adsorbate')#
+

Given a df containing all the metadata of the calculations, check to see if there are any bulk or adsorbate duplicates between train and val/test_ood. The dataframe should have a “split_tag” column indicating which split (i.e. train, val_ood_ads, etc.) each datum belongs to.
:param df: A pd.DataFrame containing metadata of the adslabs being checked.
:param split1: One of the splits: ‘train’, ‘val_id’, ‘test_id’, ‘val_ood_cat/ads/both’, or ‘test_ood_cat/ads/both’.
:param split2: Another of the splits: ‘train’, ‘val_id’, ‘test_id’, ‘val_ood_cat/ads/both’, or ‘test_ood_cat/ads/both’.

+
+ +
+
+data.oc.tests.old_tests.check_inputs.is_adsorbate_placed_correct(adslab_input, atoms_tag)#
+

Make sure all adsorbate atoms are connected after placement. False means there is at least one isolated adsorbate atom. It should be used after input generation but before DFT to avoid unnecessary computations.
:param adslab_input: ase.Atoms of the structure in its initial state.
:param atoms_tag:
:type atoms_tag: list

+
+
Returns:
+

+
boolean – If there are any stand-alone adsorbate atoms after placement, return False.

+
+
+

+
+
+
+ +
+
+data.oc.tests.old_tests.check_inputs._get_connectivity(atoms)#
+

Generate the connectivity of an atoms obj.
:param atoms: An ase.Atoms object.

+
+
Returns:
+

matrix The connectivity matrix of the atoms object.

+
+
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/tests/old_tests/compare_inputs_and_trajectory/index.html b/autoapi/data/oc/tests/old_tests/compare_inputs_and_trajectory/index.html new file mode 100644 index 000000000..13262ef76 --- /dev/null +++ b/autoapi/data/oc/tests/old_tests/compare_inputs_and_trajectory/index.html @@ -0,0 +1,824 @@ + + + + + + + + + + + data.oc.tests.old_tests.compare_inputs_and_trajectory — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.tests.old_tests.compare_inputs_and_trajectory

+ +
+ +
+
+ + + + +
+ +
+

data.oc.tests.old_tests.compare_inputs_and_trajectory#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + +

get_starting_structure_from_input_dir(input_dir)

min_diff(atoms_init, atoms_final)

Calculate atom wise distances of two atoms object,

compare(args)

read_pkl(fname)

create_parser()

+
+
+

Attributes#

+ + + + + + +

parser

+
+
+data.oc.tests.old_tests.compare_inputs_and_trajectory.get_starting_structure_from_input_dir(input_dir)#
+
+ +
+
+data.oc.tests.old_tests.compare_inputs_and_trajectory.min_diff(atoms_init, atoms_final)#
+

Calculate atom wise distances of two atoms object, +taking into account periodic boundary conditions.

+
+ +
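A numpy-only sketch of the minimum-image displacement idea described above (illustrative; the project's min_diff may differ in its details). It assumes the two ase.Atoms objects share the same cell and atom ordering.

import numpy as np

def minimum_image_displacement(atoms_init, atoms_final):
    cell = np.asarray(atoms_init.get_cell())                   # rows are the lattice vectors
    delta = atoms_final.get_positions() - atoms_init.get_positions()
    frac = np.linalg.solve(cell.T, delta.T).T                  # Cartesian -> fractional displacements
    frac -= np.round(frac)                                      # wrap into [-0.5, 0.5) (minimum image)
    return frac @ cell                                          # back to Cartesian coordinates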
+
+data.oc.tests.old_tests.compare_inputs_and_trajectory.compare(args)#
+
+ +
+
+data.oc.tests.old_tests.compare_inputs_and_trajectory.read_pkl(fname)#
+
+ +
+
+data.oc.tests.old_tests.compare_inputs_and_trajectory.create_parser()#
+
+ +
+
+data.oc.tests.old_tests.compare_inputs_and_trajectory.parser#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/tests/old_tests/verify_correctness/index.html b/autoapi/data/oc/tests/old_tests/verify_correctness/index.html new file mode 100644 index 000000000..d142bbaac --- /dev/null +++ b/autoapi/data/oc/tests/old_tests/verify_correctness/index.html @@ -0,0 +1,802 @@ + + + + + + + + + + + data.oc.tests.old_tests.verify_correctness — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.tests.old_tests.verify_correctness

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.oc.tests.old_tests.verify_correctness#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + +

compare_runs(path1, path2, reference_type, tol)

create_parser()

main(args)

+
+
+

Attributes#

+ + + + + + +

parser

+
+
+data.oc.tests.old_tests.verify_correctness.compare_runs(path1, path2, reference_type, tol)#
+
+ +
+
+data.oc.tests.old_tests.verify_correctness.create_parser()#
+
+ +
+
+data.oc.tests.old_tests.verify_correctness.main(args)#
+
+ +
+
+data.oc.tests.old_tests.verify_correctness.parser#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/tests/test_adsorbate/index.html b/autoapi/data/oc/tests/test_adsorbate/index.html new file mode 100644 index 000000000..56cf2f72c --- /dev/null +++ b/autoapi/data/oc/tests/test_adsorbate/index.html @@ -0,0 +1,870 @@ + + + + + + + + + + + data.oc.tests.test_adsorbate — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.tests.test_adsorbate#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestAdsorbate

+
+
+

Attributes#

+ + + + + + + + + +

_test_db

_test_db_old

+
+
+data.oc.tests.test_adsorbate._test_db#
+
+ +
+
+data.oc.tests.test_adsorbate._test_db_old#
+
+ +
+
+class data.oc.tests.test_adsorbate.TestAdsorbate#
+
+
+test_adsorbate_init_from_id()#
+
+ +
+
+test_adsorbate_init_from_smiles()#
+
+ +
+
+test_adsorbate_init_random()#
+
+ +
+
+test_adsorbate_init_from_id_with_db()#
+
+ +
+
+test_adsorbate_init_from_smiles_with_db()#
+
+ +
+
+test_adsorbate_init_random_with_db()#
+
+ +
+
+test_adsorbate_init_reaction_string()#
+
+ +
+
+test_adsorbate_init_reaction_string_with_old_db()#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/tests/test_adsorbate_slab_config/index.html b/autoapi/data/oc/tests/test_adsorbate_slab_config/index.html new file mode 100644 index 000000000..85aff4839 --- /dev/null +++ b/autoapi/data/oc/tests/test_adsorbate_slab_config/index.html @@ -0,0 +1,840 @@ + + + + + + + + + + + data.oc.tests.test_adsorbate_slab_config — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.tests.test_adsorbate_slab_config#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestAdslab

+
+
+

Functions#

+ + + + + + +

load_data(request)

+
+
+data.oc.tests.test_adsorbate_slab_config.load_data(request)#
+
+ +
+
+class data.oc.tests.test_adsorbate_slab_config.TestAdslab#
+
+
+test_adslab_init()#
+
+ +
+
+test_num_augmentations_per_site()#
+
+ +
+
+test_placement_overlap()#
+

Test that the adsorbate does not overlap with the slab.

+
+ +
+
+test_is_adsorbate_com_on_normal()#
+
+ +
+
+test_is_adsorbate_binding_atom_on_normal()#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/tests/test_bulk/index.html b/autoapi/data/oc/tests/test_bulk/index.html new file mode 100644 index 000000000..2d507cf62 --- /dev/null +++ b/autoapi/data/oc/tests/test_bulk/index.html @@ -0,0 +1,893 @@ + + + + + + + + + + + data.oc.tests.test_bulk — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.tests.test_bulk#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestBulk

+
+
+

Functions#

+ + + + + + +

load_bulk(request)

+
+
+

Attributes#

+ + + + + + +

_test_db

+
+
+data.oc.tests.test_bulk.load_bulk(request)#
+
+ +
+
+data.oc.tests.test_bulk._test_db#
+
+ +
+
+class data.oc.tests.test_bulk.TestBulk#
+
+
+test_bulk_init_from_id()#
+
+ +
+
+test_bulk_init_from_src_id()#
+
+ +
+
+test_bulk_init_random()#
+
+ +
+
+test_bulk_init_from_id_with_db()#
+
+ +
+
+test_bulk_init_from_src_id_with_db()#
+
+ +
+
+test_bulk_init_random_with_db()#
+
+ +
+
+test_unique_slab_enumeration()#
+
+ +
+
+test_precomputed_slab()#
+
+ +
+
+test_slab_miller_enumeration()#
+
+ +
+
+get_max_miller(slabs)#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/tests/test_inputs/index.html b/autoapi/data/oc/tests/test_inputs/index.html new file mode 100644 index 000000000..7a7531dba --- /dev/null +++ b/autoapi/data/oc/tests/test_inputs/index.html @@ -0,0 +1,818 @@ + + + + + + + + + + + data.oc.tests.test_inputs — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.oc.tests.test_inputs

+ +
+ +
+
+ + + + +
+ +
+

data.oc.tests.test_inputs#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestVasp

+
+
+

Functions#

+ + + + + + +

load_data(request)

+
+
+data.oc.tests.test_inputs.load_data(request)#
+
+ +
+
+class data.oc.tests.test_inputs.TestVasp#
+
+
+test_cleanup()#
+
+ +
+
+test_unique_kpts()#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/tests/test_multi_adsorbate_slab_config/index.html b/autoapi/data/oc/tests/test_multi_adsorbate_slab_config/index.html new file mode 100644 index 000000000..84565f4d3 --- /dev/null +++ b/autoapi/data/oc/tests/test_multi_adsorbate_slab_config/index.html @@ -0,0 +1,827 @@ + + + + + + + + + + + data.oc.tests.test_multi_adsorbate_slab_config — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.tests.test_multi_adsorbate_slab_config#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestMultiAdslab

+
+
+

Functions#

+ + + + + + +

load_data(request)

+
+
+data.oc.tests.test_multi_adsorbate_slab_config.load_data(request)#
+
+ +
+
+class data.oc.tests.test_multi_adsorbate_slab_config.TestMultiAdslab#
+
+
+test_num_configurations()#
+
+ +
+
+test_adsorbate_indices()#
+

Test that the adsorbate indices correspond to the unique adsorbates.

+
+ +
+
+test_placement_overlap()#
+

Test that the adsorbate sites do not overlap with each other.

+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/tests/test_slab/index.html b/autoapi/data/oc/tests/test_slab/index.html new file mode 100644 index 000000000..6254fba40 --- /dev/null +++ b/autoapi/data/oc/tests/test_slab/index.html @@ -0,0 +1,806 @@ + + + + + + + + + + + data.oc.tests.test_slab — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.tests.test_slab#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

TestSlab

+
+
+class data.oc.tests.test_slab.TestSlab#
+
+
+test_slab_init_from_id()#
+
+ +
+
+test_slab_init_from_specific_millers()#
+
+ +
+
+test_slab_init_random()#
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/utils/flag_anomaly/index.html b/autoapi/data/oc/utils/flag_anomaly/index.html new file mode 100644 index 000000000..15bfa504b --- /dev/null +++ b/autoapi/data/oc/utils/flag_anomaly/index.html @@ -0,0 +1,875 @@ + + + + + + + + + + + data.oc.utils.flag_anomaly — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.utils.flag_anomaly#

+
+

Module Contents#

+
+

Classes#

+ + + + + + +

DetectTrajAnomaly

+
+
+class data.oc.utils.flag_anomaly.DetectTrajAnomaly(init_atoms, final_atoms, atoms_tag, final_slab_atoms=None, surface_change_cutoff_multiplier=1.5, desorption_cutoff_multiplier=1.5)#
+
+
+is_adsorbate_dissociated()#
+

Tests if the initial adsorbate connectivity is maintained.

+
+
Returns:
+

True if the connectivity was not maintained, otherwise False

+
+
Return type:
+

(bool)

+
+
+
+ +
+
+has_surface_changed()#
+

Tests for bond breaking / forming events within a tolerance on the surface, so that systems with significant adsorbate-induced surface changes may be discarded, since the reference to the relaxed slab may no longer be valid.

+
+
Returns:
+

True if the surface is reconstructed, otherwise False

+
+
Return type:
+

(bool)

+
+
+
+ +
+
+is_adsorbate_desorbed()#
+

If the adsorbate binding atoms have no connection with slab atoms, +consider it desorbed.

+
+
Returns:
+

True if there is desorption, otherwise False

+
+
Return type:
+

(bool)

+
+
+
+ +
+
+_get_connectivity(atoms, cutoff_multiplier=1.0)#
+

Generate the connectivity of an atoms obj.

+
+
Parameters:
+
    +
  • atoms (ase.Atoms) – object which will have its connectivity considered

  • +
  • cutoff_multiplier (float, optional) – cushion for small atom movements when assessing +atom connectivity

  • +
+
+
Returns:
+

The connectivity matrix of the atoms object.

+
+
Return type:
+

(np.ndarray)

+
+
+
+ +
+
+is_adsorbate_intercalated()#
+

Ensure the adsorbate isn’t interacting with an atom that is not allowed to relax.

+
+
Returns:
+

True if any adsorbate atom neighbors a frozen atom, otherwise False

+
+
Return type:
+

(bool)

+
+
+
+ +
+ +
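A hedged usage sketch: initial_frame and final_frame are assumed to be ase.Atoms objects from your own relaxation trajectory, and tags is the corresponding list of atom tags.

>>> detector = DetectTrajAnomaly(initial_frame, final_frame, atoms_tag=tags)
>>> anomalous = any([
...     detector.is_adsorbate_dissociated(),
...     detector.is_adsorbate_desorbed(),
...     detector.has_surface_changed(),
...     detector.is_adsorbate_intercalated(),
... ])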
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/utils/index.html b/autoapi/data/oc/utils/index.html new file mode 100644 index 000000000..ea0bc6074 --- /dev/null +++ b/autoapi/data/oc/utils/index.html @@ -0,0 +1,886 @@ + + + + + + + + + + + data.oc.utils — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.utils#

+
+

Submodules#

+ +
+
+

Package Contents#

+
+

Classes#

+ + + + + + +

DetectTrajAnomaly

+
+
+class data.oc.utils.DetectTrajAnomaly(init_atoms, final_atoms, atoms_tag, final_slab_atoms=None, surface_change_cutoff_multiplier=1.5, desorption_cutoff_multiplier=1.5)#
+
+
+is_adsorbate_dissociated()#
+

Tests if the initial adsorbate connectivity is maintained.

+
+
Returns:
+

True if the connectivity was not maintained, otherwise False

+
+
Return type:
+

(bool)

+
+
+
+ +
+
+has_surface_changed()#
+

Tests for bond breaking / forming events within a tolerance on the surface, so that systems with significant adsorbate-induced surface changes may be discarded, since the reference to the relaxed slab may no longer be valid.

+
+
Returns:
+

True if the surface is reconstructed, otherwise False

+
+
Return type:
+

(bool)

+
+
+
+ +
+
+is_adsorbate_desorbed()#
+

If the adsorbate binding atoms have no connection with slab atoms, +consider it desorbed.

+
+
Returns:
+

True if there is desorption, otherwise False

+
+
Return type:
+

(bool)

+
+
+
+ +
+
+_get_connectivity(atoms, cutoff_multiplier=1.0)#
+

Generate the connectivity of an atoms obj.

+
+
Parameters:
+
    +
  • atoms (ase.Atoms) – object which will have its connectivity considered

  • +
  • cutoff_multiplier (float, optional) – cushion for small atom movements when assessing +atom connectivity

  • +
+
+
Returns:
+

The connectivity matrix of the atoms object.

+
+
Return type:
+

(np.ndarray)

+
+
+
+ +
+
+is_adsorbate_intercalated()#
+

Ensure the adsorbate isn’t interacting with an atom that is not allowed to relax.

+
+
Returns:
+

True if any adsorbate atom neighbors a frozen atom, otherwise False

+
+
Return type:
+

(bool)

+
+
+
+ +
+ +
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/oc/utils/vasp/index.html b/autoapi/data/oc/utils/vasp/index.html new file mode 100644 index 000000000..ce6b8fc40 --- /dev/null +++ b/autoapi/data/oc/utils/vasp/index.html @@ -0,0 +1,894 @@ + + + + + + + + + + + data.oc.utils.vasp — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.oc.utils.vasp#

+

This submodule contains the scripts that we used to run VASP.

+

Note that some of these scripts were taken and modified from [GASpy](ulissigroup/GASpy) with permission of the authors.

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + +

_clean_up_inputs(atoms, vasp_flags)

Parses the inputs and makes sure some things are straightened out.

calculate_surface_k_points(atoms)

For surface calculations, it's a good practice to calculate the k-point

write_vasp_input_files(atoms[, outdir, vasp_flags])

Effectively goes through the same motions as the run_vasp function,

+
+
+

Attributes#

+ + + + + + + + + + + + + + + +

__author__

__email__

VASP_FLAGS

BULK_VASP_FLAGS

+
+
+data.oc.utils.vasp.__author__ = 'Kevin Tran'#
+
+ +
+
+data.oc.utils.vasp.__email__ = 'ktran@andrew.cmu.edu'#
+
+ +
+
+data.oc.utils.vasp.VASP_FLAGS#
+
+ +
+
+data.oc.utils.vasp.BULK_VASP_FLAGS#
+
+ +
+
+data.oc.utils.vasp._clean_up_inputs(atoms, vasp_flags)#
+

Parses the inputs and makes sure some things are straightened out.

+
+
Arg:

atoms – ase.Atoms object of the structure we want to relax
vasp_flags – A dictionary of settings we want to pass to the Vasp calculator

+
+
+
+
+
Returns:
+

+
atoms – ase.Atoms object of the structure we want to relax, but with the unit vectors fixed (if needed)

+
+
+

vasp_flags A modified version of the ‘vasp_flags’ argument

+

+
+
+
+ +
+
+data.oc.utils.vasp.calculate_surface_k_points(atoms)#
+

For surface calculations, it’s a good practice to calculate the k-point +mesh given the unit cell size. We do that on-the-spot here.

+
+
Arg:

atoms ase.Atoms object of the structure we want to relax

+
+
+
+
Returns:
+

k_pts A 3-tuple of integers indicating the k-point mesh to use

+
+
+
+ +
+
+data.oc.utils.vasp.write_vasp_input_files(atoms, outdir='.', vasp_flags=None)#
+

Effectively goes through the same motions as the run_vasp function, +except it only writes the input files instead of running.

+
+
Parameters:
+
    +
  • atoms – ase.Atoms object that we want to relax.

  • outdir – A string indicating where you want to save the input files. Defaults to ‘.’

  • vasp_flags – A dictionary of settings we want to pass to the Vasp calculator. Defaults to a standard set of values if None.

  • +
+
+
+
+ +
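An illustrative call pattern for the two functions above (a sketch, assuming ASE is installed and, for the input writing, that your VASP pseudopotential setup is configured the way ASE expects; exact k-point values depend on your cell):

>>> from ase.build import fcc111
>>> slab = fcc111("Cu", size=(2, 2, 4), vacuum=10.0)
>>> calculate_surface_k_points(slab)                      # 3-tuple of integers scaled to the in-plane cell size
>>> write_vasp_input_files(slab, outdir="cu111_inputs")   # vasp_flags=None falls back to a standard set of flags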
+
+
+ + + + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/odac/force_field/FF_analysis/index.html b/autoapi/data/odac/force_field/FF_analysis/index.html new file mode 100644 index 000000000..07a16690e --- /dev/null +++ b/autoapi/data/odac/force_field/FF_analysis/index.html @@ -0,0 +1,862 @@ + + + + + + + + + + + data.odac.force_field.FF_analysis — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.odac.force_field.FF_analysis

+ +
+ +
+
+ + + + +
+ +
+

data.odac.force_field.FF_analysis#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

get_data(infile[, limit])

binned_average(DFT_ads, pred_err, bins)

bin_plot(ax, bins, heights, **kwargs)

get_Fig4a(raw_error_CO2, raw_error_H2O[, b, outfile])

get_Fig4b(int_DFT_CO2, err_CO2, int_DFT_H2O, err_H2O)

get_Fig4c(DFT_CO2, err_CO2[, outfile])

get_Fig4d(DFT_H2O, err_H2O[, outfile])

phys_err(DFT, FF)

chem_err(DFT, FF)

+
+
+

Attributes#

+ + + + + + +

infile

+
+
+data.odac.force_field.FF_analysis.get_data(infile, limit=2)#
+
+ +
+
+data.odac.force_field.FF_analysis.binned_average(DFT_ads, pred_err, bins)#
+
+ +
+
+data.odac.force_field.FF_analysis.bin_plot(ax, bins, heights, **kwargs)#
+
+ +
+
+data.odac.force_field.FF_analysis.get_Fig4a(raw_error_CO2, raw_error_H2O, b=20, outfile='Fig5a.png')#
+
+ +
+
+data.odac.force_field.FF_analysis.get_Fig4b(int_DFT_CO2, err_CO2, int_DFT_H2O, err_H2O, outfile='Fig5b.png')#
+
+ +
+
+data.odac.force_field.FF_analysis.get_Fig4c(DFT_CO2, err_CO2, outfile='Fig5c.png')#
+
+ +
+
+data.odac.force_field.FF_analysis.get_Fig4d(DFT_H2O, err_H2O, outfile='Fig5d.png')#
+
+ +
+
+data.odac.force_field.FF_analysis.phys_err(DFT, FF)#
+
+ +
+
+data.odac.force_field.FF_analysis.chem_err(DFT, FF)#
+
+ +
+
+data.odac.force_field.FF_analysis.infile = '/storage/home/hcoda1/8/lbrabson3/p-amedford6-0/s2ef/final/data_w_oms.json'#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/odac/index.html b/autoapi/data/odac/index.html new file mode 100644 index 000000000..cbaf6b8c9 --- /dev/null +++ b/autoapi/data/odac/index.html @@ -0,0 +1,728 @@ + + + + + + + + + + + data.odac — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.odac

+ +
+
+ +
+
+
+ + + + +
+ +
+

data.odac#

+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/odac/promising_mof/promising_mof_energies/energy/index.html b/autoapi/data/odac/promising_mof/promising_mof_energies/energy/index.html new file mode 100644 index 000000000..8a9b95da4 --- /dev/null +++ b/autoapi/data/odac/promising_mof/promising_mof_energies/energy/index.html @@ -0,0 +1,1083 @@ + + + + + + + + + + + data.odac.promising_mof.promising_mof_energies.energy — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.odac.promising_mof.promising_mof_energies.energy#

+
+

Module Contents#

+
+
+data.odac.promising_mof.promising_mof_energies.energy.raw_ads_energy_data#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.temp_split_string#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_pristine#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_pristine#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_defective#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_defective#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_pristine_co2#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_pristine_h2o#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_pristine_co_ads#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_pristine_co_ads_2#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_defective_co2#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_defective_h2o#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_defective_co_ads#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_defective_co_ads_2#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_co2#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.current_entry#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_h2o#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.current_entry#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_co_ads#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.current_entry#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_co_ads_2#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.current_entry#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.adsorption_data#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.count = 0#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_co2_defective#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.current_entry#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_h2o_defective#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.current_entry#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_co_ads_defective#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.current_entry#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_co_ads_2_defective#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.current_entry#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.adsorption_data_defective#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.unique_combinations_count#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.def_counts_df#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.mof_name#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.missing_DDEC#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.missing_DDEC_pristine#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.missing_DDEC_defective#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.index_drop_ddec_pristine = []#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.adsorption_data#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.index_drop_ddec_defective = []#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.adsorption_data_defective#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.adsorption_data#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.adsorption_data_defective#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.promising_pristine#
+
+ +
+
+data.odac.promising_mof.promising_mof_energies.energy.promising_defective#
+
+ +
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/om/biomolecules/geom/sample_geom_drugs/index.html b/autoapi/data/om/biomolecules/geom/sample_geom_drugs/index.html new file mode 100644 index 000000000..f1a56e133 --- /dev/null +++ b/autoapi/data/om/biomolecules/geom/sample_geom_drugs/index.html @@ -0,0 +1,783 @@ + + + + + + + + + + + data.om.biomolecules.geom.sample_geom_drugs — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.om.biomolecules.geom.sample_geom_drugs

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.om.biomolecules.geom.sample_geom_drugs#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + + + + +

write_pickle(data, path)

parse_args()

main()

+
+
+data.om.biomolecules.geom.sample_geom_drugs.write_pickle(data, path)#
+
+ +
+
+data.om.biomolecules.geom.sample_geom_drugs.parse_args()#
+
+ +
+
+data.om.biomolecules.geom.sample_geom_drugs.main()#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/om/biomolecules/geom/write_geom_drugs_structures/index.html b/autoapi/data/om/biomolecules/geom/write_geom_drugs_structures/index.html new file mode 100644 index 000000000..517c6ea6a --- /dev/null +++ b/autoapi/data/om/biomolecules/geom/write_geom_drugs_structures/index.html @@ -0,0 +1,773 @@ + + + + + + + + + + + data.om.biomolecules.geom.write_geom_drugs_structures — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.om.biomolecules.geom.write_geom_drugs_structures

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.om.biomolecules.geom.write_geom_drugs_structures#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

parse_args()

main()

+
+
+data.om.biomolecules.geom.write_geom_drugs_structures.parse_args()#
+
+ +
+
+data.om.biomolecules.geom.write_geom_drugs_structures.main()#
+
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/om/index.html b/autoapi/data/om/index.html new file mode 100644 index 000000000..7b22eb0ed --- /dev/null +++ b/autoapi/data/om/index.html @@ -0,0 +1,728 @@ + + + + + + + + + + + data.om — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.om

+ +
+
+ +
+
+
+ + + + +
+ +
+

data.om#

+
+ + + + +
+ + + + + + + + +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/om/omdata/orca/calc/index.html b/autoapi/data/om/omdata/orca/calc/index.html new file mode 100644 index 000000000..7e03558ec --- /dev/null +++ b/autoapi/data/om/omdata/orca/calc/index.html @@ -0,0 +1,832 @@ + + + + + + + + + + + data.om.omdata.orca.calc — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

data.om.omdata.orca.calc#

+
+

Module Contents#

+
+

Functions#

+ + + + + + +

write_orca_inputs(atoms, output_directory[, charge, ...])

One-off method to be used if you wanted to write inputs for an arbitrary

+
+
+

Attributes#

+ + + + + + + + + + + + + + + + + + + + + +

ORCA_FUNCTIONAL

ORCA_BASIS

ORCA_SIMPLE_INPUT

ORCA_BLOCKS

ORCA_ASE_SIMPLE_INPUT

OPT_PARAMETERS

+
+
+data.om.omdata.orca.calc.ORCA_FUNCTIONAL = 'wB97M-V'#
+
+ +
+
+data.om.omdata.orca.calc.ORCA_BASIS = 'def2-TZVPD'#
+
+ +
+
+data.om.omdata.orca.calc.ORCA_SIMPLE_INPUT = ['EnGrad', 'RIJCOSX', 'def2/J', 'NoUseSym', 'DIIS', 'NOSOSCF', 'NormalConv', 'DEFGRID3', 'ALLPOP', 'NBO']#
+
+ +
+
+data.om.omdata.orca.calc.ORCA_BLOCKS = ['%scf Convergence Tight maxiter 300 end', '%elprop Dipole true Quadrupole true end', '%nbo...#
+
+ +
+
+data.om.omdata.orca.calc.ORCA_ASE_SIMPLE_INPUT#
+
+ +
+
+data.om.omdata.orca.calc.OPT_PARAMETERS#
+
+ +
+
+data.om.omdata.orca.calc.write_orca_inputs(atoms, output_directory, charge=0, mult=1, orcasimpleinput=ORCA_ASE_SIMPLE_INPUT, orcablocks=' '.join(ORCA_BLOCKS))#
+

One-off method to be used if you wanted to write inputs for an arbitrary +system. Primarily used for debugging.

+
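A small, hypothetical example (requires ASE plus the ORCA-related dependencies this module relies on; the output directory name is made up):

>>> from ase.build import molecule
>>> write_orca_inputs(
...     molecule("H2O"),
...     output_directory="h2o_inputs",
...     charge=0,
...     mult=1,
... )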
+ +
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/om/omdata/orca/index.html b/autoapi/data/om/omdata/orca/index.html new file mode 100644 index 000000000..8c6f03349 --- /dev/null +++ b/autoapi/data/om/omdata/orca/index.html @@ -0,0 +1,742 @@ + + + + + + + + + + + data.om.omdata.orca — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.om.omdata.orca

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.om.omdata.orca#

+
+

Submodules#

+ +
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/data/om/omdata/orca/recipes/index.html b/autoapi/data/om/omdata/orca/recipes/index.html new file mode 100644 index 000000000..ab3598d8e --- /dev/null +++ b/autoapi/data/om/omdata/orca/recipes/index.html @@ -0,0 +1,810 @@ + + + + + + + + + + + data.om.omdata.orca.recipes — FAIR Chemistry Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

data.om.omdata.orca.recipes

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

data.om.omdata.orca.recipes#

+
+

Module Contents#

+
+

Functions#

+ + + + + + + + + +

single_point_calculation(atoms, charge, spin_multiplicity)

Wrapper around QUACC's static job to standardize single-point calculations.

ase_relaxation(atoms, charge, spin_multiplicity[, xc, ...])

Wrapper around QUACC's ase_relax_job to standardize geometry optimizations.

+
+
+data.om.omdata.orca.recipes.single_point_calculation(atoms, charge, spin_multiplicity, xc=ORCA_FUNCTIONAL, basis=ORCA_BASIS, orcasimpleinput=None, orcablocks=None, nprocs=12, outputdir=os.getcwd(), **calc_kwargs)#
+

Wrapper around QUACC’s static job to standardize single-point calculations. +See github.com/Quantum-Accelerators/quacc/blob/main/src/quacc/recipes/orca/core.py#L22 +for more details.

+
+
Parameters:
+
    +
  • atoms (Atoms) – Atoms object

  • +
  • charge (int) – Charge of system

  • +
  • spin_multiplicity (int) – Multiplicity of the system

  • +
  • xc (str) – Exchange-correlation functional

  • +
  • basis (str) – Basis set

  • +
  • orcasimpleinput (list) – List of orcasimpleinput settings for the calculator

  • +
  • orcablocks (list) – List of orcablocks swaps for the calculator

  • +
  • nprocs (int) – Number of processes to parallelize across

  • +
  • outputdir (str) – Directory to move results to upon completion

  • +
  • calc_kwargs – Additional kwargs for the custom Orca calculator

  • +
+
+
+
+ +
+
+data.om.omdata.orca.recipes.ase_relaxation(atoms, charge, spin_multiplicity, xc=ORCA_FUNCTIONAL, basis=ORCA_BASIS, orcasimpleinput=None, orcablocks=None, nprocs=12, opt_params=None, outputdir=os.getcwd(), **calc_kwargs)#
+

Wrapper around QUACC’s ase_relax_job to standardize geometry optimizations. +See github.com/Quantum-Accelerators/quacc/blob/main/src/quacc/recipes/orca/core.py#L22 +for more details.

+
+
Parameters:
+
    +
  • atoms (Atoms) – Atoms object

  • +
  • charge (int) – Charge of system

  • +
  • spin_multiplicity (int) – Multiplicity of the system

  • +
  • xc (str) – Exchange-correlation functional

  • +
  • basis (str) – Basis set

  • +
  • orcasimpleinput (list) – List of orcasimpleinput settings for the calculator

  • +
  • orcablocks (list) – List of orcablocks swaps for the calculator

  • +
  • nprocs (int) – Number of processes to parallelize across

  • +
  • opt_params (dict) – Dictionary of optimizer parameters

  • +
  • outputdir (str) – Directory to move results to upon completion

  • +
  • calc_kwargs – Additional kwargs for the custom Orca calculator

  • +
+
+
+
+ +
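A hedged end-to-end sketch for the two recipes above (it assumes QUACC is configured and an ORCA binary is available on the machine; the molecule and process count are only illustrative):

>>> from ase.build import molecule
>>> atoms = molecule("CH4")
>>> sp_result = single_point_calculation(atoms, charge=0, spin_multiplicity=1, nprocs=4)
>>> relax_result = ase_relaxation(atoms, charge=0, spin_multiplicity=1, nprocs=4)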
+
+
+ + + + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/autoapi/fairchem/applications/AdsorbML/adsorbml/2023_neurips_challenge/challenge_eval/index.html b/autoapi/fairchem/applications/AdsorbML/adsorbml/2023_neurips_challenge/challenge_eval/index.html deleted file mode 100644 index 43fd87c4d..000000000 --- a/autoapi/fairchem/applications/AdsorbML/adsorbml/2023_neurips_challenge/challenge_eval/index.html +++ /dev/null @@ -1,732 +0,0 @@ - - - - - - - - - - - fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - -
-

fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval

- -
- -
-
- - - - -
- -
-

fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - -

is_successful(best_pred_energy, best_dft_energy[, ...])

Computes the success rate given the best predicted energy

compute_valid_ml_success(ml_data, dft_data)

Computes validated ML success rates.

get_dft_data(targets)

Organizes the released target mapping for evaluation lookup.

process_ml_data(results_file, model, metadata, ...)

For ML systems in which no configurations made it through the physical

parse_args()

main()

This script takes in your prediction file (npz format)

-
-
-fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval.is_successful(best_pred_energy, best_dft_energy, SUCCESS_THRESHOLD=0.1)#
-

Computes the success rate given the best predicted energy -and the best ground truth DFT energy.

-

success_parity: The standard definition for success, where ML needs to be -within the SUCCESS_THRESHOLD, or lower, of the DFT energy.

-

Returns: Bool

-
- -
-
-fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval.compute_valid_ml_success(ml_data, dft_data)#
-

Computes validated ML success rates. -Here, results are generated only from ML. DFT single-points are used to -validate whether the ML energy is within 0.1eV of the DFT energy of the -predicted structure. If valid, the ML energy is compared to the ground -truth DFT energy, otherwise it is discarded.

-

Return validated ML success rates.

-
- -
-
-fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval.get_dft_data(targets)#
-

Organizes the released target mapping for evaluation lookup.

-
-
Returns: Dict:
-
{

‘system_id 1’: {‘config_id 1’: dft_ads_energy, ‘config_id 2’: dft_ads_energy}, -‘system_id 2’: {‘config_id 1’: dft_ads_energy, ‘config_id 2’: dft_ads_energy}, -…

-
-
-

}

-
-
-
- -
-
-fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval.process_ml_data(results_file, model, metadata, ml_dft_targets, dft_data)#
-

For ML systems in which no configurations made it through the physical -constraint checks, set energies to an arbitrarily high value to ensure -a failure case in evaluation.

-
-
Returns: Dict:
-
{

‘system_id 1’: {‘config_id 1’: {‘ml_energy’: predicted energy, ‘ml+dft_energy’: dft energy of ML structure} …}, -‘system_id 2’: {‘config_id 1’: {‘ml_energy’: predicted energy, ‘ml+dft_energy’: dft energy of ML structure} …}, -…

-
-
-

}

-
-
-
- -
-
-fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval.parse_args()#
-
- -
-
-fairchem.applications.AdsorbML.adsorbml.2023_neurips_challenge.challenge_eval.main()#
-

This script takes in your prediction file (npz format) -and the ML model name used for ML relaxations. -Then using a mapping file, dft ground truth energy, -and ML relaxed dft energy returns the success rate of your predictions.

-
- -
-
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - - - - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/dense_eval/index.html b/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/dense_eval/index.html deleted file mode 100644 index 7e3f9d64e..000000000 --- a/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/dense_eval/index.html +++ /dev/null @@ -1,833 +0,0 @@ - - - - - - - - - - - fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - -
-

fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval

- -
- -
-
- - - - -
- -
-

fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval#

-

AdsorbML evaluation script. This script expects the results-file to be -organized in a very specific structure in order to evaluate successfully.

-

Results are to be saved out in a dictionary pickle file, where keys are the -system_id and the values are energies and compute information for a -specified config_id. For each config_id that successfully passes the -physical constraints defined in the manuscript, the following information must -be provided:

-
-

ml_energy: The ML predicted adsorption energy on that particular config_id.

-

ml+dft_energy: The DFT adsorption energy (SP or RX) as evaluated on -the predicted ML config_id structure. Do note use raw DFT energies, -ensure these are referenced correctly. None if not available.

-

scf_steps: Total number of SCF steps involved in determining the DFT -adsorption energy on the predicted ML config_id. For relaxation -methods (ML+RX), sum all SCF steps across all frames. 0 if not -available.

-

ionic_steps: Total number of ionic steps in determining the DFT -adsorption energy on the predicted ML config_id. This will be 1 for -single-point methods (ML+SP). 0 if not available.

-
-

NOTE - It is possible that due to the required filtering of physical -constraints, no configurations are valid for a particular system_id. In -this case the system or config id can be excluded entirely from the -results file and will be treated as a failure point at evaluation time.

-
-
e.g.
{
    "6_1134_23": {
        "rand11": {
            "ml_energy": -1.234,
            "ml+dft_energy": -1.456,
            "scf_steps": 33,
            "ionic_steps": 1,
        },
        "rand5": {
            "ml_energy": -2.489,
            "ml+dft_energy": -2.109,
            "scf_steps": 16,
            "ionic_steps": 1,
        },
        ...
    },
    "7_6566_62": {
        "rand79": {
            "ml_energy": -1.234,
            "ml+dft_energy": -1.456,
            "scf_steps": 33,
            "ionic_steps": 1,
        },
        ...
    },
    ...
}
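If it helps to see the structure in code, the following is a minimal, hypothetical sketch of assembling and pickling a results file in the layout described above; the file name and numeric values are purely illustrative.

import pickle

# Illustrative only: the keys and values mirror the layout documented above, not real data.
results = {
    "6_1134_23": {
        "rand11": {"ml_energy": -1.234, "ml+dft_energy": -1.456, "scf_steps": 33, "ionic_steps": 1},
        "rand5": {"ml_energy": -2.489, "ml+dft_energy": -2.109, "scf_steps": 16, "ionic_steps": 1},
    },
    "7_6566_62": {
        "rand79": {"ml_energy": -1.234, "ml+dft_energy": -1.456, "scf_steps": 33, "ionic_steps": 1},
    },
}

with open("my_results.pkl", "wb") as f:  # hypothetical output path
    pickle.dump(results, f)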

-
-
-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - -

is_successful(best_ml_dft_energy, best_dft_energy)

Computes the success rate given the best ML+DFT energy and the best ground

compute_hybrid_success(ml_data, dft_data, k)

Computes AdsorbML success rates at varying top-k values.

compute_valid_ml_success(ml_data, dft_data)

Computes validated ML success rates.

get_dft_data(targets)

Organizes the released target mapping for evaluation lookup.

get_dft_compute(counts)

Calculates the total DFT compute associated with establishing a ground

filter_ml_data(ml_data, dft_data)

For ML systems in which no configurations made it through the physical

-
-
-

Attributes#

- - - - - - - - - -

SUCCESS_THRESHOLD

parser

-
-
-fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.SUCCESS_THRESHOLD = 0.1#
-
- -
-
-fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.is_successful(best_ml_dft_energy, best_dft_energy)#
-

Computes the success rate given the best ML+DFT energy and the best ground truth DFT energy.

-

success_parity: The standard definition for success, where ML needs to be within the SUCCESS_THRESHOLD, or lower, of the DFT energy.

-

success_much_better: A system in which the ML energy is predicted to be much lower than the DFT energy (by more than the SUCCESS_THRESHOLD).

-
- -
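As a plain-Python illustration of the two definitions above (not the library's implementation), using the documented SUCCESS_THRESHOLD of 0.1 eV:

SUCCESS_THRESHOLD = 0.1  # eV, as documented above

def sketch_is_successful(best_ml_dft_energy, best_dft_energy, threshold=SUCCESS_THRESHOLD):
    # success_parity: ML+DFT energy is within `threshold` of, or lower than, the DFT energy.
    success_parity = best_ml_dft_energy <= best_dft_energy + threshold
    # success_much_better: ML+DFT energy is more than `threshold` below the DFT energy.
    success_much_better = best_ml_dft_energy < best_dft_energy - threshold
    return success_parity, success_much_better

print(sketch_is_successful(-2.05, -2.00))  # (True, False)
print(sketch_is_successful(-2.30, -2.00))  # (True, True)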
-
-fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.compute_hybrid_success(ml_data, dft_data, k)#
-

Computes AdsorbML success rates at varying top-k values. Here, results are generated for the hybrid method, where the top-k ML energies are used to run DFT on the corresponding ML structures. The resulting energies are then compared to the ground truth DFT energies.

-

Return success rates and DFT compute usage at varying k.

-
- -
-
-fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.compute_valid_ml_success(ml_data, dft_data)#
-

Computes validated ML success rates. Here, results are generated only from ML. DFT single-points are used to validate whether the ML energy is within 0.1 eV of the DFT energy of the predicted structure. If valid, the ML energy is compared to the ground truth DFT energy, otherwise it is discarded.

-

Return validated ML success rates.

-
- -
-
-fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.get_dft_data(targets)#
-

Organizes the released target mapping for evaluation lookup.

-
-
oc20dense_targets.pkl:

{'system_id 1': [('config_id 1', dft_adsorption_energy), ('config_id 2', dft_adsorption_energy)], 'system_id 2': …}

-
-
Returns: Dict:
-
{

'system_id 1': {'config_id 1': dft_ads_energy, 'config_id 2': dft_ads_energy},
'system_id 2': {'config_id 1': dft_ads_energy, 'config_id 2': dft_ads_energy},
…

-
-
-

}

-
-
-
- -
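A hypothetical, plain-Python reshaping of a targets mapping like the one described above into the returned nested dict (not the library's code; the example entries are made up):

targets = {
    "6_1134_23": [("rand11", -1.456), ("rand5", -2.109)],
    "7_6566_62": [("rand79", -1.456)],
}

# {'system_id': {'config_id': dft_ads_energy, ...}, ...}
dft_data = {system_id: dict(pairs) for system_id, pairs in targets.items()}
print(dft_data["6_1134_23"]["rand5"])  # -2.109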
-
-fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.get_dft_compute(counts)#
-

Calculates the total DFT compute associated with establishing a ground truth using the released DFT timings: oc20dense_compute.pkl.

-

Compute is measured in the total number of self-consistent steps (SC). The total number of ionic steps is also included for reference.

-
- -
-
-fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.filter_ml_data(ml_data, dft_data)#
-

For ML systems in which no configurations made it through the physical constraint checks, set energies to an arbitrarily high value to ensure a failure case in evaluation.

-
- -
-
-fairchem.applications.AdsorbML.adsorbml.scripts.dense_eval.parser#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/process_mlrs/index.html b/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/process_mlrs/index.html
deleted file mode 100644
index 280f9b982..000000000
--- a/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/process_mlrs/index.html
+++ /dev/null
@@ -1,722 +0,0 @@

fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs#

-

This script processes ML relaxations and sets them up for the next step.
- Reads final energy and structure for each relaxation
- Filters out anomalies
- Groups together all configurations for one adsorbate-surface system
- Sorts configs by lowest energy first

-

The following files are saved out:
- cache_sorted_byE.pkl: dict going from the system ID (bulk, surface, adsorbate) to a list of configs and their relaxed structures, sorted by lowest energy first. This is later used by write_top_k_vasp.py.
- anomalies_by_sid.pkl: dict going from integer sid to a boolean representing whether it was an anomaly. Anomalies are already excluded from cache_sorted_byE.pkl and this file is only used for extra analyses.
- errors_by_sid.pkl: any errors that occurred
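A minimal sketch of consuming the cached output described above; the path is illustrative and the exact per-system payload is assumed rather than taken from the source.

import pickle

with open("cache_sorted_byE.pkl", "rb") as f:  # illustrative path
    cache_sorted_by_energy = pickle.load(f)

for system_id, configs in cache_sorted_by_energy.items():
    # configs are assumed to be sorted lowest-energy first, per the description above
    print(system_id, "-> best candidate:", configs[0])
    break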
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - -

parse_args()

min_diff(atoms_init, atoms_final)

process_mlrs(arg)

-
-
-

Attributes#

- - - - - - - - - - - - -

SURFACE_CHANGE_CUTOFF_MULTIPLIER

DESORPTION_CUTOFF_MULTIPLIER

args

-
-
-fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs.SURFACE_CHANGE_CUTOFF_MULTIPLIER = 1.5#
-
- -
-
-fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs.DESORPTION_CUTOFF_MULTIPLIER = 1.5#
-
- -
-
-fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs.parse_args()#
-
- -
-
-fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs.min_diff(atoms_init, atoms_final)#
-
- -
-
-fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs.process_mlrs(arg)#
-
- -
-
-fairchem.applications.AdsorbML.adsorbml.scripts.process_mlrs.args#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/utils/index.html b/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/utils/index.html
deleted file mode 100644
index b59c895b6..000000000
--- a/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/utils/index.html
+++ /dev/null
@@ -1,680 +0,0 @@

fairchem.applications.AdsorbML.adsorbml.scripts.utils#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

converged_oszicar(path[, nelm, ediff, idx])

--- FOR VASP USERS ---

count_scf(path)

--- FOR VASP USERS ---

-
-
-fairchem.applications.AdsorbML.adsorbml.scripts.utils.converged_oszicar(path, nelm=60, ediff=0.0001, idx=0)#
-

— FOR VASP USERS —

-

Given a folder containing DFT outputs, ensures the system has converged electronically.

-
-
Parameters:
-
    -
  • path – Path to DFT outputs.

  • -
  • nelm – Maximum number of electronic steps used.

  • -
  • ediff – Energy difference condition for terminating the electronic loop.

  • -
  • idx – Frame to check for electronic convergence. 0 for SP, -1 for RX.

  • -
-
-
-
- -
-
-fairchem.applications.AdsorbML.adsorbml.scripts.utils.count_scf(path)#
-

— FOR VASP USERS —

-

Given a folder containing DFT outputs, computes the total ionic and SCF steps.

-
-
Parameters:
-

path – Path to DFT outputs.

-
-
Returns:
-

Total number of electronic steps performed.
ionic_steps (int): Total number of ionic steps performed.

-
-
Return type:
-

scf_steps (int)

-
-
-
- -
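A hedged usage sketch of the two helpers documented above, assuming the fairchem package is installed and that ./dft_outputs (an illustrative path) holds VASP outputs; the return shapes are inferred from the docstrings.

from fairchem.applications.AdsorbML.adsorbml.scripts.utils import converged_oszicar, count_scf

vasp_dir = "./dft_outputs"  # illustrative path to a folder of VASP outputs

# idx=-1 checks the last frame, i.e. a relaxation (RX); use idx=0 for a single point (SP).
if converged_oszicar(vasp_dir, nelm=60, ediff=1e-4, idx=-1):
    # Assumed to return (scf_steps, ionic_steps), matching the docstring above.
    scf_steps, ionic_steps = count_scf(vasp_dir)
    print(f"Converged: {ionic_steps} ionic step(s), {scf_steps} SCF steps total")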
\ No newline at end of file
diff --git a/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/write_top_k_vasp/index.html b/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/write_top_k_vasp/index.html
deleted file mode 100644
index 29448eb4f..000000000
--- a/autoapi/fairchem/applications/AdsorbML/adsorbml/scripts/write_top_k_vasp/index.html
+++ /dev/null
@@ -1,634 +0,0 @@

fairchem.applications.AdsorbML.adsorbml.scripts.write_top_k_vasp#

-
-

Module Contents#

-
-
-fairchem.applications.AdsorbML.adsorbml.scripts.write_top_k_vasp.VASP_FLAGS#
-
- -
-
-fairchem.applications.AdsorbML.adsorbml.scripts.write_top_k_vasp.parser#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/applications/CatTSunami/ocpneb/core/autoframe/index.html b/autoapi/fairchem/applications/CatTSunami/ocpneb/core/autoframe/index.html
deleted file mode 100644
index 3069e1e73..000000000
--- a/autoapi/fairchem/applications/CatTSunami/ocpneb/core/autoframe/index.html
+++ /dev/null
@@ -1,1368 +0,0 @@

fairchem.applications.CatTSunami.ocpneb.core.autoframe#

-

Home of the AutoFrame classes, which facilitate the generation of initial and final frames for NEB calculations.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - -

AutoFrame

Base class to hold functions that are shared across the reaction types.

AutoFrameDissociation

Base class to hold functions that are shared across the reaction types.

AutoFrameTransfer

Base class to hold functions that are shared across the reaction types.

AutoFrameDesorption

Base class to hold functions that are shared across the reaction types.

-
-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

interpolate_and_correct_frames(initial, final, ...)

Given the initial and final frames, perform the following:

get_shortest_path(initial, final)

Find the shortest path for all atoms about pbc and reorient the final frame so the

traverse_adsorbate_transfer(reaction, initial, final, ...)

Traverse reactant 1, reactant 2, product 1 and product 2 in a depth first search of

traverse_adsorbate_dissociation(reaction, initial, ...)

Traverse reactant 1, product 1 and product 2 in a depth first search of

traverse_adsorbate_desorption(reaction, initial, ...)

Traverse reactant 1 and product 1 in a depth first search of

get_product2_idx(reaction, edge_list_final, ...)

For dissociation only. Use the information about the initial edge list and final edge

traverse_adsorbate_general(traversal_rxt, slab_len, ...)

Perform the traversal to reposition atoms so that the distance along bonds is

unwrap_atoms(initial, final, reaction, map_idx)

Make corrections to the final frame so it is no longer wrapped around the cell,

interpolate(initial_frame, final_frame, num_frames)

Interpolate between the initial and final frames starting with a linear interpolation

is_edge_list_respected(frame, edge_list)

Check to see that the expected adsorbate-adsorbate edges are found and no additional

reorder_edge_list(edge_list, mapping)

For the final edge list, apply the mapping so the edges correspond to the correctly

is_adsorbate_adsorbed(adsorbate_slab_config)

Check to see if the adsorbate is adsorbed on the surface.

-
-
-class fairchem.applications.CatTSunami.ocpneb.core.autoframe.AutoFrame#
-

Base class to hold functions that are shared across the reaction types.

-
-
-reorder_adsorbate(frame: ase.Atoms, idx_mapping: dict)#
-

Given the adsorbate mapping, reorder the adsorbate atoms in the final frame so that they match the initial frame to facilitate proper interpolation.

-
-
Parameters:
-
    -
  • frame (ase.Atoms) – the atoms object for which the adsorbate will be reordered

  • -
  • idx_mapping (dict) – the index mapping to reorder things

  • -
-
-
Returns:
-

the reordered adsorbate-slab configuration

-
-
Return type:
-

ase.Atoms

-
-
-
- -
-
-only_keep_unique_systems(systems, energies)#
-

Remove duplicate systems from systems and energies.

-
-
Parameters:
-
    -
  • systems (list[ase.Atoms]) – the systems to remove duplicates from

  • -
  • energies (list[float]) – the energies to remove duplicates from

  • -
-
-
Returns:
-

the systems with duplicates removed -list[float]: the energies with duplicates removed

-
-
Return type:
-

list[ase.Atoms]

-
-
-
- -
-
-get_most_proximate_symmetric_group(initial: ase.Atoms, frame: ase.Atoms)#
-

For cases where the adsorbate has symmetry and the leaving group could be different atoms / sets of atoms, determine which one makes the most sense given the geometry of the initial and final frames. This is done by minimizing the total distance traveled by all atoms from the initial to the final frame.

-
-
Parameters:
-
    -
  • initial (ase.Atoms) – the initial adsorbate-surface configuration

  • -
  • frame (ase.Atoms) – the final adsorbate-surface configuration being considered.

  • -
-
-
Returns:
-

the mapping to be used which specifies the most apt leaving group -int: the index of the mapping to be used

-
-
Return type:
-

dict

-
-
-
- -
-
-are_all_adsorbate_atoms_overlapping(adsorbate1: ase.Atoms, adsorbate2: ase.Atoms)#
-

Test to see if all the adsorbate atoms are intersecting to find unique structures. Systems where they are overlapping are considered the same.

-
-
Parameters:
-
    -
  • adsorbate1 (ase.Atoms) – just the adsorbate atoms of a structure that is being -compared

  • -
  • adsorbate2 (ase.Atoms) – just the adsorbate atoms of the other structure that -is being compared

  • -
-
-
Returns:
-

-
True if all adsorbate atoms are overlapping (structure is a match)

False if one or more of the adsorbate atoms do not overlap

-
-
-

-
-
Return type:
-

(bool)

-
-
-
- -
- -
-
-class fairchem.applications.CatTSunami.ocpneb.core.autoframe.AutoFrameDissociation(reaction: ocpneb.core.Reaction, reactant_system: ase.Atoms, product1_systems: list, product1_energies: list, product2_systems: list, product2_energies: list, r_product1_max: float = None, r_product2_max: float = None, r_product2_min: float = None)#
-

Bases: AutoFrame

-

Base class to hold functions that are shared across the reaction types.

-
-
-get_neb_frames(calculator, n_frames: int = 5, n_pdt1_sites: int = 5, n_pdt2_sites: int = 5, fmax: float = 0.05, steps: int = 200)#
-

Propose final frames for NEB calculations. Perform a relaxation on the final -frame using the calculator provided. Interpolate between the initial -and final frames for a proposed reaction trajectory. Correct the trajectory if -there is any atomic overlap.

-
-
Parameters:
-
    -
  • calculator – an ase compatible calculator to be used to relax the final frame.

  • -
  • n_frames (int) – the number of frames per reaction trajectory

  • -
  • n_pdt1_sites (int) – The number of product 1 sites to consider

  • -
  • n_pdt2_sites (int) – The number of product 2 sites to consider. Note this is -multiplicative with n_pdt1_sites (i.e. if n_pdt1_sites = 2 and -n_pdt2_sites = 3 then a total of 6 final frames will be proposed)

  • -
  • fmax (float) – force convergence criterion for final frame optimization

  • -
  • steps (int) – step number termination criterion for final frame optimization

  • -
-
-
Returns:
-

the initial reaction coordinates

-
-
Return type:
-

list[lists]

-
-
-
- -
-
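A hedged usage sketch built only from the signatures documented on this page; `reaction`, `reactant_system`, the product system/energy lists, and `calc` (an ASE-compatible calculator) are assumed to have been prepared elsewhere, and the radii are illustrative.

from fairchem.applications.CatTSunami.ocpneb.core.autoframe import AutoFrameDissociation

af = AutoFrameDissociation(
    reaction=reaction,                    # ocpneb.core.Reaction, prepared elsewhere
    reactant_system=reactant_system,      # ase.Atoms
    product1_systems=product1_systems,    # list[ase.Atoms]
    product1_energies=product1_energies,  # list[float]
    product2_systems=product2_systems,
    product2_energies=product2_energies,
    r_product1_max=3.0,                   # illustrative radii
    r_product2_max=3.0,
    r_product2_min=1.0,
)

# Returns the initial reaction coordinates (a list of frame lists), per the docstring above.
frame_sets = af.get_neb_frames(calc, n_frames=10, n_pdt1_sites=4, n_pdt2_sites=4, fmax=0.05, steps=200)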
-get_best_sites_for_product1(n_sites: int = 5)#
-

Wrapper to find product 1 placements to be considered for the final frame -of the NEB.

-
-
Parameters:
-

n_sites (int) – The number of sites for product 1 to consider. Notice this is -multiplicative with product 2 sites (i.e. if 2 is specified here and 3 there) -then a total of 6 initial and final frames will be considered.

-
-
Returns:
-

-
the lowest energy, proximate placements of product

1 to be used in the final NEB frames

-
-
-

-
-
Return type:
-

(list[ase.Atoms])

-
-
-
- -
-
-get_best_unique_sites_for_product2(product1: ase.Atoms, n_sites: int = 5)#
-

Wrapper to find product 2 placements to be considered for the final frame -of the NEB.

-
-
Parameters:
-
    -
  • product1 (ase.Atoms) – The atoms object of the product 1 placement that will be -considered in this function to search for product 1 + product 2 combinations -for the final frame.

  • -
  • n_sites (int) – The number of sites for product 1 to consider. Notice this is -multiplicative with product 2 sites (i.e. if 2 is specified here and 3 there) -then a total of 6 initial and final frames will be considered.

  • -
-
-
Returns:
-

-
the lowest energy, proximate placements of product

2 to be used in the final NEB frames

-
-
-

-
-
Return type:
-

(list[ase.Atoms])

-
-
-
- -
-
-get_sites_within_r(center_coordinate: numpy.ndarray, all_systems: list, all_system_energies: list, all_systems_binding_idx: int, allowed_radius_max: float, allowed_radius_min: float, n_sites: int = 5)#
-

Get the n lowest energy sites of the systems within r. For now n is 5, or fewer if there are fewer than 5 unique sites within r.

-
-
Parameters:
-
    -
  • center_coordinate (np.ndarray) – the coordinate about which r should be -centered.

  • -
  • all_systems (list) – the list of all systems to be assessed for their -uniqueness and proximity to the center coordinate.

  • -
  • all_systems_binding_idx (int) – the idx of the adsorbate atom that is -bound in all_systems

  • -
  • allowed_radius_max (float) – the outer radius about center_coordinate -in which the adsorbate must lie to be considered.

  • -
  • allowed_radius_min (float) – the inner radius about center_coordinate -which the adsorbate must lie outside of to be considered.

  • -
  • n_sites (int) – the number of unique sites in r that will be chosen.

  • -
-
-
Returns:
-

list of systems identified as candidates.

-
-
Return type:
-

(list[ase.Atoms])

-
-
-
- -
- -
-
-class fairchem.applications.CatTSunami.ocpneb.core.autoframe.AutoFrameTransfer(reaction: ocpneb.core.Reaction, reactant1_systems: list, reactant2_systems: list, reactant1_energies: list, reactant2_energies: list, product1_systems: list, product1_energies: list, product2_systems: list, product2_energies: list, r_traverse_max: float, r_react_max: float, r_react_min: float)#
-

Bases: AutoFrame

-

Base class to hold functions that are shared across the reaction types.

-
-
-get_neb_frames(calculator, n_frames: int = 10, n_initial_frames: int = 5, n_final_frames_per_initial: int = 5, fmax: float = 0.05, steps: int = 200)#
-

Propose final frames for NEB calculations. Perform a relaxation on the final -frame using the calculator provided. Linearly interpolate between the initial -and final frames for a proposed reaction trajectory. Correct the trajectory if -there is any atomic overlap.

-
-
Parameters:
-
    -
  • calculator – an ase compatible calculator to be used to relax the initial and -final frames.

  • -
  • n_frames (int) – the number of frames per reaction trajectory

  • -
  • n_initial_frames (int) – The number of initial frames to consider

  • -
  • n_final_frames_per_initial (int) – The number of final frames per initial frame to consider

  • -
  • fmax (float) – force convergence criterion for final frame optimization

  • -
  • steps (int) – step number termination criterion for final frame optimization

  • -
-
-
Returns:
-

the initial reaction coordinates

-
-
Return type:
-

list[lists]

-
-
-
- -
-
-get_system_pairs_initial()#
-

Get the initial frames for the NEB. This is done by finding the closest -pair of systems from systems1 and systems2 for which the interstitial distance -between all adsorbate atoms is less than rmax and greater than rmin.

-
-
Returns:
-

the initial frames for the NEB -list[float]: the pseudo energies of the initial frames (i.e just the sum of the

-
-

individual adsorption energies)

-
-

-
-
Return type:
-

list[ase.Atoms]

-
-
-
- -
-
-get_system_pairs_final(system1_coord, system2_coord)#
-

Get the final frames for the NEB. This is done by finding the closest pair of systems from systems1 and systems2 for which the distance traversed by the adsorbate from the initial frame to the final frame is less than rmax and the minimum interstitial distance between the two products is greater than rmin.

-
-
Returns:
-

the initial frames for the NEB -list[float]: the pseudo energies of the initial frames

-
-
Return type:
-

list[ase.Atoms]

-
-
-
- -
- -
-
-class fairchem.applications.CatTSunami.ocpneb.core.autoframe.AutoFrameDesorption(reaction: ocpneb.core.Reaction, reactant_systems: list, reactant_energies: list, z_desorption: float)#
-

Bases: AutoFrame

-

Base class to hold functions that are shared across the reaction types.

-
-
-get_neb_frames(calculator, n_frames: int = 5, n_systems: int = 5, fmax: float = 0.05, steps: int = 200)#
-

Propose final frames for NEB calculations. Perform a relaxation on the final -frame using the calculator provided. Linearly interpolate between the initial -and final frames for a proposed reaction trajectory. Correct the trajectory if -there is any atomic overlap.

-
-
Parameters:
-
    -
  • calculator – an ase compatible calculator to be used to relax the final frame.

  • -
  • n_frames (int) – the number of frames per reaction trajectory

  • -
  • n_pdt1_sites (int) – The number of product 1 sites to consider

  • -
  • n_pdt2_sites (int) – The number of product 2 sites to consider. Note this is -multiplicative with n_pdt1_sites (i.e. if n_pdt1_sites = 2 and -n_pdt2_sites = 3 then a total of 6 final frames will be proposed)

  • -
  • fmax (float) – force convergence criterion for final frame optimization

  • -
  • steps (int) – step number termination criterion for final frame optimization

  • -
-
-
Returns:
-

the initial reaction coordinates

-
-
Return type:
-

list[lists]

-
-
-
- -
- -
-
-fairchem.applications.CatTSunami.ocpneb.core.autoframe.interpolate_and_correct_frames(initial: ase.Atoms, final: ase.Atoms, n_frames: int, reaction: ocpneb.core.Reaction, map_idx: int)#
-

Given the initial and final frames, perform the following:
(1) Unwrap the final frame if it is wrapped around the cell
(2) Interpolate between the initial and final frames

-
-
Parameters:
-
    -
  • initial (ase.Atoms) – the initial frame of the NEB

  • -
  • final (ase.Atoms) – the proposed final frame of the NEB

  • -
  • n_frames (int) – The desired number of frames for the NEB (not including initial and final)

  • -
  • reaction (ocpneb.core.Reaction) – the reaction object which provides pertinent info

  • -
  • map_idx (int) – the index of the mapping to use for the final frame

  • -
-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.core.autoframe.get_shortest_path(initial: ase.Atoms, final: ase.Atoms)#
-

Find the shortest path for all atoms about pbc and reorient the final frame so the atoms align with this shortest path. This allows us to perform a linear interpolation that does not interpolate jumps across pbc.

-
-
Parameters:
-
    -
  • initial (ase.Atoms) – the initial frame of the NEB

  • -
  • final (ase.Atoms) – the proposed final frame of the NEB to be corrected

  • -
-
-
Returns:
-

the corrected final frame
(ase.Atoms): the initial frame tiled (3,3,1), which is used in later steps
(ase.Atoms): the final frame tiled (3,3,1), which is used in later steps

-
-
Return type:
-

(ase.Atoms)

-
-
-
- -
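The pbc handling above boils down to a minimum-image correction; the following self-contained numpy snippet illustrates that idea only and is not the library's implementation.

import numpy as np

def minimum_image_displacement(pos_initial, pos_final, cell):
    # Displacement in fractional coordinates, folded to the nearest periodic image.
    disp_frac = (pos_final - pos_initial) @ np.linalg.inv(cell)
    disp_frac -= np.round(disp_frac)
    return disp_frac @ cell

cell = np.eye(3) * 10.0
start = np.array([[9.8, 0.0, 0.0]])
end = np.array([[0.2, 0.0, 0.0]])  # the atom crossed the cell boundary
print(minimum_image_displacement(start, end, cell))  # ~[[0.4, 0.0, 0.0]]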
-
-fairchem.applications.CatTSunami.ocpneb.core.autoframe.traverse_adsorbate_transfer(reaction: ocpneb.core.Reaction, initial: ase.Atoms, final: ase.Atoms, initial_tiled: ase.Atoms, final_tiled: ase.Atoms, edge_list_final: list)#
-

Traverse reactant 1, reactant 2, product 1 and product 2 in a depth first search of the bond graph. Unwrap the atoms to minimize the distance over the bonds. This ensures that when we perform the linear interpolation, the adsorbate moves as a single moiety and avoids accidental bond breaking events over pbc.

-
-
Parameters:
-
    -
  • reaction (ocpneb.core.Reaction) – the reaction object which provides pertinent info

  • -
  • initial (ase.Atoms) – the initial frame of the NEB

  • -
  • final (ase.Atoms) – the proposed final frame of the NEB to be corrected

  • -
  • initial_tiled (ase.Atoms) – the initial frame tiled (3,3,1)

  • -
  • final_tiled (ase.Atoms) – the final frame tiled (3,3,1)

  • -
  • edge_list_final (list) – the edge list of the final frame corrected with mapping -idx changes

  • -
-
-
Returns:
-

the corrected initial frame -(ase.Atoms): the corrected final frame

-
-
Return type:
-

(ase.Atoms)

-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.core.autoframe.traverse_adsorbate_dissociation(reaction: ocpneb.core.Reaction, initial: ase.Atoms, final: ase.Atoms, initial_tiled: ase.Atoms, final_tiled: ase.Atoms, edge_list_final: int)#
-

Traverse reactant 1, product 1 and product 2 in a depth first search of the bond graph. Unwrap the atoms to minimize the distance over the bonds. This ensures that when we perform the linear interpolation, the adsorbate moves as a single moiety and avoids accidental bond breaking events over pbc.

-
-
Parameters:
-
    -
  • reaction (ocpneb.core.Reaction) – the reaction object which provides pertinent info

  • -
  • initial (ase.Atoms) – the initial frame of the NEB

  • -
  • final (ase.Atoms) – the proposed final frame of the NEB to be corrected

  • -
  • initial_tiled (ase.Atoms) – the initial frame tiled (3,3,1)

  • -
  • final_tiled (ase.Atoms) – the final frame tiled (3,3,1)

  • -
  • edge_list_final (list) – the edge list of the final frame corrected with mapping -idx changes

  • -
-
-
Returns:
-

the corrected initial frame -(ase.Atoms): the corrected final frame

-
-
Return type:
-

(ase.Atoms)

-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.core.autoframe.traverse_adsorbate_desorption(reaction: ocpneb.core.Reaction, initial: ase.Atoms, final: ase.Atoms, initial_tiled: ase.Atoms, final_tiled: ase.Atoms)#
-

Traverse reactant 1 and product 1 in a depth first search of the bond graph. Unwrap the atoms to minimize the distance over the bonds. This ensures that when we perform the linear interpolation, the adsorbate moves as a single moiety and avoids accidental bond breaking events over pbc.

-
-
Parameters:
-
    -
  • reaction (ocpneb.core.Reaction) – the reaction object which provides pertinent info

  • -
  • initial (ase.Atoms) – the initial frame of the NEB

  • -
  • final (ase.Atoms) – the proposed final frame of the NEB to be corrected

  • -
  • initial_tiled (ase.Atoms) – the initial frame tiled (3,3,1)

  • -
  • final_tiled (ase.Atoms) – the final frame tiled (3,3,1)

  • -
  • edge_list_final (list) – the edge list of the final frame corrected with mapping -idx changes

  • -
-
-
Returns:
-

the corrected initial frame -(ase.Atoms): the corrected final frame

-
-
Return type:
-

(ase.Atoms)

-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.core.autoframe.get_product2_idx(reaction: ocpneb.core.Reaction, edge_list_final: list, traversal_rxt1_final: list)#
-

For dissociation only. Use the information about the initial edge list and final edge -list to determine which atom in product 2 lost a bond in the reaction and use this -as the binding index for traversal in traverse_adsorbate_dissociation.

-
-
Parameters:
-
    -
  • reaction (ocpneb.core.Reaction) – the reaction object which provides pertinent info

  • -
  • edge_list_final (list) – the edge list of the final frame corrected with mapping -idx changes

  • -
  • traversal_rxt1_final (list) – the traversal of reactant 1 for the final frame

  • -
-
-
Returns:
-

the binding index of product 2

-
-
Return type:
-

(int)

-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.core.autoframe.traverse_adsorbate_general(traversal_rxt, slab_len: int, starting_node_idx: int, equivalent_idx_factors: numpy.ndarray, frame: ase.Atoms, frame_tiled: ase.Atoms)#
-

Perform the traversal to reposition atoms so that the distance along bonds is -minimized.

-
-
Parameters:
-
    -
  • traversal_rxt (list) – the traversal of the adsorbate to be traversed. It is -the list of edges ordered by depth first search.

  • -
  • slab_len (int) – the number of atoms in the slab

  • -
  • starting_node_idx (int) – the index of the atom to start the traversal from

  • -
  • equivalent_idx_factors (np.ndarray) – the values to add to the untiled index -which gives equivalent indices (i.e. copies of that atom in the tiled system)

  • -
  • frame (ase.Atoms) – the frame to be corrected

  • -
  • frame_tiled (ase.Atoms) – the tiled (3,3,1) version of the frame which will be -corrected

  • -
-
-
Returns:
-

the corrected frame

-
-
Return type:
-

(ase.Atoms)

-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.core.autoframe.unwrap_atoms(initial: ase.Atoms, final: ase.Atoms, reaction: ocpneb.core.Reaction, map_idx: int)#
-

Make corrections to the final frame so it is no longer wrapped around the cell if it has jumped over the pbc. Ensure that for each adsorbate moiety, absolute bond distances for all edges that exist in the initial and final frames are minimized regardless of cell location. This enforces that the traversal of the adsorbates happens along the same path, which is not necessarily the minimum distance path for each atom. Changes are made in place.

-
-
Parameters:
-
    -
  • initial (ase.Atoms) – the initial atoms object to which the final atoms should -be proximate

  • -
  • final (ase.Atoms) – the final atoms object to be corrected

  • -
  • reaction (ocpneb.core.Reaction) – the reaction object which provides pertinent info

  • -
  • map_idx (int) – the index of the mapping to use for the final frame

  • -
-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.core.autoframe.interpolate(initial_frame: ase.Atoms, final_frame: ase.Atoms, num_frames: int)#
-

Interpolate between the initial and final frames starting with a linear interpolation along the atom-wise vectors from initial to final. Then iteratively correct the positions so atomic overlap is avoided/reduced. When iteratively updating, the positions of adjacent frames are considered to avoid large jumps in the trajectory.

-
-
Parameters:
-
    -
  • initial_frame (ase.Atoms) – the initial frame which will be interpolated from

  • -
  • final_frame (ase.Atoms) – the final frame which will be interpolated to

  • -
  • num_frames (int) – the number of frames to be interpolated between the initial

  • -
-
-
Returns:
-

the interpolated frames

-
-
Return type:
-

(list[ase.Atoms])

-
-
-
- -
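A stripped-down numpy illustration of the first step described above (linear interpolation along the atom-wise vectors); the iterative overlap correction performed by the real function is deliberately omitted.

import numpy as np

def linear_interpolation(initial_positions, final_positions, num_frames):
    # Interior frames only; the endpoints are the initial and final frames themselves.
    fractions = np.linspace(0.0, 1.0, num_frames + 2)[1:-1]
    return [initial_positions + f * (final_positions - initial_positions) for f in fractions]

initial = np.zeros((3, 3))   # 3 atoms at the origin
final = np.ones((3, 3))      # 3 atoms displaced by (1, 1, 1)
frames = linear_interpolation(initial, final, num_frames=3)
print([float(frame[0, 0]) for frame in frames])  # [0.25, 0.5, 0.75]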
-
-fairchem.applications.CatTSunami.ocpneb.core.autoframe.is_edge_list_respected(frame: ase.Atoms, edge_list: list)#
-

Check to see that the expected adsorbate-adsorbate edges are found and no additional -edges exist between the adsorbate atoms.

-
-
Parameters:
-
    -
  • frame (ase.Atoms) – the atoms object for which edges will be checked. -This must comply with ocp tagging conventions.

  • -
  • edge_list (list[tuples]) – The expected edges

  • -
-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.core.autoframe.reorder_edge_list(edge_list: list, mapping: dict)#
-

For the final edge list, apply the mapping so the edges correspond to the correctly -concatenated object.

-
-
Parameters:
-
    -
  • edge_list (list[tuples]) – the final edgelist

  • -
  • mapping – the mapping so the final atoms concatenated have indices that correctly map -to the initial atoms.

  • -
-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.core.autoframe.is_adsorbate_adsorbed(adsorbate_slab_config: ase.Atoms)#
-

Check to see if the adsorbate is adsorbed on the surface.

-
-
Parameters:
-

adsorbate_slab_config (ase.Atoms) – the combined adsorbate and slab configuration -with adsorbate atoms tagged as 2s and surface atoms tagged as 1s.

-
-
Returns:
-

True if the adsorbate is adsorbed, False otherwise.

-
-
Return type:
-

(bool)

-
-
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/applications/CatTSunami/ocpneb/core/index.html b/autoapi/fairchem/applications/CatTSunami/ocpneb/core/index.html
deleted file mode 100644
index 6f881e5d0..000000000
--- a/autoapi/fairchem/applications/CatTSunami/ocpneb/core/index.html
+++ /dev/null
@@ -1,623 +0,0 @@

fairchem.applications.CatTSunami.ocpneb.core#

-
-

Submodules#

\ No newline at end of file
diff --git a/autoapi/fairchem/applications/CatTSunami/ocpneb/core/ocpneb/index.html b/autoapi/fairchem/applications/CatTSunami/ocpneb/core/ocpneb/index.html
deleted file mode 100644
index f62b0be4b..000000000
--- a/autoapi/fairchem/applications/CatTSunami/ocpneb/core/ocpneb/index.html
+++ /dev/null
@@ -1,682 +0,0 @@

fairchem.applications.CatTSunami.ocpneb.core.ocpneb#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

OCPNEB

-
-
-class fairchem.applications.CatTSunami.ocpneb.core.ocpneb.OCPNEB(images, checkpoint_path, k=0.1, fmax=0.05, climb=False, parallel=False, remove_rotation_and_translation=False, world=None, dynamic_relaxation=True, scale_fmax=0.0, method='aseneb', allow_shared_calculator=False, precon=None, cpu=False, batch_size=4)#
-

Bases: ase.neb.DyNEB

-
-
-load_checkpoint(checkpoint_path: str) None#
-

Load existing trained model

-
-
Parameters:
-

checkpoint_path – string -Path to trained model

-
-
-
- -
-
-get_forces()#
-

Evaluate and return the forces.

-
- -
-
-set_positions(positions)#
-
- -
-
-get_precon_forces(forces, energies, images)#
-
- -
- -
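A hedged usage sketch based on the documented constructor and the ase.neb.DyNEB parentage noted above; `images` (a prepared list of ase.Atoms NEB frames) and the checkpoint path are assumed to exist.

from ase.optimize import BFGS
from fairchem.applications.CatTSunami.ocpneb.core.ocpneb import OCPNEB

# `images` is assumed to be a prepared list of ase.Atoms frames; the checkpoint path is illustrative.
neb = OCPNEB(images, checkpoint_path="/path/to/checkpoint.pt", k=0.1, fmax=0.05, batch_size=4, cpu=True)
opt = BFGS(neb, trajectory="neb.traj")
converged = opt.run(fmax=0.05, steps=200)
print("NEB converged:", converged)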
\ No newline at end of file
diff --git a/autoapi/fairchem/applications/CatTSunami/ocpneb/core/reaction/index.html b/autoapi/fairchem/applications/CatTSunami/ocpneb/core/reaction/index.html
deleted file mode 100644
index abc7bfba2..000000000
--- a/autoapi/fairchem/applications/CatTSunami/ocpneb/core/reaction/index.html
+++ /dev/null
@@ -1,654 +0,0 @@

fairchem.applications.CatTSunami.ocpneb.core.reaction#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

Reaction

Initialize Reaction object

-
-
-class fairchem.applications.CatTSunami.ocpneb.core.reaction.Reaction(reaction_db_path: str, adsorbate_db_path: str, reaction_id_from_db: int = None, reaction_str_from_db: str = None, reaction_type: str = None)#
-

Initialize Reaction object

-
-
-get_desorption_mapping(reactant)#
-

Get mapping for desorption reaction

-
- -
- -
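A hedged usage sketch that combines the documented constructor with one of the database path constants documented further down in this diff (fairchem.applications.CatTSunami.ocpneb.databases); the adsorbate database path, reaction string, and reaction_type value are assumptions.

from fairchem.applications.CatTSunami.ocpneb.core.reaction import Reaction
from fairchem.applications.CatTSunami.ocpneb.databases import DISSOCIATION_REACTION_DB_PATH

reaction = Reaction(
    reaction_db_path=DISSOCIATION_REACTION_DB_PATH,
    adsorbate_db_path="/path/to/adsorbate_db.pkl",  # illustrative path
    reaction_str_from_db="*OH -> *O + *H",          # illustrative reaction string
    reaction_type="dissociation",                   # assumed value, not confirmed by this page
)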
\ No newline at end of file
diff --git a/autoapi/fairchem/applications/CatTSunami/ocpneb/databases/index.html b/autoapi/fairchem/applications/CatTSunami/ocpneb/databases/index.html
deleted file mode 100644
index c55d3699a..000000000
--- a/autoapi/fairchem/applications/CatTSunami/ocpneb/databases/index.html
+++ /dev/null
@@ -1,641 +0,0 @@

fairchem.applications.CatTSunami.ocpneb.databases#

-
-

Package Contents#

-
-
-fairchem.applications.CatTSunami.ocpneb.databases.DISSOCIATION_REACTION_DB_PATH#
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.databases.DESORPTION_REACTION_DB_PATH#
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.databases.TRANSFER_REACTION_DB_PATH#
-
- -
-
\ No newline at end of file
diff --git a/autoapi/fairchem/applications/CatTSunami/ocpneb/run_validation/run_validation/index.html b/autoapi/fairchem/applications/CatTSunami/ocpneb/run_validation/run_validation/index.html
deleted file mode 100644
index 31372ed6f..000000000
--- a/autoapi/fairchem/applications/CatTSunami/ocpneb/run_validation/run_validation/index.html
+++ /dev/null
@@ -1,877 +0,0 @@

fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation#

-

A Python script to run a validation of the ML NEB model on a set of NEB calculations. This script has not been written to run in parallel, but it could be modified to do so.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

get_results_sp(df2)

Get the % success and % convergence for the model considered with

get_results_ml(df2)

Get the % success and % convergence for the model considered with

all_converged(row[, ml])

Dataframe function which makes the job of filtering to get % success cleaner.

both_barrierless(row)

Dataframe function which makes the job of filtering to get % success cleaner.

both_barriered(row)

Dataframe function which makes the job of filtering to get % success cleaner.

barrierless_converged(row)

Dataframe function which makes the job of filtering to get % success cleaner.

is_failed_sp(row)

Dataframe function which makes the job of filtering to get % success cleaner.

parse_neb_info(neb_frames, calc, conv, entry)

At the conclusion of the ML NEB, this function processes the important

get_single_point(atoms, vasp_dir, vasp_flags, vasp_command)

Gets a single point on the atoms passed.

-
-
-

Attributes#

- - - - - - -

parser

-
-
-fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.get_results_sp(df2: pandas.DataFrame)#
-

Get the % success and % convergence for the model considered with -single points performed on the transition states.

-
-
Parameters:
-

df2 (pd.DataFrame) – The dataframe containing the results of the -NEB calculations.

-
-
Returns:
-

-
a tuple of strings containing the % success and

% convergence

-
-
-

-
-
Return type:
-

(tuple[str])

-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.get_results_ml(df2)#
-

Get the % success and % convergence for the model considered with -just ML energy and force calls.

-
-
Parameters:
-

df2 (pd.DataFrame) – The dataframe containing the results of the -NEB calculations.

-
-
Returns:
-

-
a tuple of strings containing the % success and

% convergence

-
-
-

-
-
Return type:
-

(tuple[str])

-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.all_converged(row, ml=True)#
-

Dataframe function which makes the job of filtering to get % success cleaner. -It assesses the convergence.

-
-
Parameters:
-
    -
  • row – the dataframe row which the function is applied to

  • -
  • ml – boolean value. If True just the ML NEB and DFT NEB convergence are -considered. If False, the single point convergence is also considered.

  • -
-
-
Returns:
-

whether the system is converged

-
-
Return type:
-

bool

-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.both_barrierless(row)#
-

Dataframe function which makes the job of filtering to get % success cleaner. -It assesses if both DFT and ML find a barrierless transition state.

-
-
Parameters:
-

row – the dataframe row which the function is applied to

-
-
Returns:
-

True if both ML and DFT find a barrierless transition state, False otherwise

-
-
Return type:
-

bool

-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.both_barriered(row)#
-

Dataframe function which makes the job of filtering to get % success cleaner. -It assesses if both DFT and ML find a barriered transition state.

-
-
Parameters:
-

row – the dataframe row which the function is applied to

-
-
Returns:
-

True if both ML and DFT find a barriered transition state, False otherwise

-
-
Return type:
-

bool

-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.barrierless_converged(row)#
-

Dataframe function which makes the job of filtering to get % success cleaner. -It assesses if both DFT and ML find a barrierless, converged transition state.

-
-
Parameters:
-

row – the dataframe row which the function is applied to

-
-
Returns:
-

-
True if both ML and DFT find a barrierless converged transition state,

False otherwise

-
-
-

-
-
Return type:
-

bool

-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.is_failed_sp(row)#
-

Dataframe function which makes the job of filtering to get % success cleaner. -It assesses if the single point failed.

-
-
Parameters:
-

row – the dataframe row which the function is applied to

-
-
Returns:
-

True if the single point failed, otherwise False

-
-
Return type:
-

bool

-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.parse_neb_info(neb_frames: list, calc, conv: bool, entry: dict)#
-

At the conclusion of the ML NEB, this function processes the important -results and adds them to the entry dictionary.

-
-
Parameters:
-
    -
  • neb_frames (list[ase.Atoms]) – the ML relaxed NEB frames

  • -
  • calc – the ocp ase Atoms calculator

  • -
  • conv (bool) – whether or not the NEB achieved forces below the threshold within -the number of allowed steps

  • -
  • entry (dict) – the entry corresponding to the NEB performed

  • -
-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.get_single_point(atoms: ase.Atoms, vasp_dir: str, vasp_flags: dict, vasp_command: str)#
-

Gets a single point on the atoms passed.

-
-
Parameters:
-
    -
  • atoms (ase.Atoms) – the atoms object on which the single point will be performed

  • -
  • vasp_dir (str) – the path where the vasp files should be written

  • -
  • vasp_flags – a dictionary of the vasp INCAR flags

  • -
  • vasp_command (str) – the

  • -
-
-
-
- -
-
-fairchem.applications.CatTSunami.ocpneb.run_validation.run_validation.parser#
-
- -
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/_cli/index.html b/autoapi/fairchem/core/_cli/index.html
deleted file mode 100644
index 7b1e6f729..000000000
--- a/autoapi/fairchem/core/_cli/index.html
+++ /dev/null
@@ -1,694 +0,0 @@

fairchem.core._cli#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

Runner

Derived callable classes are requeued after timeout with their current

-
-
-

Functions#

- - - - - - -

main()

Run the main ocp-models program.

-
-
-class fairchem.core._cli.Runner(distributed: bool = False)#
-

Bases: submitit.helpers.Checkpointable

-

Derived callable classes are requeued after timeout with their current -state dumped at checkpoint.

-

__call__ method must be implemented to make your class a callable.

-
-

Note

-

The following implementation of the checkpoint method resubmits the full current -state of the callable (self) with the initial argument. You may want to replace the method to -curate the state (dump a neural network to a standard format and remove it from -the state so that not to pickle it) and change/remove the initial parameters.

-
-
-
-__call__(config: dict) None#
-
- -
-
-checkpoint(*args, **kwargs)#
-

Resubmits the same callable with the same arguments

-
- -
- -
-
-fairchem.core._cli.main()#
-

Run the main ocp-models program.

-
- -
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/common/data_parallel/index.html b/autoapi/fairchem/core/common/data_parallel/index.html
deleted file mode 100644
index 91b0addb8..000000000
--- a/autoapi/fairchem/core/common/data_parallel/index.html
+++ /dev/null
@@ -1,847 +0,0 @@

fairchem.core.common.data_parallel#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - -

OCPCollater

_HasMetadata

Base class for protocol classes.

StatefulDistributedSampler

More fine-grained state DataSampler that uses training iteration and epoch

BalancedBatchSampler

Base class for all Samplers.

-
-
-

Functions#

- - - - - - -

balanced_partition(sizes, num_parts)

Greedily partition the given set by always inserting

-
-
-class fairchem.core.common.data_parallel.OCPCollater(otf_graph: bool = False)#
-
-
-__call__(data_list: list[torch_geometric.data.Data]) torch_geometric.data.Batch#
-
- -
- -
-
-fairchem.core.common.data_parallel.balanced_partition(sizes: numpy.typing.NDArray[numpy.int_], num_parts: int)#
-

Greedily partition the given set by always inserting the largest element into the smallest partition.

-
- -
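Not the library's code: a plain-Python sketch of the greedy strategy described above, always placing the largest remaining element into the currently smallest partition.

def greedy_balanced_partition(sizes, num_parts):
    parts = [[] for _ in range(num_parts)]
    totals = [0] * num_parts
    # Largest elements first, each placed into the currently smallest partition.
    for idx in sorted(range(len(sizes)), key=lambda i: sizes[i], reverse=True):
        target = totals.index(min(totals))
        parts[target].append(idx)
        totals[target] += sizes[idx]
    return parts

print(greedy_balanced_partition([10, 7, 5, 4, 2, 1], num_parts=2))
# [[0, 3, 5], [1, 2, 4]] -> totals 15 and 14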
-
-class fairchem.core.common.data_parallel._HasMetadata#
-

Bases: Protocol

-

Base class for protocol classes.

-

Protocol classes are defined as:

-
class Proto(Protocol):
-    def meth(self) -> int:
-        ...
-
-
-

Such classes are primarily used with static type checkers that recognize -structural subtyping (static duck-typing).

-

For example:

-
class C:
-    def meth(self) -> int:
-        return 0
-
-def func(x: Proto) -> int:
-    return x.meth()
-
-func(C())  # Passes static type check
-
-
-

See PEP 544 for details. Protocol classes decorated with -@typing.runtime_checkable act as simple-minded runtime protocols that check -only the presence of given attributes, ignoring their type signatures. -Protocol classes can be generic, they are defined as:

-
class GenProto(Protocol[T]):
-    def meth(self) -> T:
-        ...
-
-
-
-
-property metadata_path: pathlib.Path#
-
- -
- -
-
-class fairchem.core.common.data_parallel.StatefulDistributedSampler(dataset, batch_size, **kwargs)#
-

Bases: torch.utils.data.DistributedSampler

-

More fine-grained state DataSampler that uses training iteration and epoch both for shuffling data. PyTorch DistributedSampler only uses epoch for the shuffling and starts sampling data from the start. In case of training on very large data, we train for one epoch only and when we resume training, we want to resume the data sampler from the training iteration.

-
-
-__iter__()#
-
- -
-
-set_epoch_and_start_iteration(epoch, start_iter)#
-
- -
- -
-
-class fairchem.core.common.data_parallel.BalancedBatchSampler(dataset, batch_size: int, num_replicas: int, rank: int, device: torch.device, mode: str | bool = 'atoms', shuffle: bool = True, drop_last: bool = False, force_balancing: bool = False, throw_on_error: bool = False)#
-

Bases: torch.utils.data.Sampler

-

Base class for all Samplers.

-

Every Sampler subclass has to provide an __iter__() method, providing a -way to iterate over indices or lists of indices (batches) of dataset elements, and a __len__() method -that returns the length of the returned iterators.

-
-
Parameters:
-

data_source (Dataset) – This argument is not used and will be removed in 2.2.0. -You may still have custom implementation that utilizes it.

-
-
-

Example

-
>>> # xdoctest: +SKIP
->>> class AccedingSequenceLengthSampler(Sampler[int]):
->>>     def __init__(self, data: List[str]) -> None:
->>>         self.data = data
->>>
->>>     def __len__(self) -> int:
->>>         return len(self.data)
->>>
->>>     def __iter__(self) -> Iterator[int]:
->>>         sizes = torch.tensor([len(x) for x in self.data])
->>>         yield from torch.argsort(sizes).tolist()
->>>
->>> class AccedingSequenceLengthBatchSampler(Sampler[List[int]]):
->>>     def __init__(self, data: List[str], batch_size: int) -> None:
->>>         self.data = data
->>>         self.batch_size = batch_size
->>>
->>>     def __len__(self) -> int:
->>>         return (len(self.data) + self.batch_size - 1) // self.batch_size
->>>
->>>     def __iter__(self) -> Iterator[List[int]]:
->>>         sizes = torch.tensor([len(x) for x in self.data])
->>>         for batch in torch.chunk(torch.argsort(sizes), len(self)):
->>>             yield batch.tolist()
-
-
-
-

Note

-

The __len__() method isn’t strictly required by -DataLoader, but is expected in any -calculation involving the length of a DataLoader.

-
-
-
-_load_dataset(dataset, mode: Literal[atoms, neighbors])#
-
- -
-
-__len__() int#
-
- -
-
-set_epoch_and_start_iteration(epoch: int, start_iteration: int) None#
-
- -
-
-__iter__()#
-
- -
- -
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/common/distutils/index.html b/autoapi/fairchem/core/common/distutils/index.html
deleted file mode 100644
index 5ab82228f..000000000
--- a/autoapi/fairchem/core/common/distutils/index.html
+++ /dev/null
@@ -1,774 +0,0 @@

fairchem.core.common.distutils#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

os_environ_get_or_throw(→ str)

setup(→ None)

cleanup(→ None)

initialized(→ bool)

get_rank(→ int)

get_world_size(→ int)

is_master(→ bool)

synchronize(→ None)

broadcast(→ None)

all_reduce(→ torch.Tensor)

all_gather(→ list[torch.Tensor])

gather_objects(→ list[T])

Gather a list of pickleable objects into rank 0

-
-
-

Attributes#

- - - - - - -

T

-
-
-fairchem.core.common.distutils.T#
-
- -
-
-fairchem.core.common.distutils.os_environ_get_or_throw(x: str) str#
-
- -
-
-fairchem.core.common.distutils.setup(config) None#
-
- -
-
-fairchem.core.common.distutils.cleanup() None#
-
- -
-
-fairchem.core.common.distutils.initialized() bool#
-
- -
-
-fairchem.core.common.distutils.get_rank() int#
-
- -
-
-fairchem.core.common.distutils.get_world_size() int#
-
- -
-
-fairchem.core.common.distutils.is_master() bool#
-
- -
-
-fairchem.core.common.distutils.synchronize() None#
-
- -
-
-fairchem.core.common.distutils.broadcast(tensor: torch.Tensor, src, group=dist.group.WORLD, async_op: bool = False) None#
-
- -
-
-fairchem.core.common.distutils.all_reduce(data, group=dist.group.WORLD, average: bool = False, device=None) torch.Tensor#
-
- -
-
-fairchem.core.common.distutils.all_gather(data, group=dist.group.WORLD, device=None) list[torch.Tensor]#
-
- -
-
-fairchem.core.common.distutils.gather_objects(data: T, group: torch.distributed.ProcessGroup = dist.group.WORLD) list[T]#
-

Gather a list of pickleable objects into rank 0

-
- -
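These helpers wrap standard torch.distributed collectives; the following self-contained, single-process example exercises those primitives directly (it is not specific to this module).

import os
import torch
import torch.distributed as dist

# Single-process "gloo" group, only to demonstrate the collectives being wrapped.
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

t = torch.tensor([1.0, 2.0, 3.0])
dist.all_reduce(t)                  # element-wise sum across ranks (identity for world_size == 1)
gathered = [None]                   # one slot per rank
dist.all_gather_object(gathered, {"rank": dist.get_rank()})
print(t, gathered)

dist.destroy_process_group()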
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/common/flags/index.html b/autoapi/fairchem/core/common/flags/index.html
deleted file mode 100644
index a235d5a47..000000000
--- a/autoapi/fairchem/core/common/flags/index.html
+++ /dev/null
@@ -1,681 +0,0 @@

fairchem.core.common.flags#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

Flags

-
-
-

Attributes#

- - - - - - -

flags

-
-
-class fairchem.core.common.flags.Flags#
-
-
-get_parser() argparse.ArgumentParser#
-
- -
-
-add_core_args() None#
-
- -
- -
-
-fairchem.core.common.flags.flags#
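As a rough usage sketch, the module-level flags object builds the command-line parser for a run; this assumes get_parser() returns an ordinary argparse.ArgumentParser, and the specific option names shown here are assumptions, not a documented interface.

from fairchem.core.common.flags import flags

# Hypothetical arguments for illustration only.
parser = flags.get_parser()
args, override_args = parser.parse_known_args(
    ["--mode", "train", "--config-yml", "configs/s2ef/example.yml"]
)
print(args)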
diff --git a/autoapi/fairchem/core/common/gp_utils/index.html b/autoapi/fairchem/core/common/gp_utils/index.html
deleted file mode 100644
index ee0e8c698..000000000
--- a/autoapi/fairchem/core/common/gp_utils/index.html
+++ /dev/null
@@ -1,1344 +0,0 @@

fairchem.core.common.gp_utils#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - -

CopyToModelParallelRegion

Base class to create custom autograd.Function.

ReduceFromModelParallelRegion

Base class to create custom autograd.Function.

ScatterToModelParallelRegion

Base class to create custom autograd.Function.

GatherFromModelParallelRegion

Base class to create custom autograd.Function.

-
-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

ensure_div(→ None)

divide_and_check_no_remainder(→ int)

setup_gp(→ None)

cleanup_gp(→ None)

initialized(→ bool)

get_dp_group()

get_gp_group()

get_dp_rank(→ int)

get_gp_rank(→ int)

get_dp_world_size(→ int)

get_gp_world_size(→ int)

pad_tensor(→ torch.Tensor)

trim_tensor(tensor[, sizes, dim])

_split_tensor(tensor, num_parts[, dim, contiguous_chunks])

_reduce(→ torch.Tensor)

_split(→ torch.Tensor)

_gather(→ torch.Tensor)

_gather_with_padding(→ torch.Tensor)

copy_to_model_parallel_region(→ torch.Tensor)

reduce_from_model_parallel_region(→ torch.Tensor)

scatter_to_model_parallel_region(→ torch.Tensor)

gather_from_model_parallel_region(→ torch.Tensor)

-
-
-

Attributes#

- - - - - - - - - -

_GRAPH_PARALLEL_GROUP

_DATA_PARALLEL_GROUP

-
-
-fairchem.core.common.gp_utils._GRAPH_PARALLEL_GROUP#
-
- -
-
-fairchem.core.common.gp_utils._DATA_PARALLEL_GROUP#
-
- -
-
-fairchem.core.common.gp_utils.ensure_div(a: int, b: int) None#
-
- -
-
-fairchem.core.common.gp_utils.divide_and_check_no_remainder(a: int, b: int) int#
-
- -
-
-fairchem.core.common.gp_utils.setup_gp(config) None#
-
- -
-
-fairchem.core.common.gp_utils.cleanup_gp() None#
-
- -
-
-fairchem.core.common.gp_utils.initialized() bool#
-
- -
-
-fairchem.core.common.gp_utils.get_dp_group()#
-
- -
-
-fairchem.core.common.gp_utils.get_gp_group()#
-
- -
-
-fairchem.core.common.gp_utils.get_dp_rank() int#
-
- -
-
-fairchem.core.common.gp_utils.get_gp_rank() int#
-
- -
-
-fairchem.core.common.gp_utils.get_dp_world_size() int#
-
- -
-
-fairchem.core.common.gp_utils.get_gp_world_size() int#
-
- -
-
-fairchem.core.common.gp_utils.pad_tensor(tensor: torch.Tensor, dim: int = -1, target_size: int | None = None) torch.Tensor#
-
- -
-
-fairchem.core.common.gp_utils.trim_tensor(tensor: torch.Tensor, sizes: torch.Tensor | None = None, dim: int = 0)#
-
- -
-
-fairchem.core.common.gp_utils._split_tensor(tensor: torch.Tensor, num_parts: int, dim: int = -1, contiguous_chunks: bool = False)#
-
- -
-
-fairchem.core.common.gp_utils._reduce(ctx: Any, input: torch.Tensor) torch.Tensor#
-
- -
-
-fairchem.core.common.gp_utils._split(input: torch.Tensor, dim: int = -1) torch.Tensor#
-
- -
-
-fairchem.core.common.gp_utils._gather(input: torch.Tensor, dim: int = -1) torch.Tensor#
-
- -
-
-fairchem.core.common.gp_utils._gather_with_padding(input: torch.Tensor, dim: int = -1) torch.Tensor#
-
- -
-
-class fairchem.core.common.gp_utils.CopyToModelParallelRegion(*args, **kwargs)#
-

Bases: torch.autograd.Function

-

Base class to create custom autograd.Function.

-

To create a custom autograd.Function, subclass this class and implement -the forward() and backward() static methods. Then, to use your custom -op in the forward pass, call the class method apply. Do not call -forward() directly.

-

To ensure correctness and best performance, make sure you are calling the -correct methods on ctx and validating your backward function using -torch.autograd.gradcheck().

-

See extending-autograd for more details on how to use this class.

-

Examples:

-
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
->>> class Exp(Function):
->>>     @staticmethod
->>>     def forward(ctx, i):
->>>         result = i.exp()
->>>         ctx.save_for_backward(result)
->>>         return result
->>>
->>>     @staticmethod
->>>     def backward(ctx, grad_output):
->>>         result, = ctx.saved_tensors
->>>         return grad_output * result
->>>
->>> # Use it by calling the apply method:
->>> # xdoctest: +SKIP
->>> output = Exp.apply(input)
-
-
-
-
-static forward(ctx, input: torch.Tensor) torch.Tensor#
-

Define the forward of the custom autograd Function.

-

This function is to be overridden by all subclasses. -There are two ways to define forward:

-

Usage 1 (Combined forward and ctx):

-
@staticmethod
-def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
-    pass
-
-
-
    -
  • It must accept a context ctx as the first argument, followed by any -number of arguments (tensors or other types).

  • -
  • See combining-forward-context for more details

  • -
-

Usage 2 (Separate forward and ctx):

-
@staticmethod
-def forward(*args: Any, **kwargs: Any) -> Any:
-    pass
-
-@staticmethod
-def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
-    pass
-
-
-
    -
  • The forward no longer accepts a ctx argument.

  • -
  • Instead, you must also override the torch.autograd.Function.setup_context() -staticmethod to handle setting up the ctx object. -output is the output of the forward, inputs are a Tuple of inputs -to the forward.

  • -
  • See extending-autograd for more details

  • -
-

The context can be used to store arbitrary data that can be then -retrieved during the backward pass. Tensors should not be stored -directly on ctx (though this is not currently enforced for -backward compatibility). Instead, tensors should be saved either with -ctx.save_for_backward() if they are intended to be used in -backward (equivalently, vjp) or ctx.save_for_forward() -if they are intended to be used for in jvp.

-
- -
-
-static backward(ctx, grad_output: torch.Tensor) torch.Tensor#
-

Define a formula for differentiating the operation with backward mode automatic differentiation.

-

This function is to be overridden by all subclasses. -(Defining this function is equivalent to defining the vjp function.)

-

It must accept a context ctx as the first argument, followed by -as many outputs as the forward() returned (None will be passed in -for non tensor outputs of the forward function), -and it should return as many tensors, as there were inputs to -forward(). Each argument is the gradient w.r.t the given output, -and each returned value should be the gradient w.r.t. the -corresponding input. If an input is not a Tensor or is a Tensor not -requiring grads, you can just pass None as a gradient for that input.

-

The context can be used to retrieve tensors saved during the forward -pass. It also has an attribute ctx.needs_input_grad as a tuple -of booleans representing whether each input needs gradient. E.g., -backward() will have ctx.needs_input_grad[0] = True if the -first input to forward() needs gradient computed w.r.t. the -output.

-
- -
- -
-
-class fairchem.core.common.gp_utils.ReduceFromModelParallelRegion(*args, **kwargs)#
-

Bases: torch.autograd.Function

-

Base class to create custom autograd.Function.

-

To create a custom autograd.Function, subclass this class and implement -the forward() and backward() static methods. Then, to use your custom -op in the forward pass, call the class method apply. Do not call -forward() directly.

-

To ensure correctness and best performance, make sure you are calling the -correct methods on ctx and validating your backward function using -torch.autograd.gradcheck().

-

See extending-autograd for more details on how to use this class.

-

Examples:

-
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
->>> class Exp(Function):
->>>     @staticmethod
->>>     def forward(ctx, i):
->>>         result = i.exp()
->>>         ctx.save_for_backward(result)
->>>         return result
->>>
->>>     @staticmethod
->>>     def backward(ctx, grad_output):
->>>         result, = ctx.saved_tensors
->>>         return grad_output * result
->>>
->>> # Use it by calling the apply method:
->>> # xdoctest: +SKIP
->>> output = Exp.apply(input)
-
-
-
-
-static forward(ctx, input: torch.Tensor) torch.Tensor#
-

Define the forward of the custom autograd Function.

-

This function is to be overridden by all subclasses. -There are two ways to define forward:

-

Usage 1 (Combined forward and ctx):

-
@staticmethod
-def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
-    pass
-
-
-
    -
  • It must accept a context ctx as the first argument, followed by any -number of arguments (tensors or other types).

  • -
  • See combining-forward-context for more details

  • -
-

Usage 2 (Separate forward and ctx):

-
@staticmethod
-def forward(*args: Any, **kwargs: Any) -> Any:
-    pass
-
-@staticmethod
-def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
-    pass
-
-
-
    -
  • The forward no longer accepts a ctx argument.

  • -
  • Instead, you must also override the torch.autograd.Function.setup_context() -staticmethod to handle setting up the ctx object. -output is the output of the forward, inputs are a Tuple of inputs -to the forward.

  • -
  • See extending-autograd for more details

  • -
-

The context can be used to store arbitrary data that can be then -retrieved during the backward pass. Tensors should not be stored -directly on ctx (though this is not currently enforced for -backward compatibility). Instead, tensors should be saved either with -ctx.save_for_backward() if they are intended to be used in -backward (equivalently, vjp) or ctx.save_for_forward() -if they are intended to be used for in jvp.

-
- -
-
-static backward(ctx, grad_output: torch.Tensor) torch.Tensor#
-

Define a formula for differentiating the operation with backward mode automatic differentiation.

-

This function is to be overridden by all subclasses. -(Defining this function is equivalent to defining the vjp function.)

-

It must accept a context ctx as the first argument, followed by -as many outputs as the forward() returned (None will be passed in -for non tensor outputs of the forward function), -and it should return as many tensors, as there were inputs to -forward(). Each argument is the gradient w.r.t the given output, -and each returned value should be the gradient w.r.t. the -corresponding input. If an input is not a Tensor or is a Tensor not -requiring grads, you can just pass None as a gradient for that input.

-

The context can be used to retrieve tensors saved during the forward -pass. It also has an attribute ctx.needs_input_grad as a tuple -of booleans representing whether each input needs gradient. E.g., -backward() will have ctx.needs_input_grad[0] = True if the -first input to forward() needs gradient computed w.r.t. the -output.

-
- -
- -
-
-class fairchem.core.common.gp_utils.ScatterToModelParallelRegion(*args, **kwargs)#
-

Bases: torch.autograd.Function

-

Base class to create custom autograd.Function.

-

To create a custom autograd.Function, subclass this class and implement -the forward() and backward() static methods. Then, to use your custom -op in the forward pass, call the class method apply. Do not call -forward() directly.

-

To ensure correctness and best performance, make sure you are calling the -correct methods on ctx and validating your backward function using -torch.autograd.gradcheck().

-

See extending-autograd for more details on how to use this class.

-

Examples:

-
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
->>> class Exp(Function):
->>>     @staticmethod
->>>     def forward(ctx, i):
->>>         result = i.exp()
->>>         ctx.save_for_backward(result)
->>>         return result
->>>
->>>     @staticmethod
->>>     def backward(ctx, grad_output):
->>>         result, = ctx.saved_tensors
->>>         return grad_output * result
->>>
->>> # Use it by calling the apply method:
->>> # xdoctest: +SKIP
->>> output = Exp.apply(input)
-
-
-
-
-static forward(ctx, input: torch.Tensor, dim: int = -1) torch.Tensor#
-

Define the forward of the custom autograd Function.

-

This function is to be overridden by all subclasses. -There are two ways to define forward:

-

Usage 1 (Combined forward and ctx):

-
@staticmethod
-def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
-    pass
-
-
-
    -
  • It must accept a context ctx as the first argument, followed by any -number of arguments (tensors or other types).

  • -
  • See combining-forward-context for more details

  • -
-

Usage 2 (Separate forward and ctx):

-
@staticmethod
-def forward(*args: Any, **kwargs: Any) -> Any:
-    pass
-
-@staticmethod
-def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
-    pass
-
-
-
    -
  • The forward no longer accepts a ctx argument.

  • -
  • Instead, you must also override the torch.autograd.Function.setup_context() -staticmethod to handle setting up the ctx object. -output is the output of the forward, inputs are a Tuple of inputs -to the forward.

  • -
  • See extending-autograd for more details

  • -
-

The context can be used to store arbitrary data that can be then -retrieved during the backward pass. Tensors should not be stored -directly on ctx (though this is not currently enforced for -backward compatibility). Instead, tensors should be saved either with -ctx.save_for_backward() if they are intended to be used in -backward (equivalently, vjp) or ctx.save_for_forward() -if they are intended to be used for in jvp.

-
- -
-
-static backward(ctx, grad_output: torch.Tensor)#
-

Define a formula for differentiating the operation with backward mode automatic differentiation.

-

This function is to be overridden by all subclasses. -(Defining this function is equivalent to defining the vjp function.)

-

It must accept a context ctx as the first argument, followed by -as many outputs as the forward() returned (None will be passed in -for non tensor outputs of the forward function), -and it should return as many tensors, as there were inputs to -forward(). Each argument is the gradient w.r.t the given output, -and each returned value should be the gradient w.r.t. the -corresponding input. If an input is not a Tensor or is a Tensor not -requiring grads, you can just pass None as a gradient for that input.

-

The context can be used to retrieve tensors saved during the forward -pass. It also has an attribute ctx.needs_input_grad as a tuple -of booleans representing whether each input needs gradient. E.g., -backward() will have ctx.needs_input_grad[0] = True if the -first input to forward() needs gradient computed w.r.t. the -output.

-
- -
- -
-
-class fairchem.core.common.gp_utils.GatherFromModelParallelRegion(*args, **kwargs)#
-

Bases: torch.autograd.Function

-

Base class to create custom autograd.Function.

-

To create a custom autograd.Function, subclass this class and implement -the forward() and backward() static methods. Then, to use your custom -op in the forward pass, call the class method apply. Do not call -forward() directly.

-

To ensure correctness and best performance, make sure you are calling the -correct methods on ctx and validating your backward function using -torch.autograd.gradcheck().

-

See extending-autograd for more details on how to use this class.

-

Examples:

-
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
->>> class Exp(Function):
->>>     @staticmethod
->>>     def forward(ctx, i):
->>>         result = i.exp()
->>>         ctx.save_for_backward(result)
->>>         return result
->>>
->>>     @staticmethod
->>>     def backward(ctx, grad_output):
->>>         result, = ctx.saved_tensors
->>>         return grad_output * result
->>>
->>> # Use it by calling the apply method:
->>> # xdoctest: +SKIP
->>> output = Exp.apply(input)
-
-
-
-
-static forward(ctx, input: torch.Tensor, dim: int = -1) torch.Tensor#
-

Define the forward of the custom autograd Function.

-

This function is to be overridden by all subclasses. -There are two ways to define forward:

-

Usage 1 (Combined forward and ctx):

-
@staticmethod
-def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
-    pass
-
-
-
    -
  • It must accept a context ctx as the first argument, followed by any -number of arguments (tensors or other types).

  • -
  • See combining-forward-context for more details

  • -
-

Usage 2 (Separate forward and ctx):

-
@staticmethod
-def forward(*args: Any, **kwargs: Any) -> Any:
-    pass
-
-@staticmethod
-def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
-    pass
-
-
-
    -
  • The forward no longer accepts a ctx argument.

  • -
  • Instead, you must also override the torch.autograd.Function.setup_context() -staticmethod to handle setting up the ctx object. -output is the output of the forward, inputs are a Tuple of inputs -to the forward.

  • -
  • See extending-autograd for more details

  • -
-

The context can be used to store arbitrary data that can be then -retrieved during the backward pass. Tensors should not be stored -directly on ctx (though this is not currently enforced for -backward compatibility). Instead, tensors should be saved either with -ctx.save_for_backward() if they are intended to be used in -backward (equivalently, vjp) or ctx.save_for_forward() -if they are intended to be used for in jvp.

-
- -
-
-static backward(ctx, grad_output: torch.Tensor)#
-

Define a formula for differentiating the operation with backward mode automatic differentiation.

-

This function is to be overridden by all subclasses. -(Defining this function is equivalent to defining the vjp function.)

-

It must accept a context ctx as the first argument, followed by -as many outputs as the forward() returned (None will be passed in -for non tensor outputs of the forward function), -and it should return as many tensors, as there were inputs to -forward(). Each argument is the gradient w.r.t the given output, -and each returned value should be the gradient w.r.t. the -corresponding input. If an input is not a Tensor or is a Tensor not -requiring grads, you can just pass None as a gradient for that input.

-

The context can be used to retrieve tensors saved during the forward -pass. It also has an attribute ctx.needs_input_grad as a tuple -of booleans representing whether each input needs gradient. E.g., -backward() will have ctx.needs_input_grad[0] = True if the -first input to forward() needs gradient computed w.r.t. the -output.

-
- -
- -
-
-fairchem.core.common.gp_utils.copy_to_model_parallel_region(input: torch.Tensor) torch.Tensor#
-
- -
-
-fairchem.core.common.gp_utils.reduce_from_model_parallel_region(input: torch.Tensor) torch.Tensor#
-
- -
-
-fairchem.core.common.gp_utils.scatter_to_model_parallel_region(input: torch.Tensor, dim: int = -1) torch.Tensor#
-
- -
-
-fairchem.core.common.gp_utils.gather_from_model_parallel_region(input: torch.Tensor, dim: int = -1) torch.Tensor#
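The four autograd functions above wrap the usual split/compute/gather pattern of model (graph) parallelism. A schematic sketch, assuming setup_gp() has already created the graph-parallel group from the config; the rank-local "layer" here is a placeholder.

import torch
from fairchem.core.common import gp_utils

def parallel_feature_block(x: torch.Tensor) -> torch.Tensor:
    # Split the feature dimension across the graph-parallel ranks ...
    x_local = gp_utils.scatter_to_model_parallel_region(x, dim=-1)
    # ... do some rank-local work (stand-in for a real layer) ...
    y_local = x_local * 2.0
    # ... then reassemble the full tensor on every rank.
    return gp_utils.gather_from_model_parallel_region(y_local, dim=-1)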
diff --git a/autoapi/fairchem/core/common/hpo_utils/index.html b/autoapi/fairchem/core/common/hpo_utils/index.html
deleted file mode 100644
index 2a9a0235b..000000000
--- a/autoapi/fairchem/core/common/hpo_utils/index.html
+++ /dev/null
@@ -1,667 +0,0 @@

fairchem.core.common.hpo_utils#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

tune_reporter(→ None)

Wrapper function for tune.report()

label_metric_dict(metric_dict, split)

-
-
-fairchem.core.common.hpo_utils.tune_reporter(iters, train_metrics, val_metrics, test_metrics=None, metric_to_opt: str = 'val_loss', min_max: str = 'min') None#
-

Wrapper function for tune.report()

-
-
Parameters:
-
    -
  • iters (dict) – dict with training iteration info (e.g. steps, epochs)

  • -
  • train_metrics (dict) – train metrics dict

  • -
  • val_metrics (dict) – val metrics dict

  • -
  • test_metrics (dict, optional) – test metrics dict, default is None

  • -
  • metric_to_opt (str, optional) – str for val metric to optimize, default is val_loss

  • -
  • min_max (str, optional) – either “min” or “max”, determines whether metric_to_opt is to be minimized or maximized, default is min

  • -
-
-
-
- -
-
-fairchem.core.common.hpo_utils.label_metric_dict(metric_dict, split)#
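A small sketch of calling tune_reporter with the documented arguments; the metric dictionaries are illustrative only, and the call is assumed to run inside a Ray Tune trial (it wraps tune.report()).

from fairchem.core.common.hpo_utils import tune_reporter

# Illustrative values; a real trial would pass the trainer's metrics.
iters = {"steps": 100, "epochs": 1}
train_metrics = {"loss": 0.42, "energy_mae": 0.31}
val_metrics = {"loss": 0.47, "energy_mae": 0.35}

tune_reporter(
    iters,
    train_metrics,
    val_metrics,
    metric_to_opt="val_loss",
    min_max="min",
)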
diff --git a/autoapi/fairchem/core/common/index.html b/autoapi/fairchem/core/common/index.html
deleted file mode 100644
index dbc29cb2d..000000000
--- a/autoapi/fairchem/core/common/index.html
+++ /dev/null
@@ -1,652 +0,0 @@

fairchem.core.common#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Subpackages#

- -
-
-

Submodules#

diff --git a/autoapi/fairchem/core/common/logger/index.html b/autoapi/fairchem/core/common/logger/index.html
deleted file mode 100644
index 28899e782..000000000
--- a/autoapi/fairchem/core/common/logger/index.html
+++ /dev/null
@@ -1,775 +0,0 @@

fairchem.core.common.logger#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - -

Logger

Generic class to interface with various logging modules, e.g. wandb,

WandBLogger

Generic class to interface with various logging modules, e.g. wandb,

TensorboardLogger

Generic class to interface with various logging modules, e.g. wandb,

-
-
-class fairchem.core.common.logger.Logger(config)#
-

Bases: abc.ABC

-

Generic class to interface with various logging modules, e.g. wandb, -tensorboard, etc.

-
-
-abstract watch(model)#
-

Monitor parameters and gradients.

-
- -
-
-log(update_dict, step: int, split: str = '')#
-

Log some values.

-
- -
-
-abstract log_plots(plots) None#
-
- -
-
-abstract mark_preempting() None#
-
- -
- -
-
-class fairchem.core.common.logger.WandBLogger(config)#
-

Bases: Logger

-

Generic class to interface with various logging modules, e.g. wandb, -tensorboard, etc.

-
-
-watch(model) None#
-

Monitor parameters and gradients.

-
- -
-
-log(update_dict, step: int, split: str = '') None#
-

Log some values.

-
- -
-
-log_plots(plots, caption: str = '') None#
-
- -
-
-mark_preempting() None#
-
- -
- -
-
-class fairchem.core.common.logger.TensorboardLogger(config)#
-

Bases: Logger

-

Generic class to interface with various logging modules, e.g. wandb, -tensorboard, etc.

-
-
-watch(model) bool#
-

Monitor parameters and gradients.

-
- -
-
-log(update_dict, step: int, split: str = '')#
-

Log some values.

-
- -
-
-mark_preempting() None#
-
- -
-
-log_plots(plots) None#
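A minimal sketch of implementing the Logger interface for a new backend, mirroring the pattern of WandBLogger and TensorboardLogger; the "stdout" registry key and the print-based behaviour are purely illustrative, and any preprocessing done by the base log() method is skipped here.

from fairchem.core.common.logger import Logger
from fairchem.core.common.registry import registry

@registry.register_logger("stdout")  # hypothetical key
class StdoutLogger(Logger):
    """Toy logger that just prints; real backends would buffer/upload."""

    def watch(self, model):
        return False  # gradient watching not supported in this sketch

    def log(self, update_dict, step: int, split: str = ""):
        print(f"[{split or 'train'}] step={step}: {update_dict}")

    def log_plots(self, plots) -> None:
        pass

    def mark_preempting(self) -> None:
        pass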
diff --git a/autoapi/fairchem/core/common/registry/index.html b/autoapi/fairchem/core/common/registry/index.html
deleted file mode 100644
index 5b70dd714..000000000
--- a/autoapi/fairchem/core/common/registry/index.html
+++ /dev/null
@@ -1,944 +0,0 @@

fairchem.core.common.registry#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-

# Copyright (c) Meta, Inc. and its affiliates.
# Borrowed from facebookresearch/pythia.

Registry is the central source of truth. Inspired by Redux's concept of a global store, Registry maintains mappings of various information to unique keys. Special functions in the registry can be used as decorators to register different kinds of classes.

Import the global registry object using

from fairchem.core.common.registry import registry

Various decorators for registering different kinds of classes with unique keys

-
    -
  • Register a model: @registry.register_model

  • -
-
-

Module Contents#

-
-

Classes#

- - - - - - -

Registry

Class for registry object which acts as central source of truth.

-
-
-

Functions#

- - - - - - -

_get_absolute_mapping(name)

-
-
-

Attributes#

- - - - - - - - - - - - -

R

NestedDict

registry

-
-
-fairchem.core.common.registry.R#
-
- -
-
-fairchem.core.common.registry.NestedDict#
-
- -
-
-fairchem.core.common.registry._get_absolute_mapping(name: str)#
-
- -
-
-class fairchem.core.common.registry.Registry#
-

Class for registry object which acts as central source of truth.

-
-
-mapping: ClassVar[NestedDict]#
-
- -
-
-classmethod register_task(name: str)#
-

Register a new task to registry with key ‘name’ -:param name: Key with which the task will be registered.

-
-
Usage::

from fairchem.core.common.registry import registry -from fairchem.core.tasks import BaseTask -@registry.register_task(“train”) -class TrainTask(BaseTask):

-
-

-
-
-
-
- -
-
-classmethod register_dataset(name: str)#
-

Register a dataset to registry with key ‘name’

-
-
Parameters:
-

name – Key with which the dataset will be registered.

-
-
-

Usage:

-
from fairchem.core.common.registry import registry
-from fairchem.core.datasets import BaseDataset
-
-@registry.register_dataset("qm9")
-class QM9(BaseDataset):
-    ...
-
-
-
- -
-
-classmethod register_model(name: str)#
-

Register a model to registry with key ‘name’

-
-
Parameters:
-

name – Key with which the model will be registered.

-
-
-

Usage:

-
from fairchem.core.common.registry import registry
-from fairchem.core.modules.layers import CGCNNConv
-
-@registry.register_model("cgcnn")
-class CGCNN():
-    ...
-
-
-
- -
-
-classmethod register_logger(name: str)#
-

Register a logger to registry with key ‘name’

-
-
Parameters:
-

name – Key with which the logger will be registered.

-
-
-

Usage:

-
from fairchem.core.common.registry import registry
-
-@registry.register_logger("wandb")
-class WandBLogger():
-    ...
-
-
-
- -
-
-classmethod register_trainer(name: str)#
-

Register a trainer to registry with key ‘name’

-
-
Parameters:
-

name – Key with which the trainer will be registered.

-
-
-

Usage:

-
from fairchem.core.common.registry import registry
-
-@registry.register_trainer("active_discovery")
-class ActiveDiscoveryTrainer():
-    ...
-
-
-
- -
-
-classmethod register(name: str, obj) None#
-

Register an item to registry with key ‘name’

-
-
Parameters:
-

name – Key with which the item will be registered.

-
-
-

Usage:

-
from fairchem.core.common.registry import registry
-
-registry.register("config", {})
-
-
-
- -
-
-classmethod __import_error(name: str, mapping_name: str) RuntimeError#
-
- -
-
-classmethod get_class(name: str, mapping_name: str)#
-
- -
-
-classmethod get_task_class(name: str)#
-
- -
-
-classmethod get_dataset_class(name: str)#
-
- -
-
-classmethod get_model_class(name: str)#
-
- -
-
-classmethod get_logger_class(name: str)#
-
- -
-
-classmethod get_trainer_class(name: str)#
-
- -
-
-classmethod get(name: str, default=None, no_warning: bool = False)#
-

Get an item from registry with key ‘name’

-
-
Parameters:
-
    -
  • name (string) – Key whose value needs to be retrieved.

  • -
  • default – If passed and key is not in registry, default value will -be returned with a warning. Default: None

  • -
  • no_warning (bool) – If passed as True, warning when key doesn’t exist -will not be generated. Useful for cgcnn’s -internal operations. Default: False

  • -
-
-
-

Usage:

-
from fairchem.core.common.registry import registry
-
-config = registry.get("config")
-
-
-
- -
-
-classmethod unregister(name: str)#
-

Remove an item from registry with key ‘name’

-
-
Parameters:
-

name – Key which needs to be removed.

-
-
-

Usage:

-
from fairchem.core.common.registry import registry
-
-config = registry.unregister("config")
-
-
-
- -
- -
-
-fairchem.core.common.registry.registry#
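Putting the pieces above together, registration and lookup are symmetric: a class registered under a key can later be retrieved by that key when building objects from a config. The "my_model" key here is hypothetical.

from fairchem.core.common.registry import registry

@registry.register_model("my_model")  # hypothetical key
class MyModel:
    ...

# Later, e.g. when a trainer builds the model named in a config:
model_cls = registry.get_model_class("my_model")
model = model_cls()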
diff --git a/autoapi/fairchem/core/common/relaxation/ase_utils/index.html b/autoapi/fairchem/core/common/relaxation/ase_utils/index.html
deleted file mode 100644
index 23e036a24..000000000
--- a/autoapi/fairchem/core/common/relaxation/ase_utils/index.html
+++ /dev/null
@@ -1,734 +0,0 @@

fairchem.core.common.relaxation.ase_utils#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-

Utilities to interface OCP models/trainers with the Atomic Simulation -Environment (ASE)

-
-

Module Contents#

-
-

Classes#

- - - - - - -

OCPCalculator

Base-class for all ASE calculators.

-
-
-

Functions#

- - - - - - -

batch_to_atoms(batch)

-
-
-fairchem.core.common.relaxation.ase_utils.batch_to_atoms(batch)#
-
- -
-
-class fairchem.core.common.relaxation.ase_utils.OCPCalculator(config_yml: str | None = None, checkpoint_path: str | None = None, model_name: str | None = None, local_cache: str | None = None, trainer: str | None = None, cutoff: int = 6, max_neighbors: int = 50, cpu: bool = True, seed: int | None = None)#
-

Bases: ase.calculators.calculator.Calculator

-

Base-class for all ASE calculators.

-

A calculator must raise PropertyNotImplementedError if asked for a -property that it can’t calculate. So, if calculation of the -stress tensor has not been implemented, get_stress(atoms) should -raise PropertyNotImplementedError. This can be achieved simply by not -including the string ‘stress’ in the list implemented_properties -which is a class member. These are the names of the standard -properties: ‘energy’, ‘forces’, ‘stress’, ‘dipole’, ‘charges’, -‘magmom’ and ‘magmoms’.

-
-
-implemented_properties: ClassVar[list[str]] = ['energy', 'forces']#
-
- -
-
-load_checkpoint(checkpoint_path: str, checkpoint: dict | None = None) None#
-

Load existing trained model

-
-
Parameters:
-

checkpoint_path – string -Path to trained model

-
-
-
- -
-
-calculate(atoms: ase.Atoms, properties, system_changes) None#
-

Do the calculation.

-
-
properties: list of str

List of what needs to be calculated. Can be any combination -of ‘energy’, ‘forces’, ‘stress’, ‘dipole’, ‘charges’, ‘magmom’ -and ‘magmoms’.

-
-
system_changes: list of str

List of what has changed since last calculation. Can be -any combination of these six: ‘positions’, ‘numbers’, ‘cell’, -‘pbc’, ‘initial_charges’ and ‘initial_magmoms’.

-
-
-

Subclasses need to implement this, but can ignore properties -and system_changes if they want. Calculated properties should -be inserted into results dictionary like shown in this dummy -example:

-
self.results = {'energy': 0.0,
-                'forces': np.zeros((len(atoms), 3)),
-                'stress': np.zeros(6),
-                'dipole': np.zeros(3),
-                'charges': np.zeros(len(atoms)),
-                'magmom': 0.0,
-                'magmoms': np.zeros(len(atoms))}
-
-
-

The subclass implementation should first call this implementation to set the atoms attribute and create any missing directories.
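For orientation, this is roughly how OCPCalculator is used from ASE, following the constructor signature above; the checkpoint path is a placeholder (any pretrained S2EF checkpoint would do) and cpu=True simply mirrors the default.

from ase.build import fcc111, add_adsorbate
from ase.optimize import BFGS
from fairchem.core.common.relaxation.ase_utils import OCPCalculator

# Placeholder checkpoint path.
calc = OCPCalculator(
    checkpoint_path="/tmp/ocp_checkpoints/gnoc_oc22_oc20_all_s2ef.pt",
    cpu=True,
)

slab = fcc111("Cu", size=(2, 2, 3), vacuum=8.0)
add_adsorbate(slab, "O", height=1.2, position="fcc")
slab.calc = calc

opt = BFGS(slab)
opt.run(fmax=0.05, steps=20)
print(slab.get_potential_energy())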

diff --git a/autoapi/fairchem/core/common/relaxation/index.html b/autoapi/fairchem/core/common/relaxation/index.html
deleted file mode 100644
index 26e08f199..000000000
--- a/autoapi/fairchem/core/common/relaxation/index.html
+++ /dev/null
@@ -1,633 +0,0 @@

fairchem.core.common.relaxation#

-
-

Subpackages#

- -
-
-

Submodules#

diff --git a/autoapi/fairchem/core/common/relaxation/ml_relaxation/index.html b/autoapi/fairchem/core/common/relaxation/ml_relaxation/index.html
deleted file mode 100644
index c9438ed76..000000000
--- a/autoapi/fairchem/core/common/relaxation/ml_relaxation/index.html
+++ /dev/null
@@ -1,664 +0,0 @@

fairchem.core.common.relaxation.ml_relaxation#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - -

ml_relax(batch, model, steps, fmax, relax_opt, ...[, ...])

Runs ML-based relaxations.

-
-
-fairchem.core.common.relaxation.ml_relaxation.ml_relax(batch, model, steps: int, fmax: float, relax_opt, save_full_traj, device: str = 'cuda:0', transform=None, early_stop_batch: bool = False)#
-

Runs ML-based relaxations.

:param batch: object
:param model: object
:param steps: int
    Max number of steps in the structure relaxation.

Parameters:
  • fmax – float
    Structure relaxation terminates when the max force of the system is no bigger than fmax.
  • relax_opt – str
    Optimizer and corresponding parameters to be used for structure relaxations.
  • save_full_traj – bool
    Whether to save out the full ASE trajectory. If False, only save out initial and final frames.
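A sketch of calling ml_relax directly, assuming a torch_geometric Batch and a model/trainer wrapper prepared elsewhere; the keys inside relax_opt are assumptions, not a documented schema.

from fairchem.core.common.relaxation.ml_relaxation import ml_relax

# `batch` and `trainer` are assumed to have been built elsewhere.
relaxed = ml_relax(
    batch=batch,
    model=trainer,
    steps=300,
    fmax=0.02,
    relax_opt={"memory": 50, "damping": 1.0, "alpha": 70.0, "traj_dir": None},
    save_full_traj=False,
    device="cpu",
)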
diff --git a/autoapi/fairchem/core/common/relaxation/optimizers/index.html b/autoapi/fairchem/core/common/relaxation/optimizers/index.html
deleted file mode 100644
index 57a3d1e5c..000000000
--- a/autoapi/fairchem/core/common/relaxation/optimizers/index.html
+++ /dev/null
@@ -1,619 +0,0 @@

fairchem.core.common.relaxation.optimizers#

diff --git a/autoapi/fairchem/core/common/relaxation/optimizers/lbfgs_torch/index.html b/autoapi/fairchem/core/common/relaxation/optimizers/lbfgs_torch/index.html
deleted file mode 100644
index 949ee4e15..000000000
--- a/autoapi/fairchem/core/common/relaxation/optimizers/lbfgs_torch/index.html
+++ /dev/null
@@ -1,718 +0,0 @@

fairchem.core.common.relaxation.optimizers.lbfgs_torch#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

LBFGS

TorchCalc

-
-
-class fairchem.core.common.relaxation.optimizers.lbfgs_torch.LBFGS(batch: torch_geometric.data.Batch, model: TorchCalc, maxstep: float = 0.01, memory: int = 100, damping: float = 0.25, alpha: float = 100.0, force_consistent=None, device: str = 'cuda:0', save_full_traj: bool = True, traj_dir: pathlib.Path | None = None, traj_names=None, early_stop_batch: bool = False)#
-
-
-get_energy_and_forces(apply_constraint: bool = True)#
-
- -
-
-set_positions(update, update_mask) None#
-
- -
-
-check_convergence(iteration, forces=None, energy=None)#
-
- -
-
-run(fmax, steps)#
-
- -
-
-step(iteration: int, forces: torch.Tensor | None, update_mask: torch.Tensor) None#
-
- -
-
-write(energy, forces, update_mask) None#
-
- -
- -
-
-class fairchem.core.common.relaxation.optimizers.lbfgs_torch.TorchCalc(model, transform=None)#
-
-
-get_energy_and_forces(atoms, apply_constraint: bool = True)#
-
- -
-
-update_graph(atoms)#
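Sketch of the driver loop these two classes implement, based only on the signatures above; `model` and `batch` are assumed to exist, and the return value of run() is not documented here, so treating it as the relaxed batch is an assumption.

from fairchem.core.common.relaxation.optimizers.lbfgs_torch import LBFGS, TorchCalc

calc = TorchCalc(model)  # `model` is an OCP model, assumed here
opt = LBFGS(batch, calc, maxstep=0.04, memory=50, device="cpu", save_full_traj=False)
relaxed_batch = opt.run(fmax=0.02, steps=200)  # return value assumed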
diff --git a/autoapi/fairchem/core/common/transforms/index.html b/autoapi/fairchem/core/common/transforms/index.html
deleted file mode 100644
index a85b27050..000000000
--- a/autoapi/fairchem/core/common/transforms/index.html
+++ /dev/null
@@ -1,678 +0,0 @@

fairchem.core.common.transforms#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

RandomRotate

Rotates node positions around a specific axis by a randomly sampled

-
-
-class fairchem.core.common.transforms.RandomRotate(degrees, axes: list[int] | None = None)#
-

Rotates node positions around a specific axis by a randomly sampled -factor within a given interval.

-
-
Parameters:
-
    -
  • degrees (tuple or float) – Rotation interval from which the rotation -angle is sampled. If degrees is a number instead of a -tuple, the interval is given by \([-\mathrm{degrees}, -\mathrm{degrees}]\).

  • -
  • axes (int, optional) – The rotation axes. (default: [0, 1, 2])

  • -
-
-
-
-
-__call__(data)#
-
- -
-
-__repr__() str#
-

Return repr(self).
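A small illustration of the transform on a toy Data object with random positions; note that the exact return value of __call__ is not documented above, so it may include the rotation matrices alongside the rotated data.

import torch
from torch_geometric.data import Data
from fairchem.core.common.transforms import RandomRotate

data = Data(pos=torch.rand(8, 3))
# Rotate by an angle sampled from [-180, 180] degrees about all three axes.
transform = RandomRotate(degrees=180, axes=[0, 1, 2])
out = transform(data)  # may also return the rotation and inverse-rotation matrices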

diff --git a/autoapi/fairchem/core/common/tutorial_utils/index.html b/autoapi/fairchem/core/common/tutorial_utils/index.html
deleted file mode 100644
index 05ba92375..000000000
--- a/autoapi/fairchem/core/common/tutorial_utils/index.html
+++ /dev/null
@@ -1,700 +0,0 @@

fairchem.core.common.tutorial_utils#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - -

ocp_root()

Return the root directory of the installed ocp package.

ocp_main()

Return the path to ocp main.py

describe_ocp()

Print some system information that could be useful in debugging.

train_test_val_split(ase_db[, ttv, files, seed])

Split an ase db into train, test and validation dbs.

generate_yml_config(checkpoint_path[, yml, delete, update])

Generate a yml config file from an existing checkpoint file.

-
-
-fairchem.core.common.tutorial_utils.ocp_root()#
-

Return the root directory of the installed ocp package.

-
- -
-
-fairchem.core.common.tutorial_utils.ocp_main()#
-

Return the path to ocp main.py

-
- -
-
-fairchem.core.common.tutorial_utils.describe_ocp()#
-

Print some system information that could be useful in debugging.

-
- -
-
-fairchem.core.common.tutorial_utils.train_test_val_split(ase_db, ttv=(0.8, 0.1, 0.1), files=('train.db', 'test.db', 'val.db'), seed=42)#
-

Split an ase db into train, test and validation dbs.

-

ase_db: path to an ase db containing all the data.
ttv: a tuple containing the fraction of train, test and val data. This will be normalized.
files: a tuple of filenames to write the splits into. An exception is raised if these exist; you should delete them first.
seed: an integer for the random number generator seed

Returns the absolute path to files.

-
- -
-
-fairchem.core.common.tutorial_utils.generate_yml_config(checkpoint_path, yml='run.yml', delete=(), update=())#
-

Generate a yml config file from an existing checkpoint file.

-

checkpoint_path: string to path of an existing checkpoint
yml: name of file to write to.
pop: list of keys to remove from the config
update: dictionary of key:values to update

-

Use a dot notation in update.

-

Returns an absolute path to the generated yml file.
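These two helpers are typically chained when preparing a fine-tuning run. A sketch under stated assumptions: the database name, the dotted update keys, and the three-way unpacking of train_test_val_split's return value are illustrative, not guaranteed by the docstrings above.

from fairchem.core.common.tutorial_utils import train_test_val_split, generate_yml_config

# Returns the absolute paths to the split files (3-tuple unpacking assumed).
train, test, val = train_test_val_split("oxides.db", ttv=(0.8, 0.1, 0.1), seed=42)

yml = generate_yml_config(
    "/tmp/ocp_checkpoints/gnoc_oc22_oc20_all_s2ef.pt",  # placeholder checkpoint
    "config.yml",
    delete=["slurm", "cmd", "logger"],
    update={
        # Dot notation as described above; the exact keys are examples only.
        "gpus": 0,
        "dataset.train.src": str(train),
        "dataset.val.src": str(val),
        "dataset.test.src": str(test),
        "optim.eval_every": 10,
    },
)
print(yml)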

diff --git a/autoapi/fairchem/core/common/typing/index.html b/autoapi/fairchem/core/common/typing/index.html
deleted file mode 100644
index 4a63664c8..000000000
--- a/autoapi/fairchem/core/common/typing/index.html
+++ /dev/null
@@ -1,670 +0,0 @@

fairchem.core.common.typing#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

assert_is_instance(→ _T)

none_throws(→ _T)

-
-
-

Attributes#

- - - - - - -

_T

-
-
-fairchem.core.common.typing._T#
-
- -
-
-fairchem.core.common.typing.assert_is_instance(obj: object, cls: type[_T]) _T#
-
- -
-
-fairchem.core.common.typing.none_throws(x: _T | None, msg: str | None = None) _T#
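Both helpers are thin runtime guards. A sketch of their intended use, assuming (as the names and signatures suggest) that they raise when the value is None or of the wrong type and otherwise return it unchanged:

from fairchem.core.common.typing import assert_is_instance, none_throws

def read_cutoff(config: dict) -> float:
    # Raises if the value is missing or not a float (assumed behavior).
    raw = none_throws(config.get("cutoff"), msg="cutoff must be set")
    return assert_is_instance(raw, float)

print(read_cutoff({"cutoff": 6.0}))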
diff --git a/autoapi/fairchem/core/common/utils/index.html b/autoapi/fairchem/core/common/utils/index.html
deleted file mode 100644
index 75132aad6..000000000
--- a/autoapi/fairchem/core/common/utils/index.html
+++ /dev/null
@@ -1,1155 +0,0 @@

fairchem.core.common.utils#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - -

UniqueKeyLoader

Complete

SeverityLevelBetween

Filter instances are used to perform arbitrary filtering of LogRecords.

-
-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

pyg2_data_transform(data)

if we're on the new pyg (2.0 or later) and if the Data stored is in older format

save_checkpoint(→ str)

warmup_lr_lambda(current_step, optim_config)

Returns a learning rate multiplier.

print_cuda_usage(→ None)

conditional_grad(dec)

Decorator to enable/disable grad depending on whether force/energy predictions are being made

plot_histogram(data[, xlabel, ylabel, title])

collate(data_list)

add_edge_distance_to_graph(batch[, device, dmin, ...])

_import_local_file(→ None)

Imports a Python file as a module

setup_experimental_imports(→ None)

Import selected directories of modules from the "experimental" subdirectory.

_get_project_root(→ pathlib.Path)

Gets the root folder of the project (the "ocp" folder)

setup_imports(→ None)

dict_set_recursively(→ None)

parse_value(value)

Parse string as Python literal if possible and fallback to string.

create_dict_from_args(args[, sep])

Create a (nested) dictionary from console arguments.

load_config(path[, previous_includes])

build_config(args, args_override)

create_grid(base_config, sweep_file)

save_experiment_log(args, jobs, configs)

get_pbc_distances(pos, edge_index, cell, cell_offsets, ...)

radius_graph_pbc(data, radius, max_num_neighbors_threshold)

get_max_neighbors_mask(natoms, index, atom_distance, ...)

Give a mask that filters out edges so that each atom has at most

get_pruned_edge_idx(→ torch.Tensor)

merge_dicts(dict1, dict2)

Recursively merge two dictionaries.

setup_logging(→ None)

compute_neighbors(data, edge_index)

check_traj_files(→ bool)

new_trainer_context(*, config[, distributed])

_resolve_scale_factor_submodule(model, name)

_report_incompat_keys(→ tuple[list[str], list[str]])

load_state_dict(→ tuple[list[str], list[str]])

scatter_det(*args, **kwargs)

get_commit_hash()

cg_change_mat(→ torch.tensor)

irreps_sum(→ int)

Returns the sum of the dimensions of the irreps up to the specified angular momentum.

update_config(base_config)

Configs created prior to OCP 2.0 are organized a little different than they

get_loss_module(loss_name)

-
-
-class fairchem.core.common.utils.UniqueKeyLoader(stream)#
-

Bases: yaml.SafeLoader

-
-
-construct_mapping(node, deep=False)#
-
- -
- -
-
-fairchem.core.common.utils.pyg2_data_transform(data: torch_geometric.data.Data)#
-

if we’re on the new pyg (2.0 or later) and if the Data stored is in older format -we need to convert the data to the new format

-
- -
-
-fairchem.core.common.utils.save_checkpoint(state, checkpoint_dir: str = 'checkpoints/', checkpoint_file: str = 'checkpoint.pt') str#
-
- -
-
-class fairchem.core.common.utils.Complete#
-
-
-__call__(data)#
-
- -
- -
-
-fairchem.core.common.utils.warmup_lr_lambda(current_step: int, optim_config)#
-

Returns a learning rate multiplier. -Till warmup_steps, learning rate linearly increases to initial_lr, -and then gets multiplied by lr_gamma every time a milestone is crossed.

-
- -
-
-fairchem.core.common.utils.print_cuda_usage() None#
-
- -
-
-fairchem.core.common.utils.conditional_grad(dec)#
-

Decorator to enable/disable grad depending on whether force/energy predictions are being made

-
- -
-
-fairchem.core.common.utils.plot_histogram(data, xlabel: str = '', ylabel: str = '', title: str = '')#
-
- -
-
-fairchem.core.common.utils.collate(data_list)#
-
- -
-
-fairchem.core.common.utils.add_edge_distance_to_graph(batch, device='cpu', dmin: float = 0.0, dmax: float = 6.0, num_gaussians: int = 50)#
-
- -
-
-fairchem.core.common.utils._import_local_file(path: pathlib.Path, *, project_root: pathlib.Path) None#
-

Imports a Python file as a module

-
-
Parameters:
-
    -
  • path (Path) – The path to the file to import

  • -
  • project_root (Path) – The root directory of the project (i.e., the “ocp” folder)

  • -
-
-
-
- -
-
-fairchem.core.common.utils.setup_experimental_imports(project_root: pathlib.Path) None#
-

Import selected directories of modules from the “experimental” subdirectory.

-

If a file named “.include” is present in the “experimental” subdirectory, -this will be read as a list of experimental subdirectories whose module -(including in any subsubdirectories) should be imported.

-
-
Parameters:
-

project_root – The root directory of the project (i.e., the “ocp” folder)

-
-
-
- -
-
-fairchem.core.common.utils._get_project_root() pathlib.Path#
-

Gets the root folder of the project (the “ocp” folder) -:return: The absolute path to the project root.

-
- -
-
-fairchem.core.common.utils.setup_imports(config: dict | None = None) None#
-
- -
-
-fairchem.core.common.utils.dict_set_recursively(dictionary, key_sequence, val) None#
-
- -
-
-fairchem.core.common.utils.parse_value(value)#
-

Parse string as Python literal if possible and fallback to string.

-
- -
-
-fairchem.core.common.utils.create_dict_from_args(args: list, sep: str = '.')#
-

Create a (nested) dictionary from console arguments. -Keys in different dictionary levels are separated by sep.

-
- -
-
-fairchem.core.common.utils.load_config(path: str, previous_includes: list | None = None)#
-
- -
-
-fairchem.core.common.utils.build_config(args, args_override)#
-
- -
-
-fairchem.core.common.utils.create_grid(base_config, sweep_file: str)#
-
- -
-
-fairchem.core.common.utils.save_experiment_log(args, jobs, configs)#
-
- -
-
-fairchem.core.common.utils.get_pbc_distances(pos, edge_index, cell, cell_offsets, neighbors, return_offsets: bool = False, return_distance_vec: bool = False)#
-
- -
-
-fairchem.core.common.utils.radius_graph_pbc(data, radius, max_num_neighbors_threshold, enforce_max_neighbors_strictly: bool = False, pbc=None)#
-
- -
-
-fairchem.core.common.utils.get_max_neighbors_mask(natoms, index, atom_distance, max_num_neighbors_threshold, degeneracy_tolerance: float = 0.01, enforce_max_strictly: bool = False)#
-

Give a mask that filters out edges so that each atom has at most -max_num_neighbors_threshold neighbors. -Assumes that index is sorted.

-

Enforcing the max strictly can force the arbitrary choice between -degenerate edges. This can lead to undesired behaviors; for -example, bulk formation energies which are not invariant to -unit cell choice.

-

A degeneracy tolerance can help prevent sudden changes in edge -existence from small changes in atom position, for example, -rounding errors, slab relaxation, temperature, etc.

-
- -
-
-fairchem.core.common.utils.get_pruned_edge_idx(edge_index, num_atoms: int, max_neigh: float = 1000000000.0) torch.Tensor#
-
- -
-
-fairchem.core.common.utils.merge_dicts(dict1: dict, dict2: dict)#
-

Recursively merge two dictionaries. Values in dict2 override values in dict1. If dict1 and dict2 contain a dictionary as a value, this will call itself recursively to merge these dictionaries. This does not modify the input dictionaries (creates an internal copy). Additionally returns a list of detected duplicates. Adapted from TUM-DAML/seml

-
-
Parameters:
-
    -
  • dict1 (dict) – First dict.

  • -
  • dict2 (dict) – Second dict. Values in dict2 will override values from dict1 in case they share the same key.

  • -
-
-
Returns:
-

return_dict – Merged dictionaries.

-
-
Return type:
-

dict

-
-
-
- -
-
-class fairchem.core.common.utils.SeverityLevelBetween(min_level: int, max_level: int)#
-

Bases: logging.Filter

-

Filter instances are used to perform arbitrary filtering of LogRecords.

-

Loggers and Handlers can optionally use Filter instances to filter -records as desired. The base filter class only allows events which are -below a certain point in the logger hierarchy. For example, a filter -initialized with “A.B” will allow events logged by loggers “A.B”, -“A.B.C”, “A.B.C.D”, “A.B.D” etc. but not “A.BB”, “B.A.B” etc. If -initialized with the empty string, all events are passed.

-
-
-filter(record) bool#
-

Determine if the specified record is to be logged.

-

Returns True if the record should be logged, or False otherwise. -If deemed appropriate, the record may be modified in-place.

-
- -
- -
-
-fairchem.core.common.utils.setup_logging() None#
-
- -
-
-fairchem.core.common.utils.compute_neighbors(data, edge_index)#
-
- -
-
-fairchem.core.common.utils.check_traj_files(batch, traj_dir) bool#
-
- -
-
-fairchem.core.common.utils.new_trainer_context(*, config: dict[str, Any], distributed: bool = False)#
-
- -
-
-fairchem.core.common.utils._resolve_scale_factor_submodule(model: torch.nn.Module, name: str)#
-
- -
-
-fairchem.core.common.utils._report_incompat_keys(model: torch.nn.Module, keys: torch.nn.modules.module._IncompatibleKeys, strict: bool = False) tuple[list[str], list[str]]#
-
- -
-
-fairchem.core.common.utils.load_state_dict(module: torch.nn.Module, state_dict: collections.abc.Mapping[str, torch.Tensor], strict: bool = True) tuple[list[str], list[str]]#
-
- -
-
-fairchem.core.common.utils.scatter_det(*args, **kwargs)#
-
- -
-
-fairchem.core.common.utils.get_commit_hash()#
-
- -
-
-fairchem.core.common.utils.cg_change_mat(ang_mom: int, device: str = 'cpu') torch.tensor#
-
- -
-
-fairchem.core.common.utils.irreps_sum(ang_mom: int) int#
-

Returns the sum of the dimensions of the irreps up to the specified angular momentum.

-
-
Parameters:
-

ang_mom – max angular momentum to sum up dimensions of irreps

-
-
-
- -
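For reference, under the usual convention that the irrep with angular momentum l has dimension 2l + 1, this sum equals (ang_mom + 1)**2; a small sketch of that arithmetic (not necessarily the exact implementation used here):

def irreps_sum_reference(ang_mom: int) -> int:
    # 1 + 3 + 5 + ... + (2 * ang_mom + 1) == (ang_mom + 1) ** 2
    return sum(2 * l + 1 for l in range(ang_mom + 1))

assert irreps_sum_reference(2) == 9   # l = 0, 1, 2 -> 1 + 3 + 5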
-
-fairchem.core.common.utils.update_config(base_config)#
-

Configs created prior to OCP 2.0 are organized a little differently than they are now. Update old configs to fit the new expected structure.

-
- -
-
-fairchem.core.common.utils.get_loss_module(loss_name)#
-
- -
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/datasets/_utils/index.html b/autoapi/fairchem/core/datasets/_utils/index.html
deleted file mode 100644
index 4a1410616..000000000
--- a/autoapi/fairchem/core/datasets/_utils/index.html
+++ /dev/null
@@ -1,653 +0,0 @@
fairchem.core.datasets._utils#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - -

rename_data_object_keys(→ torch_geometric.data.Data)

Rename data object keys

-
-
-fairchem.core.datasets._utils.rename_data_object_keys(data_object: torch_geometric.data.Data, key_mapping: dict[str, str]) torch_geometric.data.Data#
-

Rename data object keys

-
-
Parameters:
-
    -
  • data_object – data object

  • -
  • key_mapping – dictionary specifying keys to rename and new names {prev_key: new_key}

  • -
-
-
-
- -
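A brief usage sketch (the property names below are illustrative examples of a {prev_key: new_key} mapping):

import torch
from torch_geometric.data import Data
from fairchem.core.datasets._utils import rename_data_object_keys

data_object = Data(y=torch.tensor([0.0]), force=torch.zeros(3, 3))
data_object = rename_data_object_keys(data_object, {"y": "energy", "force": "forces"})
# data_object now exposes .energy and .forces instead of .y and .force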
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/datasets/ase_datasets/index.html b/autoapi/fairchem/core/datasets/ase_datasets/index.html
deleted file mode 100644
index b7a005552..000000000
--- a/autoapi/fairchem/core/datasets/ase_datasets/index.html
+++ /dev/null
@@ -1,1038 +0,0 @@

fairchem.core.datasets.ase_datasets#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - -

AseAtomsDataset

This is an abstract Dataset that includes helpful utilities for turning

AseReadDataset

This Dataset uses ase.io.read to load data from a directory on disk.

AseReadMultiStructureDataset

This Dataset can read multiple structures from each file using ase.io.read.

AseDBDataset

This Dataset connects to an ASE Database, allowing the storage of atoms objects

-
-
-

Functions#

- - - - - - -

apply_one_tags(atoms[, skip_if_nonzero, skip_always])

This function will apply tags of 1 to an ASE atoms object.

-
-
-fairchem.core.datasets.ase_datasets.apply_one_tags(atoms: ase.Atoms, skip_if_nonzero: bool = True, skip_always: bool = False)#
-

This function will apply tags of 1 to an ASE atoms object. It is used as an atoms_transform in the datasets contained in this file.

-

Certain models will treat atoms differently depending on their tags. For example, GemNet-OC by default will only compute triplet and quadruplet interactions for atoms with non-zero tags. This model throws an error if there are no tagged atoms. For this reason, the default behavior is to tag atoms in structures with no tags.

-
-
Parameters:
-
    -
  • skip_if_nonzero (bool) – If at least one atom has a nonzero tag, do not tag any atoms

  • -
  • skip_always (bool) – Do not apply any tags. This arg exists so that this function can be disabled -without needing to pass a callable (which is currently difficult to do with main.py)

  • -
-
-
-
- -
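A short sketch with a toy slab, assuming the function returns the tagged Atoms object (it is used as an atoms_transform, so it is applied once per structure):

from ase.build import fcc111
from fairchem.core.datasets.ase_datasets import apply_one_tags

atoms = fcc111("Cu", size=(2, 2, 3), vacuum=10.0)
atoms.set_tags([0] * len(atoms))          # structure arrives with no tags
atoms = apply_one_tags(atoms)             # default behavior: every atom is tagged 1
# With skip_if_nonzero=True (the default), structures that already carry a
# nonzero tag are returned unchanged.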
-
-class fairchem.core.datasets.ase_datasets.AseAtomsDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
-

Bases: torch.utils.data.Dataset, abc.ABC

-

This is an abstract Dataset that includes helpful utilities for turning ASE atoms objects into OCP-usable data objects. This should not be instantiated directly, as get_atoms_object and load_dataset_get_ids are not implemented in this base class.

-
-
Derived classes must add at least two things:

self.get_atoms_object(id): a function that takes an identifier and returns a corresponding atoms object

-
-
self.load_dataset_get_ids(config: dict): This function is responsible for any initialization/loads

of the dataset and importantly must return a list of all possible identifiers that can be passed into self.get_atoms_object(id)

-
-
-
-
-

Identifiers need not be any particular type.

-
-
-__len__() int#
-
- -
-
-__getitem__(idx)#
-
- -
-
-abstract get_atoms(idx: str | int) ase.Atoms#
-
- -
-
-abstract _load_dataset_get_ids(config)#
-
- -
-
-abstract get_relaxed_energy(identifier)#
-
- -
-
-close_db() None#
-
- -
-
-get_metadata(num_samples: int = 100) dict#
-
- -
- -
-
-class fairchem.core.datasets.ase_datasets.AseReadDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
-

Bases: AseAtomsDataset

-

This Dataset uses ase.io.read to load data from a directory on disk. This is intended for small-scale testing and demonstrations of OCP. Larger datasets are better served by the efficiency of other dataset types such as LMDB.

-

For a full list of ASE-readable filetypes, see https://wiki.fysik.dtu.dk/ase/ase/io/io.html

-
-
Parameters:
-
    -
  • config (dict) –

    src (str): The source folder that contains your ASE-readable files

    -
    -
    pattern (str): Filepath matching each file you want to read

    ex. “*/POSCAR”, “*.cif”, “*.xyz”; search recursively with two wildcards: “**/POSCAR” or “**/*.cif”

    -
    -
    a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs()

    default options will work for most users

    -

    If you are using this for a training dataset, set “r_energy”: True, “r_forces”: True, and/or “r_stress”: True as appropriate. In that case, energy/forces must be in the files you read (ex. OUTCAR)

    -
    -
    -

    ase_read_args (dict): Keyword arguments for ase.io.read()

    -
    -
    keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need

    to iterate over a dataset many times (e.g. training for many epochs). Not recommended for large datasets.

    -
    -
    include_relaxed_energy (bool): Include the relaxed energy in the resulting data object.

    The relaxed structure is assumed to be the final structure in the file (e.g. the last frame of a .traj).

    -
    -
    -

    atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable

    -

    transform_args (dict): Additional keyword arguments for the transform callable

    -
    -
    key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used

    in the model with the corresponding property as it was named in the dataset. Only need to use if the name is different.

    -
    -
    -

  • -
  • atoms_transform (callable, optional) – Additional preprocessing function applied to the Atoms object. Useful for applying tags, for example.

  • -
-
-
-
-
-_load_dataset_get_ids(config) list[pathlib.Path]#
-
- -
-
-get_atoms(idx: str | int) ase.Atoms#
-
- -
-
-get_relaxed_energy(identifier) float#
-
- -
- -
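A minimal configuration sketch for this class (the path and pattern below are hypothetical):

from fairchem.core.datasets.ase_datasets import AseReadDataset

config = {
    "src": "data/my_relaxations/",                      # hypothetical folder of ASE-readable files
    "pattern": "**/*.traj",
    "a2g_args": {"r_energy": True, "r_forces": True},   # needed if used as a training dataset
}
dataset = AseReadDataset(config)
sample = dataset[0]        # converted data object
print(len(dataset))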
-
-class fairchem.core.datasets.ase_datasets.AseReadMultiStructureDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
-

Bases: AseAtomsDataset

-

This Dataset can read multiple structures from each file using ase.io.read. The disadvantage is that all files must be read at startup. This is a significant cost for large datasets.

-

This is intended for small-scale testing and demonstrations of OCP. -Larger datasets are better served by the efficiency of other dataset types -such as LMDB.

-

For a full list of ASE-readable filetypes, see -https://wiki.fysik.dtu.dk/ase/ase/io/io.html

-
-
Parameters:
-
    -
  • config (dict) –

    src (str): The source folder that contains your ASE-readable files

    -
    -
    pattern (str): Filepath matching each file you want to read

    ex. “*.traj”, “*.xyz”; search recursively with two wildcards: “**/POSCAR” or “**/*.cif”

    -
    -
    index_file (str): Filepath to an indexing file, which contains each filename

    and the number of structures contained in each file. For instance:

    -

    /path/to/relaxation1.traj 200
    /path/to/relaxation2.traj 150

    -

    This will overrule the src and pattern that you specify!

    -
    -
    a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs()

    default options will work for most users

    -

    If you are using this for a training dataset, set -“r_energy”:True, “r_forces”:True, and/or “r_stress”:True as appropriate -In that case, energy/forces must be in the files you read (ex. OUTCAR)

    -
    -
    -

    ase_read_args (dict): Keyword arguments for ase.io.read()

    -
    -
    keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need

    to iterate over a dataset many times (e.g. training for many epochs). -Not recommended for large datasets.

    -
    -
    include_relaxed_energy (bool): Include the relaxed energy in the resulting data object.

    The relaxed structure is assumed to be the final structure in the file -(e.g. the last frame of a .traj).

    -
    -
    -

    use_tqdm (bool): Use TQDM progress bar when initializing dataset

    -

    atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable

    -

    transform_args (dict): Additional keyword arguments for the transform callable

    -
    -
    key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used

    in the model with the corresponding property as it was named in the dataset. Only need to use if -the name is different.

    -
    -
    -

  • -
  • atoms_transform (callable, optional) – Additional preprocessing function applied to the Atoms -object. Useful for applying tags, for example.

  • -
  • transform (callable, optional) – Additional preprocessing function for the Data object

  • -
-
-
-
-
-_load_dataset_get_ids(config) list[str]#
-
- -
-
-get_atoms(idx: str) ase.Atoms#
-
- -
-
-get_metadata(num_samples: int = 100) dict#
-
- -
-
-get_relaxed_energy(identifier) float#
-
- -
- -
-
-class fairchem.core.datasets.ase_datasets.AseDBDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
-

Bases: AseAtomsDataset

-

This Dataset connects to an ASE Database, allowing the storage of atoms objects with a variety of backends including JSON, SQLite, and database server options.

-

For more information, see: https://databases.fysik.dtu.dk/ase/ase/db/db.html

-
-
Parameters:
-
    -
  • config (dict) –

    -
    src (str): Either
      -
    • the path to an ASE DB,

    • -
    • the connection address of an ASE DB,

    • -
    • a folder with multiple ASE DBs,

    • -
    • a list of folders with ASE DBs

    • -
    • a glob string to use to find ASE DBs, or

    • -
    • a list of ASE db paths/addresses.

    • -
    -

    If a folder, every file will be attempted as an ASE DB, and warnings are raised for any files that can’t connect cleanly.

    -

    Note that for large datasets, ID loading can be slow and there can be many ids, so it’s advised to make loading the id list as easy as possible. There is not an obvious way to get a full list of ids from most ASE dbs besides simply looping through the entire dataset. See the AseLMDBDataset, which was written with this use case in mind.

    -
    -
    -

    connect_args (dict): Keyword arguments for ase.db.connect()

    -
    -
    select_args (dict): Keyword arguments for ase.db.select()

    You can use this to query/filter your database

    -
    -
    a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs()

    default options will work for most users

    -

    If you are using this for a training dataset, set -“r_energy”:True, “r_forces”:True, and/or “r_stress”:True as appropriate -In that case, energy/forces must be in the database

    -
    -
    keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need

    to iterate over a dataset many times (e.g. training for many epochs). -Not recommended for large datasets.

    -
    -
    -

    atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable

    -
    -
    transforms (dict[str, dict]): Dictionary specifying data transforms as {transform_function: config}

    where config is a dictionary specifying arguments to the transform_function

    -
    -
    key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used

    in the model with the corresponding property as it was named in the dataset. Only need to use if -the name is different.

    -
    -
    -

  • -
  • atoms_transform (callable, optional) – Additional preprocessing function applied to the Atoms -object. Useful for applying tags, for example.

  • -
  • transform (callable, optional) – deprecated?

  • -
-
-
-
-
-_load_dataset_get_ids(config: dict) list[int]#
-
- -
-
-get_atoms(idx: int) ase.Atoms#
-

Get atoms object corresponding to datapoint idx. Useful to read other properties not in data object.

Parameters:
idx (int) – index in dataset

-
-
Returns:
-

ASE atoms corresponding to datapoint idx

-
-
Return type:
-

atoms

-
-
-
- -
-
-static connect_db(address: str | pathlib.Path, connect_args: dict | None = None) ase.db.core.Database#
-
- -
-
-close_db() None#
-
- -
-
-get_metadata(num_samples: int = 100) dict#
-
- -
-
-abstract get_relaxed_energy(identifier)#
-
- -
- -
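A corresponding configuration sketch for the database-backed dataset (the file name is hypothetical):

from fairchem.core.datasets.ase_datasets import AseDBDataset

config = {
    "src": "my_structures.db",                          # path or connection address of an ASE DB
    "a2g_args": {"r_energy": True, "r_forces": True},
}
dataset = AseDBDataset(config)
atoms = dataset.get_atoms(0)   # raw ase.Atoms for the first datapoint
data = dataset[0]              # converted data object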
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/datasets/embeddings/atomic_radii/index.html b/autoapi/fairchem/core/datasets/embeddings/atomic_radii/index.html
deleted file mode 100644
index cb7439562..000000000
--- a/autoapi/fairchem/core/datasets/embeddings/atomic_radii/index.html
+++ /dev/null
@@ -1,627 +0,0 @@

fairchem.core.datasets.embeddings.atomic_radii#

-

Atomic radii in picometers

-

NaN stored for unavailable parameters.

-
-

Module Contents#

-
-
-fairchem.core.datasets.embeddings.atomic_radii.ATOMIC_RADII#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/datasets/embeddings/continuous_embeddings/index.html b/autoapi/fairchem/core/datasets/embeddings/continuous_embeddings/index.html
deleted file mode 100644
index 2907170cf..000000000
--- a/autoapi/fairchem/core/datasets/embeddings/continuous_embeddings/index.html
+++ /dev/null
@@ -1,639 +0,0 @@

fairchem.core.datasets.embeddings.continuous_embeddings#

-

CGCNN-like embeddings using continuous values instead of original k-hot.

-
-
Properties:

Group number, Period number, Electronegativity, Covalent radius, Valence electrons, First ionization energy, Electron affinity, Block, Atomic Volume

-
-
-

NaN stored for unavailable parameters.

-
-

Module Contents#

-
-
-fairchem.core.datasets.embeddings.continuous_embeddings.CONTINUOUS_EMBEDDINGS#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/datasets/embeddings/index.html b/autoapi/fairchem/core/datasets/embeddings/index.html
deleted file mode 100644
index 58c38e19b..000000000
--- a/autoapi/fairchem/core/datasets/embeddings/index.html
+++ /dev/null
@@ -1,659 +0,0 @@

fairchem.core.datasets.embeddings#

-
-

Submodules#

- -
-
-

Package Contents#

-
-
-fairchem.core.datasets.embeddings.ATOMIC_RADII#
-
- -
-
-fairchem.core.datasets.embeddings.CONTINUOUS_EMBEDDINGS#
-
- -
-
-fairchem.core.datasets.embeddings.KHOT_EMBEDDINGS#
-
- -
-
-fairchem.core.datasets.embeddings.QMOF_KHOT_EMBEDDINGS#
-
- -
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/datasets/embeddings/khot_embeddings/index.html b/autoapi/fairchem/core/datasets/embeddings/khot_embeddings/index.html
deleted file mode 100644
index fc6c5bf20..000000000
--- a/autoapi/fairchem/core/datasets/embeddings/khot_embeddings/index.html
+++ /dev/null
@@ -1,629 +0,0 @@

fairchem.core.datasets.embeddings.khot_embeddings#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-

Original CGCNN k-hot elemental embeddings.

-
-

Module Contents#

-
-
-fairchem.core.datasets.embeddings.khot_embeddings.KHOT_EMBEDDINGS#
-
- -
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/datasets/embeddings/qmof_khot_embeddings/index.html b/autoapi/fairchem/core/datasets/embeddings/qmof_khot_embeddings/index.html
deleted file mode 100644
index 51bbfcfa7..000000000
--- a/autoapi/fairchem/core/datasets/embeddings/qmof_khot_embeddings/index.html
+++ /dev/null
@@ -1,631 +0,0 @@

fairchem.core.datasets.embeddings.qmof_khot_embeddings#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-

k-hot elemental embeddings from QMOF, motivated by the following GitHub issue threads: txie-93/cgcnn#2 and arosen93/QMOF#18

-
-

Module Contents#

-
-
-fairchem.core.datasets.embeddings.qmof_khot_embeddings.QMOF_KHOT_EMBEDDINGS#
-
- -
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/datasets/index.html b/autoapi/fairchem/core/datasets/index.html
deleted file mode 100644
index 503f5c9db..000000000
--- a/autoapi/fairchem/core/datasets/index.html
+++ /dev/null
@@ -1,1304 +0,0 @@

fairchem.core.datasets#

-
-

Subpackages#

- -
-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - - - - - - - - - - -

AseDBDataset

This Dataset connects to an ASE Database, allowing the storage of atoms objects

AseReadDataset

This Dataset uses ase.io.read to load data from a directory on disk.

AseReadMultiStructureDataset

This Dataset can read multiple structures from each file using ase.io.read.

LMDBDatabase

Base class for all databases.

LmdbDataset

An abstract class representing a Dataset.

SinglePointLmdbDataset

An abstract class representing a Dataset.

TrajectoryLmdbDataset

An abstract class representing a Dataset.

OC22LmdbDataset

Dataset class to load from LMDB files containing relaxation

-
-
-

Functions#

- - - - - - -

data_list_collater(→ torch_geometric.data.data.BaseData)

-
-
-class fairchem.core.datasets.AseDBDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
-

Bases: AseAtomsDataset

-

This Dataset connects to an ASE Database, allowing the storage of atoms objects -with a variety of backends including JSON, SQLite, and database server options.

-

For more information, see: -https://databases.fysik.dtu.dk/ase/ase/db/db.html

-
-
Parameters:
-
    -
  • config (dict) –

    -
    src (str): Either
      -
    • the path an ASE DB,

    • -
    • the connection address of an ASE DB,

    • -
    • a folder with multiple ASE DBs,

    • -
    • a list of folders with ASE DBs

    • -
    • a glob string to use to find ASE DBs, or

    • -
    • a list of ASE db paths/addresses.

    • -
    -

    If a folder, every file will be attempted as an ASE DB, and warnings -are raised for any files that can’t connect cleanly

    -

    Note that for large datasets, ID loading can be slow and there can be many -ids, so it’s advised to make loading the id list as easy as possible. There is not -an obvious way to get a full list of ids from most ASE dbs besides simply looping -through the entire dataset. See the AseLMDBDataset which was written with this usecase -in mind.

    -
    -
    -

    connect_args (dict): Keyword arguments for ase.db.connect()

    -
    -
    select_args (dict): Keyword arguments for ase.db.select()

    You can use this to query/filter your database

    -
    -
    a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs()

    default options will work for most users

    -

    If you are using this for a training dataset, set -“r_energy”:True, “r_forces”:True, and/or “r_stress”:True as appropriate -In that case, energy/forces must be in the database

    -
    -
    keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need

    to iterate over a dataset many times (e.g. training for many epochs). -Not recommended for large datasets.

    -
    -
    -

    atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable

    -
    -
    transforms (dict[str, dict]): Dictionary specifying data transforms as {transform_function: config}

    where config is a dictionary specifying arguments to the transform_function

    -
    -
    key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used

    in the model with the corresponding property as it was named in the dataset. Only need to use if -the name is different.

    -
    -
    -

  • -
  • atoms_transform (callable, optional) – Additional preprocessing function applied to the Atoms -object. Useful for applying tags, for example.

  • -
  • transform (callable, optional) – deprecated?

  • -
-
-
-
-
-_load_dataset_get_ids(config: dict) list[int]#
-
- -
-
-get_atoms(idx: int) ase.Atoms#
-

Get atoms object corresponding to datapoint idx. Useful to read other properties not in data object. -:param idx: index in dataset -:type idx: int

-
-
Returns:
-

ASE atoms corresponding to datapoint idx

-
-
Return type:
-

atoms

-
-
-
- -
-
-static connect_db(address: str | pathlib.Path, connect_args: dict | None = None) ase.db.core.Database#
-
- -
-
-close_db() None#
-
- -
-
-get_metadata(num_samples: int = 100) dict#
-
- -
-
-abstract get_relaxed_energy(identifier)#
-
- -
- -
-
-class fairchem.core.datasets.AseReadDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
-

Bases: AseAtomsDataset

-

This Dataset uses ase.io.read to load data from a directory on disk. -This is intended for small-scale testing and demonstrations of OCP. -Larger datasets are better served by the efficiency of other dataset types -such as LMDB.

-

For a full list of ASE-readable filetypes, see -https://wiki.fysik.dtu.dk/ase/ase/io/io.html

-
-
Parameters:
-
    -
  • config (dict) –

    src (str): The source folder that contains your ASE-readable files

    -
    -
    pattern (str): Filepath matching each file you want to read

    ex. “/POSCAR”, “.cif”, “.xyz” -search recursively with two wildcards: “*/POSCAR” or “**/*.cif”

    -
    -
    a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs()

    default options will work for most users

    -

    If you are using this for a training dataset, set -“r_energy”:True, “r_forces”:True, and/or “r_stress”:True as appropriate -In that case, energy/forces must be in the files you read (ex. OUTCAR)

    -
    -
    -

    ase_read_args (dict): Keyword arguments for ase.io.read()

    -
    -
    keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need

    to iterate over a dataset many times (e.g. training for many epochs). -Not recommended for large datasets.

    -
    -
    include_relaxed_energy (bool): Include the relaxed energy in the resulting data object.

    The relaxed structure is assumed to be the final structure in the file -(e.g. the last frame of a .traj).

    -
    -
    -

    atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable

    -

    transform_args (dict): Additional keyword arguments for the transform callable

    -
    -
    key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used

    in the model with the corresponding property as it was named in the dataset. Only need to use if -the name is different.

    -
    -
    -

  • -
  • atoms_transform (callable, optional) – Additional preprocessing function applied to the Atoms -object. Useful for applying tags, for example.

  • -
-
-
-
-
-_load_dataset_get_ids(config) list[pathlib.Path]#
-
- -
-
-get_atoms(idx: str | int) ase.Atoms#
-
- -
-
-get_relaxed_energy(identifier) float#
-
- -
- -
-
-class fairchem.core.datasets.AseReadMultiStructureDataset(config: dict, atoms_transform: Callable[[ase.Atoms, Any, Ellipsis], ase.Atoms] = apply_one_tags)#
-

Bases: AseAtomsDataset

-

This Dataset can read multiple structures from each file using ase.io.read. -The disadvantage is that all files must be read at startup. -This is a significant cost for large datasets.

-

This is intended for small-scale testing and demonstrations of OCP. -Larger datasets are better served by the efficiency of other dataset types -such as LMDB.

-

For a full list of ASE-readable filetypes, see -https://wiki.fysik.dtu.dk/ase/ase/io/io.html

-
-
Parameters:
-
    -
  • config (dict) –

    src (str): The source folder that contains your ASE-readable files

    -
    -
    pattern (str): Filepath matching each file you want to read

    ex. “.traj”, “.xyz” -search recursively with two wildcards: “/POSCAR” or “/*.cif”

    -
    -
    index_file (str): Filepath to an indexing file, which contains each filename

    and the number of structures contained in each file. For instance:

    -

    /path/to/relaxation1.traj 200 -/path/to/relaxation2.traj 150

    -

    This will overrule the src and pattern that you specify!

    -
    -
    a2g_args (dict): Keyword arguments for fairchem.core.preprocessing.AtomsToGraphs()

    default options will work for most users

    -

    If you are using this for a training dataset, set -“r_energy”:True, “r_forces”:True, and/or “r_stress”:True as appropriate -In that case, energy/forces must be in the files you read (ex. OUTCAR)

    -
    -
    -

    ase_read_args (dict): Keyword arguments for ase.io.read()

    -
    -
    keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need

    to iterate over a dataset many times (e.g. training for many epochs). -Not recommended for large datasets.

    -
    -
    include_relaxed_energy (bool): Include the relaxed energy in the resulting data object.

    The relaxed structure is assumed to be the final structure in the file -(e.g. the last frame of a .traj).

    -
    -
    -

    use_tqdm (bool): Use TQDM progress bar when initializing dataset

    -

    atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable

    -

    transform_args (dict): Additional keyword arguments for the transform callable

    -
    -
    key_mapping (dict[str, str]): Dictionary specifying a mapping between the name of a property used

    in the model with the corresponding property as it was named in the dataset. Only need to use if -the name is different.

    -
    -
    -

  • -
  • atoms_transform (callable, optional) – Additional preprocessing function applied to the Atoms -object. Useful for applying tags, for example.

  • -
  • transform (callable, optional) – Additional preprocessing function for the Data object

  • -
-
-
-
-
-_load_dataset_get_ids(config) list[str]#
-
- -
-
-get_atoms(idx: str) ase.Atoms#
-
- -
-
-get_metadata(num_samples: int = 100) dict#
-
- -
-
-get_relaxed_energy(identifier) float#
-
- -
- -
-
-class fairchem.core.datasets.LMDBDatabase(filename: str | pathlib.Path | None = None, create_indices: bool = True, use_lock_file: bool = False, serial: bool = False, readonly: bool = False, *args, **kwargs)#
-

Bases: ase.db.core.Database

-

Base class for all databases.

-
-
-property metadata#
-

Load the metadata from the DB if present

-
- -
-
-property _nextid#
-

Get the id of the next row to be written

-
- -
-
-__enter__() typing_extensions.Self#
-
- -
-
-__exit__(exc_type, exc_value, tb) None#
-
- -
-
-close() None#
-
- -
-
-_write(atoms: ase.Atoms | ase.db.row.AtomsRow, key_value_pairs: dict, data: dict | None, idx: int | None = None) None#
-
- -
-
-_update(idx: int, key_value_pairs: dict | None = None, data: dict | None = None)#
-
- -
-
-_write_deleted_ids()#
-
- -
-
-delete(ids: list[int]) None#
-

Delete rows.

-
- -
-
-_get_row(idx: int, include_data: bool = True)#
-
- -
-
-_get_row_by_index(index: int, include_data: bool = True)#
-

Auxiliary function to get the ith entry, rather than a specific id

-
- -
-
-_select(keys, cmps: list[tuple[str, str, str]], explain: bool = False, verbosity: int = 0, limit: int | None = None, offset: int = 0, sort: str | None = None, include_data: bool = True, columns: str = 'all')#
-
- -
-
-count(selection=None, **kwargs) int#
-

Count rows.

-

See the select() method for the selection syntax. Use db.count() or -len(db) to count all rows.

-
- -
-
-_load_ids() None#
-

Load ids from the DB

-

Since ASE db ids are mostly 1-N integers, but can be missing entries -if ids have been deleted. To save space and operating under the assumption -that there will probably not be many deletions in most OCP datasets, -we just store the deleted ids.

-
- -
- -
-
-class fairchem.core.datasets.LmdbDataset(config)#
-

Bases: torch.utils.data.Dataset[T_co]

-

An abstract class representing a Dataset.

-

All datasets that represent a map from keys to data samples should subclass -it. All subclasses should overwrite __getitem__(), supporting fetching a -data sample for a given key. Subclasses could also optionally overwrite -__len__(), which is expected to return the size of the dataset by many -Sampler implementations and the default options -of DataLoader. Subclasses could also -optionally implement __getitems__(), for speedup batched samples -loading. This method accepts list of indices of samples of batch and returns -list of samples.

-
-

Note

-

DataLoader by default constructs an index -sampler that yields integral indices. To make it work with a map-style -dataset with non-integral indices/keys, a custom sampler must be provided.

-
-
-
-metadata_path: pathlib.Path#
-
- -
-
-sharded: bool#
-

Dataset class to load from LMDB files containing relaxation -trajectories or single point computations. -Useful for Structure to Energy & Force (S2EF), Initial State to -Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks. -The keys in the LMDB must be integers (stored as ascii objects) starting -from 0 through the length of the LMDB. For historical reasons any key named -“length” is ignored since that was used to infer length of many lmdbs in the same -folder, but lmdb lengths are now calculated directly from the number of keys. -:param config: Dataset configuration -:type config: dict

-
- -
-
-__len__() int#
-
- -
-
-__getitem__(idx: int) T_co#
-
- -
-
-connect_db(lmdb_path: pathlib.Path | None = None) lmdb.Environment#
-
- -
-
-close_db() None#
-
- -
-
-get_metadata(num_samples: int = 100)#
-
- -
- -
-
-class fairchem.core.datasets.SinglePointLmdbDataset(config, transform=None)#
-

Bases: LmdbDataset[torch_geometric.data.data.BaseData]

-

An abstract class representing a Dataset.

-

All datasets that represent a map from keys to data samples should subclass -it. All subclasses should overwrite __getitem__(), supporting fetching a -data sample for a given key. Subclasses could also optionally overwrite -__len__(), which is expected to return the size of the dataset by many -Sampler implementations and the default options -of DataLoader. Subclasses could also -optionally implement __getitems__(), for speedup batched samples -loading. This method accepts list of indices of samples of batch and returns -list of samples.

-
-

Note

-

DataLoader by default constructs an index -sampler that yields integral indices. To make it work with a map-style -dataset with non-integral indices/keys, a custom sampler must be provided.

-
-
- -
-
-class fairchem.core.datasets.TrajectoryLmdbDataset(config, transform=None)#
-

Bases: LmdbDataset[torch_geometric.data.data.BaseData]

-

An abstract class representing a Dataset.

-

All datasets that represent a map from keys to data samples should subclass -it. All subclasses should overwrite __getitem__(), supporting fetching a -data sample for a given key. Subclasses could also optionally overwrite -__len__(), which is expected to return the size of the dataset by many -Sampler implementations and the default options -of DataLoader. Subclasses could also -optionally implement __getitems__(), for speedup batched samples -loading. This method accepts list of indices of samples of batch and returns -list of samples.

-
-

Note

-

DataLoader by default constructs an index -sampler that yields integral indices. To make it work with a map-style -dataset with non-integral indices/keys, a custom sampler must be provided.

-
-
- -
-
-fairchem.core.datasets.data_list_collater(data_list: list[torch_geometric.data.data.BaseData], otf_graph: bool = False) torch_geometric.data.data.BaseData#
-
- -
-
-class fairchem.core.datasets.OC22LmdbDataset(config, transform=None)#
-

Bases: torch.utils.data.Dataset

-

Dataset class to load from LMDB files containing relaxation -trajectories or single point computations.

-

Useful for Structure to Energy & Force (S2EF), Initial State to -Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks.

-

The keys in the LMDB must be integers (stored as ascii objects) starting -from 0 through the length of the LMDB. For historical reasons any key named -“length” is ignored since that was used to infer length of many lmdbs in the same -folder, but lmdb lengths are now calculated directly from the number of keys.

-
-
Parameters:
-
    -
  • config (dict) – Dataset configuration

  • -
  • transform (callable, optional) – Data transform function. -(default: None)

  • -
-
-
-
-
-__len__() int#
-
- -
-
-__getitem__(idx)#
-
- -
-
-connect_db(lmdb_path=None)#
-
- -
-
-close_db() None#
-
- -
- -
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/datasets/lmdb_database/index.html b/autoapi/fairchem/core/datasets/lmdb_database/index.html
deleted file mode 100644
index 2f31dedff..000000000
--- a/autoapi/fairchem/core/datasets/lmdb_database/index.html
+++ /dev/null
@@ -1,781 +0,0 @@

fairchem.core.datasets.lmdb_database#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is modified from the ASE db json backend and is thus licensed under the corresponding LGPL2.1 license.

-

The ASE notice for the LGPL2.1 license is available here: ase/ase/-/blob/master/LICENSE

-
-

Module Contents#

-
-

Classes#

- - - - - - -

LMDBDatabase

Base class for all databases.

-
-
-

Attributes#

- - - - - - -

RESERVED_KEYS

-
-
-fairchem.core.datasets.lmdb_database.RESERVED_KEYS = ['nextid', 'metadata', 'deleted_ids']#
-
- -
-
-class fairchem.core.datasets.lmdb_database.LMDBDatabase(filename: str | pathlib.Path | None = None, create_indices: bool = True, use_lock_file: bool = False, serial: bool = False, readonly: bool = False, *args, **kwargs)#
-

Bases: ase.db.core.Database

-

Base class for all databases.

-
-
-property metadata#
-

Load the metadata from the DB if present

-
- -
-
-property _nextid#
-

Get the id of the next row to be written

-
- -
-
-__enter__() typing_extensions.Self#
-
- -
-
-__exit__(exc_type, exc_value, tb) None#
-
- -
-
-close() None#
-
- -
-
-_write(atoms: ase.Atoms | ase.db.row.AtomsRow, key_value_pairs: dict, data: dict | None, idx: int | None = None) None#
-
- -
-
-_update(idx: int, key_value_pairs: dict | None = None, data: dict | None = None)#
-
- -
-
-_write_deleted_ids()#
-
- -
-
-delete(ids: list[int]) None#
-

Delete rows.

-
- -
-
-_get_row(idx: int, include_data: bool = True)#
-
- -
-
-_get_row_by_index(index: int, include_data: bool = True)#
-

Auxiliary function to get the ith entry, rather than a specific id

-
- -
-
-_select(keys, cmps: list[tuple[str, str, str]], explain: bool = False, verbosity: int = 0, limit: int | None = None, offset: int = 0, sort: str | None = None, include_data: bool = True, columns: str = 'all')#
-
- -
-
-count(selection=None, **kwargs) int#
-

Count rows.

-

See the select() method for the selection syntax. Use db.count() or len(db) to count all rows.

-
- -
-
-_load_ids() None#
-

Load ids from the DB

-

ASE db ids are mostly 1-N integers, but entries can be missing if ids have been deleted. To save space, and operating under the assumption that there will probably not be many deletions in most OCP datasets, we just store the deleted ids.

-
- -
- -
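Because this class subclasses ase.db.core.Database, the standard ASE database API applies; a small write/read sketch (the file name is hypothetical):

from ase.build import molecule
from fairchem.core.datasets.lmdb_database import LMDBDatabase

with LMDBDatabase("sample.aselmdb") as db:       # opened for writing
    db.write(molecule("H2O"))
    db.write(molecule("CO2"))

with LMDBDatabase("sample.aselmdb", readonly=True) as db:
    print(len(db))                               # 2 rows
    row = db.get(1)                              # standard ase.db row access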
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/datasets/lmdb_dataset/index.html b/autoapi/fairchem/core/datasets/lmdb_dataset/index.html
deleted file mode 100644
index 6e3f62cdc..000000000
--- a/autoapi/fairchem/core/datasets/lmdb_dataset/index.html
+++ /dev/null
@@ -1,816 +0,0 @@
-

fairchem.core.datasets.lmdb_dataset#

-

Copyright (c) Meta, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - -

LmdbDataset

An abstract class representing a Dataset.

SinglePointLmdbDataset

An abstract class representing a Dataset.

TrajectoryLmdbDataset

An abstract class representing a Dataset.

-
-
-

Functions#

- - - - - - -

data_list_collater(→ torch_geometric.data.data.BaseData)

-
-
-

Attributes#

- - - - - - -

T_co

-
-
-fairchem.core.datasets.lmdb_dataset.T_co#
-
- -
-
-class fairchem.core.datasets.lmdb_dataset.LmdbDataset(config)#
-

Bases: torch.utils.data.Dataset[T_co]

-

An abstract class representing a Dataset.

-

All datasets that represent a map from keys to data samples should subclass -it. All subclasses should overwrite __getitem__(), supporting fetching a -data sample for a given key. Subclasses could also optionally overwrite -__len__(), which is expected to return the size of the dataset by many -Sampler implementations and the default options -of DataLoader. Subclasses could also -optionally implement __getitems__(), for speedup batched samples -loading. This method accepts list of indices of samples of batch and returns -list of samples.

-
-

Note

-

DataLoader by default constructs an index -sampler that yields integral indices. To make it work with a map-style -dataset with non-integral indices/keys, a custom sampler must be provided.

-
-
-
-metadata_path: pathlib.Path#
-
- -
-
-sharded: bool#
-

Dataset class to load from LMDB files containing relaxation trajectories or single point computations. Useful for Structure to Energy & Force (S2EF), Initial State to Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks. The keys in the LMDB must be integers (stored as ascii objects) starting from 0 through the length of the LMDB. For historical reasons any key named “length” is ignored since that was used to infer the length of many lmdbs in the same folder, but lmdb lengths are now calculated directly from the number of keys.

Parameters:
config (dict) – Dataset configuration

-
- -
-
-__len__() int#
-
- -
-
-__getitem__(idx: int) T_co#
-
- -
-
-connect_db(lmdb_path: pathlib.Path | None = None) lmdb.Environment#
-
- -
-
-close_db() None#
-
- -
-
-get_metadata(num_samples: int = 100)#
-
- -
- -
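A usage sketch; the "src" key follows the dataset configuration convention used elsewhere in these docs and is an assumption here, and the path is hypothetical:

from fairchem.core.datasets.lmdb_dataset import LmdbDataset

dataset = LmdbDataset({"src": "data/s2ef/train/"})   # hypothetical folder of *.lmdb files
print(len(dataset))
sample = dataset[0]                                  # a torch_geometric data object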
-
-class fairchem.core.datasets.lmdb_dataset.SinglePointLmdbDataset(config, transform=None)#
-

Bases: LmdbDataset[torch_geometric.data.data.BaseData]

-

An abstract class representing a Dataset.

-

All datasets that represent a map from keys to data samples should subclass -it. All subclasses should overwrite __getitem__(), supporting fetching a -data sample for a given key. Subclasses could also optionally overwrite -__len__(), which is expected to return the size of the dataset by many -Sampler implementations and the default options -of DataLoader. Subclasses could also -optionally implement __getitems__(), for speedup batched samples -loading. This method accepts list of indices of samples of batch and returns -list of samples.

-
-

Note

-

DataLoader by default constructs an index -sampler that yields integral indices. To make it work with a map-style -dataset with non-integral indices/keys, a custom sampler must be provided.

-
-
- -
-
-class fairchem.core.datasets.lmdb_dataset.TrajectoryLmdbDataset(config, transform=None)#
-

Bases: LmdbDataset[torch_geometric.data.data.BaseData]

-

An abstract class representing a Dataset.

-

All datasets that represent a map from keys to data samples should subclass -it. All subclasses should overwrite __getitem__(), supporting fetching a -data sample for a given key. Subclasses could also optionally overwrite -__len__(), which is expected to return the size of the dataset by many -Sampler implementations and the default options -of DataLoader. Subclasses could also -optionally implement __getitems__(), for speedup batched samples -loading. This method accepts list of indices of samples of batch and returns -list of samples.

-
-

Note

-

DataLoader by default constructs an index -sampler that yields integral indices. To make it work with a map-style -dataset with non-integral indices/keys, a custom sampler must be provided.

-
-
- -
-
-fairchem.core.datasets.lmdb_dataset.data_list_collater(data_list: list[torch_geometric.data.data.BaseData], otf_graph: bool = False) torch_geometric.data.data.BaseData#
-
- -
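A short collation sketch using the function above (dataset path hypothetical; set otf_graph to match whether graphs are generated on the fly):

from fairchem.core.datasets.lmdb_dataset import LmdbDataset, data_list_collater

dataset = LmdbDataset({"src": "data/s2ef/train/"})   # hypothetical path, as in the sketch above
batch = data_list_collater([dataset[i] for i in range(4)], otf_graph=True)
# `batch` is a single torch_geometric BaseData object holding the four samples.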
\ No newline at end of file
diff --git a/autoapi/fairchem/core/datasets/oc22_lmdb_dataset/index.html b/autoapi/fairchem/core/datasets/oc22_lmdb_dataset/index.html
deleted file mode 100644
index 0e8ed952d..000000000
--- a/autoapi/fairchem/core/datasets/oc22_lmdb_dataset/index.html
+++ /dev/null
@@ -1,694 +0,0 @@
-

fairchem.core.datasets.oc22_lmdb_dataset#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

OC22LmdbDataset

Dataset class to load from LMDB files containing relaxation

-
-
-class fairchem.core.datasets.oc22_lmdb_dataset.OC22LmdbDataset(config, transform=None)#
-

Bases: torch.utils.data.Dataset

-

Dataset class to load from LMDB files containing relaxation trajectories or single point computations.

-

Useful for Structure to Energy & Force (S2EF), Initial State to Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks.

-

The keys in the LMDB must be integers (stored as ascii objects) starting from 0 through the length of the LMDB. For historical reasons any key named “length” is ignored since that was used to infer the length of many lmdbs in the same folder, but lmdb lengths are now calculated directly from the number of keys.

-
-
Parameters:
-
    -
  • config (dict) – Dataset configuration

  • -
  • transform (callable, optional) – Data transform function. (default: None)

  • -
-
-
-
-
-__len__() int#
-
- -
-
-__getitem__(idx)#
-
- -
-
-connect_db(lmdb_path=None)#
-
- -
-
-close_db() None#
-
- -
- -
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/datasets/target_metadata_guesser/index.html b/autoapi/fairchem/core/datasets/target_metadata_guesser/index.html
deleted file mode 100644
index 5ac7e7b11..000000000
--- a/autoapi/fairchem/core/datasets/target_metadata_guesser/index.html
+++ /dev/null
@@ -1,691 +0,0 @@
-

fairchem.core.datasets.target_metadata_guesser#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - -

uniform_atoms_lengths(→ bool)

target_constant_shape(→ bool)

target_per_atom(→ bool)

target_extensive(atoms_lens, target_samples[, threshold])

guess_target_metadata(atoms_len, target_samples)

guess_property_metadata(atoms_list)

-
-
-fairchem.core.datasets.target_metadata_guesser.uniform_atoms_lengths(atoms_lens) bool#
-
- -
-
-fairchem.core.datasets.target_metadata_guesser.target_constant_shape(atoms_lens, target_samples) bool#
-
- -
-
-fairchem.core.datasets.target_metadata_guesser.target_per_atom(atoms_lens, target_samples) bool#
-
- -
-
-fairchem.core.datasets.target_metadata_guesser.target_extensive(atoms_lens, target_samples, threshold: float = 0.2)#
-
- -
-
-fairchem.core.datasets.target_metadata_guesser.guess_target_metadata(atoms_len, target_samples)#
-
- -
-
-fairchem.core.datasets.target_metadata_guesser.guess_property_metadata(atoms_list)#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/index.html b/autoapi/fairchem/core/index.html
deleted file mode 100644
index e53e39aa4..000000000
--- a/autoapi/fairchem/core/index.html
+++ /dev/null
@@ -1,810 +0,0 @@

fairchem.core#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Subpackages#

-
- -
-
-
-

Submodules#

- -
-
-

Package Contents#

-
-
-fairchem.core.__version__#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/base/index.html b/autoapi/fairchem/core/models/base/index.html
deleted file mode 100644
index a21fc0092..000000000
--- a/autoapi/fairchem/core/models/base/index.html
+++ /dev/null
@@ -1,709 +0,0 @@
-

fairchem.core.models.base#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

BaseModel

Base class for all neural network modules.

-
-
-class fairchem.core.models.base.BaseModel(num_atoms=None, bond_feat_dim=None, num_targets=None)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or evaluation mode.

-
-
-
-
-property num_params: int#
-
- -
-
-abstract forward(data)#
-
- -
-
-generate_graph(data, cutoff=None, max_neighbors=None, use_pbc=None, otf_graph=None, enforce_max_neighbors_strictly=None)#
-
- -
-
-no_weight_decay() list#
-

Returns a list of parameters with no weight decay.

-
- -
- -
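A hypothetical minimal subclass sketch; returning a dict of predicted properties is an assumption made here for illustration, not something the abstract forward above prescribes:

import torch
from fairchem.core.models.base import BaseModel

class ToyEnergyModel(BaseModel):
    def __init__(self, num_atoms=None, bond_feat_dim=None, num_targets=None):
        super().__init__(num_atoms, bond_feat_dim, num_targets)
        self.readout = torch.nn.Linear(3, 1)

    def forward(self, data):
        # data.pos: (total_num_atoms, 3) positions from the collated batch.
        per_atom = self.readout(data.pos)
        return {"energy": per_atom.sum()}

model = ToyEnergyModel()
print(model.num_params)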
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/dimenet_plus_plus/index.html b/autoapi/fairchem/core/models/dimenet_plus_plus/index.html
deleted file mode 100644
index 22a87d921..000000000
--- a/autoapi/fairchem/core/models/dimenet_plus_plus/index.html
+++ /dev/null
@@ -1,926 +0,0 @@
-

fairchem.core.models.dimenet_plus_plus#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-

-

This code borrows heavily from the DimeNet implementation as part of pytorch-geometric: rusty1s/pytorch_geometric. License:

-

-

Copyright (c) 2020 Matthias Fey <matthias.fey@tu-dortmund.de>

-

Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the “Software”), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions:

-

The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software.

-

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - -

InteractionPPBlock

Base class for all neural network modules.

OutputPPBlock

Base class for all neural network modules.

DimeNetPlusPlus

DimeNet++ implementation based on klicperajo/dimenet.

DimeNetPlusPlusWrap

DimeNet++ implementation based on klicperajo/dimenet.

-
-
-

Attributes#

- - - - - - -

sym

-
-
-fairchem.core.models.dimenet_plus_plus.sym#
-
- -
-
-class fairchem.core.models.dimenet_plus_plus.InteractionPPBlock(hidden_channels: int, int_emb_size: int, basis_emb_size: int, num_spherical: int, num_radial: int, num_before_skip: int, num_after_skip: int, act='silu')#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-reset_parameters() None#
-
- -
-
-forward(x, rbf, sbf, idx_kj, idx_ji)#
-
- -
- -
-
-class fairchem.core.models.dimenet_plus_plus.OutputPPBlock(num_radial: int, hidden_channels: int, out_emb_channels: int, out_channels: int, num_layers: int, act: str = 'silu')#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-reset_parameters() None#
-
- -
-
-forward(x, rbf, i, num_nodes: int | None = None)#
-
- -
- -
-
-class fairchem.core.models.dimenet_plus_plus.DimeNetPlusPlus(hidden_channels: int, out_channels: int, num_blocks: int, int_emb_size: int, basis_emb_size: int, out_emb_channels: int, num_spherical: int, num_radial: int, cutoff: float = 5.0, envelope_exponent: int = 5, num_before_skip: int = 1, num_after_skip: int = 2, num_output_layers: int = 3, act: str = 'silu')#
-

Bases: torch.nn.Module

-

DimeNet++ implementation based on klicperajo/dimenet.

-
-
Parameters:
-
    -
  • hidden_channels (int) – Hidden embedding size.

  • -
  • out_channels (int) – Size of each output sample.

  • -
  • num_blocks (int) – Number of building blocks.

  • -
  • int_emb_size (int) – Embedding size used for interaction triplets

  • -
  • basis_emb_size (int) – Embedding size used in the basis transformation

  • -
  • out_emb_channels (int) – Embedding size used for atoms in the output block

  • -
  • num_spherical (int) – Number of spherical harmonics.

  • -
  • num_radial (int) – Number of radial basis functions.

  • -
  • cutoff – (float, optional): Cutoff distance for interatomic -interactions. (default: 5.0)

  • -
  • envelope_exponent (int, optional) – Shape of the smooth cutoff. -(default: 5)

  • -
  • num_before_skip – (int, optional): Number of residual layers in the -interaction blocks before the skip connection. (default: 1)

  • -
  • num_after_skip – (int, optional): Number of residual layers in the -interaction blocks after the skip connection. (default: 2)

  • -
  • num_output_layers – (int, optional): Number of linear layers for the -output blocks. (default: 3)

  • -
  • act – (function, optional): The activation function. (default: silu)

  • -
-
-
-
-
-url = 'https://github.com/klicperajo/dimenet/raw/master/pretrained'#
-
- -
-
-reset_parameters() None#
-
- -
-
-triplets(edge_index, cell_offsets, num_nodes: int)#
-
- -
-
-abstract forward(z, pos, batch=None)#
-
- -
- -
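For reference, the optional hyperparameters documented above, collected with the defaults stated in the DimeNetPlusPlus parameter list (a plain-Python summary, not fairchem code; note that DimeNetPlusPlusWrap below overrides some of these in its signature, e.g. cutoff=10.0):

# Defaults as documented in the parameter list above.
dimenet_pp_defaults = {
    "cutoff": 5.0,             # interatomic interaction cutoff
    "envelope_exponent": 5,    # shape of the smooth cutoff
    "num_before_skip": 1,      # residual layers before the skip connection
    "num_after_skip": 2,       # residual layers after the skip connection
    "num_output_layers": 3,    # linear layers in the output blocks
    "act": "silu",             # activation function
}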
-
-class fairchem.core.models.dimenet_plus_plus.DimeNetPlusPlusWrap(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, hidden_channels: int = 128, num_blocks: int = 4, int_emb_size: int = 64, basis_emb_size: int = 8, out_emb_channels: int = 256, num_spherical: int = 7, num_radial: int = 6, otf_graph: bool = False, cutoff: float = 10.0, envelope_exponent: int = 5, num_before_skip: int = 1, num_after_skip: int = 2, num_output_layers: int = 3)#
-

Bases: DimeNetPlusPlus, fairchem.core.models.base.BaseModel

-

DimeNet++ implementation based on klicperajo/dimenet.

-
-
Parameters:
-
    -
  • hidden_channels (int) – Hidden embedding size.

  • -
  • out_channels (int) – Size of each output sample.

  • -
  • num_blocks (int) – Number of building blocks.

  • -
  • int_emb_size (int) – Embedding size used for interaction triplets

  • -
  • basis_emb_size (int) – Embedding size used in the basis transformation

  • -
  • out_emb_channels (int) – Embedding size used for atoms in the output block

  • -
  • num_spherical (int) – Number of spherical harmonics.

  • -
  • num_radial (int) – Number of radial basis functions.

  • -
  • cutoff – (float, optional): Cutoff distance for interatomic -interactions. (default: 5.0)

  • -
  • envelope_exponent (int, optional) – Shape of the smooth cutoff. -(default: 5)

  • -
  • num_before_skip – (int, optional): Number of residual layers in the -interaction blocks before the skip connection. (default: 1)

  • -
  • num_after_skip – (int, optional): Number of residual layers in the -interaction blocks after the skip connection. (default: 2)

  • -
  • num_output_layers – (int, optional): Number of linear layers for the -output blocks. (default: 3)

  • -
  • act – (function, optional): The activation function. (default: silu)

  • -
-
-
-
-
-property num_params: int#
-
- -
-
-_forward(data)#
-
- -
-
-forward(data)#
-
- -
- -
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/activation/index.html b/autoapi/fairchem/core/models/equiformer_v2/activation/index.html
deleted file mode 100644
index 01952945d..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/activation/index.html
+++ /dev/null
@@ -1,1113 +0,0 @@
-fairchem.core.models.equiformer_v2.activation — FAIR Chemistry Documentation

fairchem.core.models.equiformer_v2.activation#

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

ScaledSiLU

Base class for all neural network modules.

ScaledSwiGLU

Base class for all neural network modules.

SwiGLU

Base class for all neural network modules.

SmoothLeakyReLU

Base class for all neural network modules.

ScaledSmoothLeakyReLU

Base class for all neural network modules.

ScaledSigmoid

Base class for all neural network modules.

GateActivation

Base class for all neural network modules.

S2Activation

Assume we only have one resolution

SeparableS2Activation

Base class for all neural network modules.

-
-
-class fairchem.core.models.equiformer_v2.activation.ScaledSiLU(inplace: bool = False)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(inputs)#
-
- -
-
-extra_repr()#
-

Set the extra representation of the module.

-

To print customized extra information, you should re-implement -this method in your own modules. Both single-line and multi-line -strings are acceptable.

-
- -
- -
-
-class fairchem.core.models.equiformer_v2.activation.ScaledSwiGLU(in_channels: int, out_channels: int, bias: bool = True)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(inputs)#
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.activation.SwiGLU(in_channels: int, out_channels: int, bias: bool = True)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(inputs)#
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.activation.SmoothLeakyReLU(negative_slope: float = 0.2)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x)#
-
- -
-
-extra_repr()#
-

Set the extra representation of the module.

-

To print customized extra information, you should re-implement -this method in your own modules. Both single-line and multi-line -strings are acceptable.

-
- -
- -
-
-class fairchem.core.models.equiformer_v2.activation.ScaledSmoothLeakyReLU#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x)#
-
- -
-
-extra_repr()#
-

Set the extra representation of the module.

-

To print customized extra information, you should re-implement -this method in your own modules. Both single-line and multi-line -strings are acceptable.

-
- -
- -
-
-class fairchem.core.models.equiformer_v2.activation.ScaledSigmoid#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.activation.GateActivation(lmax: int, mmax: int, num_channels: int)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(gating_scalars, input_tensors)#
-

gating_scalars: shape [N, lmax * num_channels]
input_tensors: shape [N, (lmax + 1) ** 2, num_channels]

-
- -
- -
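The shapes above can be made concrete with a small plain-torch sketch (illustrative only, not the fairchem implementation): one sigmoid gate per degree l = 1..lmax and per channel, broadcast over the 2l + 1 m-components of that degree, while the single l = 0 component is left to a separate scalar activation.

import torch

N, lmax, C = 4, 2, 8
gating_scalars = torch.randn(N, lmax * C)            # [N, lmax * num_channels]
input_tensors = torch.randn(N, (lmax + 1) ** 2, C)   # [N, (lmax + 1) ** 2, num_channels]

gates = torch.sigmoid(gating_scalars).view(N, lmax, C)
out = input_tensors.clone()
start = 1                                            # component 0 is the l = 0 scalar
for l in range(1, lmax + 1):
    out[:, start : start + 2 * l + 1, :] *= gates[:, l - 1 : l, :]  # broadcast over m
    start += 2 * l + 1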
-
-class fairchem.core.models.equiformer_v2.activation.S2Activation(lmax: int, mmax: int)#
-

Bases: torch.nn.Module

-

Assume we only have one resolution

-
-
-forward(inputs, SO3_grid)#
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.activation.SeparableS2Activation(lmax: int, mmax: int)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(input_scalars, input_tensors, SO3_grid)#
-
- -
- -
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/drop/index.html b/autoapi/fairchem/core/models/equiformer_v2/drop/index.html
deleted file mode 100644
index 4ce305ef2..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/drop/index.html
+++ /dev/null
@@ -1,907 +0,0 @@
-fairchem.core.models.equiformer_v2.drop — FAIR Chemistry Documentation

fairchem.core.models.equiformer_v2.drop#

-

Add extra_repr into DropPath implemented by timm -for displaying more info.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - -

DropPath

Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

GraphDropPath

Consider batch for graph data when dropping paths.

EquivariantDropout

Base class for all neural network modules.

EquivariantScalarsDropout

Base class for all neural network modules.

EquivariantDropoutArraySphericalHarmonics

Base class for all neural network modules.

-
-
-

Functions#

- - - - - - -

drop_path(→ torch.Tensor)

Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

-
-
-fairchem.core.models.equiformer_v2.drop.drop_path(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) torch.Tensor#
-

Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). -This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, -the original name is misleading as ‘Drop Connect’ is a different form of dropout in a separate paper… -See discussion: tensorflow/tpu#494 … I’ve opted for -changing the layer and argument names to ‘drop path’ rather than mix DropConnect as a layer name and use -‘survival rate’ as the argument.

-
- -
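A plain-torch sketch of the idea (per-sample stochastic depth with rescaling by the keep probability); this is for illustration and is not the module's own code:

import torch

def drop_path_sketch(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample, broadcast over the remaining dimensions.
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = x.new_empty(shape).bernoulli_(keep_prob)
    return x / keep_prob * mask   # rescale so the expected value is unchanged

x = torch.randn(8, 16)
y = drop_path_sketch(x, drop_prob=0.2, training=True)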
-
-class fairchem.core.models.equiformer_v2.drop.DropPath(drop_prob: float)#
-

Bases: torch.nn.Module

-

Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

-
-
-forward(x: torch.Tensor) torch.Tensor#
-
- -
-
-extra_repr() str#
-

Set the extra representation of the module.

-

To print customized extra information, you should re-implement -this method in your own modules. Both single-line and multi-line -strings are acceptable.

-
- -
- -
-
-class fairchem.core.models.equiformer_v2.drop.GraphDropPath(drop_prob: float)#
-

Bases: torch.nn.Module

-

Consider batch for graph data when dropping paths.

-
-
-forward(x: torch.Tensor, batch) torch.Tensor#
-
- -
-
-extra_repr() str#
-

Set the extra representation of the module.

-

To print customized extra information, you should re-implement -this method in your own modules. Both single-line and multi-line -strings are acceptable.

-
- -
- -
-
-class fairchem.core.models.equiformer_v2.drop.EquivariantDropout(irreps, drop_prob: float)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.drop.EquivariantScalarsDropout(irreps, drop_prob: float)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x: torch.Tensor) torch.Tensor#
-
- -
-
-extra_repr() str#
-

Set the extra representation of the module.

-

To print customized extra information, you should re-implement -this method in your own modules. Both single-line and multi-line -strings are acceptable.

-
- -
- -
-
-class fairchem.core.models.equiformer_v2.drop.EquivariantDropoutArraySphericalHarmonics(drop_prob: float, drop_graph: bool = False)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x: torch.Tensor, batch=None) torch.Tensor#
-
- -
-
-extra_repr() str#
-

Set the extra representation of the module.

-

To print customized extra information, you should re-implement -this method in your own modules. Both single-line and multi-line -strings are acceptable.

-
- -
- -
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/edge_rot_mat/index.html b/autoapi/fairchem/core/models/equiformer_v2/edge_rot_mat/index.html
deleted file mode 100644
index e2ee4e73a..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/edge_rot_mat/index.html
+++ /dev/null
@@ -1,641 +0,0 @@
-fairchem.core.models.equiformer_v2.edge_rot_mat — FAIR Chemistry Documentation

fairchem.core.models.equiformer_v2.edge_rot_mat#

-
-

Module Contents#

-
-

Functions#

- - - - - - -

init_edge_rot_mat(edge_distance_vec)

-
-
-fairchem.core.models.equiformer_v2.edge_rot_mat.init_edge_rot_mat(edge_distance_vec)#
-
- -
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/equiformer_v2_oc20/index.html b/autoapi/fairchem/core/models/equiformer_v2/equiformer_v2_oc20/index.html
deleted file mode 100644
index dd3a9f30f..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/equiformer_v2_oc20/index.html
+++ /dev/null
@@ -1,775 +0,0 @@
-fairchem.core.models.equiformer_v2.equiformer_v2_oc20 — FAIR Chemistry Documentation

fairchem.core.models.equiformer_v2.equiformer_v2_oc20#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

EquiformerV2_OC20

Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation

-
-
-

Attributes#

- - - - - - - - - -

_AVG_NUM_NODES

_AVG_DEGREE

-
-
-fairchem.core.models.equiformer_v2.equiformer_v2_oc20._AVG_NUM_NODES = 77.81317#
-
- -
-
-fairchem.core.models.equiformer_v2.equiformer_v2_oc20._AVG_DEGREE = 23.395238876342773#
-
- -
-
-class fairchem.core.models.equiformer_v2.equiformer_v2_oc20.EquiformerV2_OC20(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = True, max_neighbors: int = 500, max_radius: float = 5.0, max_num_elements: int = 90, num_layers: int = 12, sphere_channels: int = 128, attn_hidden_channels: int = 128, num_heads: int = 8, attn_alpha_channels: int = 32, attn_value_channels: int = 16, ffn_hidden_channels: int = 512, norm_type: str = 'rms_norm_sh', lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, grid_resolution: int | None = None, num_sphere_samples: int = 128, edge_channels: int = 128, use_atom_edge_embedding: bool = True, share_atom_edge_embedding: bool = False, use_m_share_rad: bool = False, distance_function: str = 'gaussian', num_distance_basis: int = 512, attn_activation: str = 'scaled_silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, ffn_activation: str = 'scaled_silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True, alpha_drop: float = 0.1, drop_path_rate: float = 0.05, proj_drop: float = 0.0, weight_init: str = 'normal', enforce_max_neighbors_strictly: bool = True, avg_num_nodes: float | None = None, avg_degree: float | None = None, use_energy_lin_ref: bool | None = False, load_energy_lin_ref: bool | None = False)#
-

Bases: fairchem.core.models.base.BaseModel

-

Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation

-
-
Parameters:
-
    -
  • use_pbc (bool) – Use periodic boundary conditions

  • -
  • regress_forces (bool) – Compute forces

  • -
  • otf_graph (bool) – Compute graph On The Fly (OTF)

  • -
  • max_neighbors (int) – Maximum number of neighbors per atom

  • -
  • max_radius (float) – Maximum distance between neighboring atoms in Angstroms

  • -
  • max_num_elements (int) – Maximum atomic number

  • -
  • num_layers (int) – Number of layers in the GNN

  • -
  • sphere_channels (int) – Number of spherical channels (one set per resolution)

  • -
  • attn_hidden_channels (int) – Number of hidden channels used during SO(2) graph attention

  • -
  • num_heads (int) – Number of attention heads

  • -
  • attn_alpha_head (int) – Number of channels for alpha vector in each attention head

  • -
  • attn_value_head (int) – Number of channels for value vector in each attention head

  • -
  • ffn_hidden_channels (int) – Number of hidden channels used during feedforward network

  • -
  • norm_type (str) – Type of normalization layer ([‘layer_norm’, ‘layer_norm_sh’, ‘rms_norm_sh’])

  • -
  • lmax_list (int) – List of maximum degree of the spherical harmonics (1 to 10)

  • -
  • mmax_list (int) – List of maximum order of the spherical harmonics (0 to lmax)

  • -
  • grid_resolution (int) – Resolution of SO3_Grid

  • -
  • num_sphere_samples (int) – Number of samples used to approximate the integration of the sphere in the output blocks

  • -
  • edge_channels (int) – Number of channels for the edge invariant features

  • -
  • use_atom_edge_embedding (bool) – Whether to use atomic embedding along with relative distance for edge scalar features

  • -
  • share_atom_edge_embedding (bool) – Whether to share atom_edge_embedding across all blocks

  • -
  • use_m_share_rad (bool) – Whether all m components within a type-L vector of one channel share radial function weights

  • -
  • distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu") – Basis function used for distances

  • -
  • attn_activation (str) – Type of activation function for SO(2) graph attention

  • -
  • use_s2_act_attn (bool) – Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer

  • -
  • use_attn_renorm (bool) – Whether to re-normalize attention weights

  • -
  • ffn_activation (str) – Type of activation function for feedforward network

  • -
  • use_gate_act (bool) – If True, use gate activation. Otherwise, use S2 activation

  • -
  • use_grid_mlp (bool) – If True, use projecting to grids and performing MLPs for FFNs.

  • -
  • use_sep_s2_act (bool) – If True, use separable S2 activation when use_gate_act is False.

  • -
  • alpha_drop (float) – Dropout rate for attention weights

  • -
  • drop_path_rate (float) – Drop path rate

  • -
  • proj_drop (float) – Dropout rate for outputs of attention and FFN in Transformer blocks

  • -
  • weight_init (str) – [‘normal’, ‘uniform’] initialization of weights of linear layers except those in radial functions

  • -
  • enforce_max_neighbors_strictly (bool) – When edges are subselected based on the max_neighbors arg, arbitrarily select amongst equidistant / degenerate edges to have exactly the correct number.

  • -
  • avg_num_nodes (float) – Average number of nodes per graph

  • -
  • avg_degree (float) – Average degree of nodes in the graph

  • -
  • use_energy_lin_ref (bool) – Whether to add the per-atom energy references during prediction. -During training and validation, this should be kept False since we use the lin_ref parameter in the OC22 dataloader to subtract the per-atom linear references from the energy targets. -During prediction (where we don’t have energy targets), this can be set to True to add the per-atom linear references to the predicted energies.

  • -
  • load_energy_lin_ref (bool) – Whether to add nn.Parameters for the per-element energy references. -This additional flag is there to ensure compatibility when strict-loading checkpoints, since the use_energy_lin_ref flag can be either True or False even if the model is trained with linear references. -You can’t have use_energy_lin_ref = True and load_energy_lin_ref = False, since the model will not have the parameters for the linear references. All other combinations are fine.

  • -
-
-
-
-
-property num_params#
-
- -
-
-forward(data)#
-
- -
-
-_init_edge_rot_mat(data, edge_index, edge_distance_vec)#
-
- -
-
-_init_weights(m)#
-
- -
-
-_uniform_init_rad_func_linear_weights(m)#
-
- -
-
-_uniform_init_linear_weights(m)#
-
- -
-
-no_weight_decay() set#
-

Returns a list of parameters with no weight decay.

-
- -
- -
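The constraint on the two linear-reference flags described in the parameter list above can be summarized in a few lines of plain Python (a sketch of the documented rule, not fairchem code):

def check_lin_ref_flags(use_energy_lin_ref: bool, load_energy_lin_ref: bool) -> None:
    # The only disallowed combination: using references that were never loaded.
    if use_energy_lin_ref and not load_energy_lin_ref:
        raise ValueError("use_energy_lin_ref=True requires load_energy_lin_ref=True")

check_lin_ref_flags(False, False)  # training/validation, lin_ref handled by the OC22 dataloader
check_lin_ref_flags(True, True)    # prediction, adding per-atom references back in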
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/gaussian_rbf/index.html b/autoapi/fairchem/core/models/equiformer_v2/gaussian_rbf/index.html
deleted file mode 100644
index b1f785bf7..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/gaussian_rbf/index.html
+++ /dev/null
@@ -1,714 +0,0 @@
-fairchem.core.models.equiformer_v2.gaussian_rbf — FAIR Chemistry Documentation

fairchem.core.models.equiformer_v2.gaussian_rbf#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

GaussianRadialBasisLayer

Base class for all neural network modules.

-
-
-

Functions#

- - - - - - -

gaussian(→ torch.Tensor)

-
-
-fairchem.core.models.equiformer_v2.gaussian_rbf.gaussian(x: torch.Tensor, mean, std) torch.Tensor#
-
- -
-
-class fairchem.core.models.equiformer_v2.gaussian_rbf.GaussianRadialBasisLayer(num_basis: int, cutoff: float)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(dist: torch.Tensor, node_atom=None, edge_src=None, edge_dst=None)#
-
- -
-
-extra_repr()#
-

Set the extra representation of the module.

-

To print customized extra information, you should re-implement -this method in your own modules. Both single-line and multi-line -strings are acceptable.

-
- -
- -
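A plain-torch sketch of a Gaussian radial-basis expansion using the two documented hyperparameters, num_basis and cutoff (shapes only; the layer above defines its own parametrization):

import torch

num_basis, cutoff = 16, 5.0
centers = torch.linspace(0.0, cutoff, num_basis)     # one Gaussian center per basis function
width = cutoff / num_basis

dist = torch.rand(32) * cutoff                       # [num_edges] interatomic distances
rbf = torch.exp(-0.5 * ((dist[:, None] - centers) / width) ** 2)  # [num_edges, num_basis]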
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/index.html b/autoapi/fairchem/core/models/equiformer_v2/index.html
deleted file mode 100644
index fb03607c3..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/index.html
+++ /dev/null
@@ -1,783 +0,0 @@
-fairchem.core.models.equiformer_v2 — FAIR Chemistry Documentation

fairchem.core.models.equiformer_v2#

-
-

Subpackages#

- -
-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - -

EquiformerV2

Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation

-
-
-class fairchem.core.models.equiformer_v2.EquiformerV2(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = True, max_neighbors: int = 500, max_radius: float = 5.0, max_num_elements: int = 90, num_layers: int = 12, sphere_channels: int = 128, attn_hidden_channels: int = 128, num_heads: int = 8, attn_alpha_channels: int = 32, attn_value_channels: int = 16, ffn_hidden_channels: int = 512, norm_type: str = 'rms_norm_sh', lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, grid_resolution: int | None = None, num_sphere_samples: int = 128, edge_channels: int = 128, use_atom_edge_embedding: bool = True, share_atom_edge_embedding: bool = False, use_m_share_rad: bool = False, distance_function: str = 'gaussian', num_distance_basis: int = 512, attn_activation: str = 'scaled_silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, ffn_activation: str = 'scaled_silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True, alpha_drop: float = 0.1, drop_path_rate: float = 0.05, proj_drop: float = 0.0, weight_init: str = 'normal', enforce_max_neighbors_strictly: bool = True, avg_num_nodes: float | None = None, avg_degree: float | None = None, use_energy_lin_ref: bool | None = False, load_energy_lin_ref: bool | None = False)#
-

Bases: fairchem.core.models.base.BaseModel

-

Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation

-
-
Parameters:
-
    -
  • use_pbc (bool) – Use periodic boundary conditions

  • -
  • regress_forces (bool) – Compute forces

  • -
  • otf_graph (bool) – Compute graph On The Fly (OTF)

  • -
  • max_neighbors (int) – Maximum number of neighbors per atom

  • -
  • max_radius (float) – Maximum distance between neighboring atoms in Angstroms

  • -
  • max_num_elements (int) – Maximum atomic number

  • -
  • num_layers (int) – Number of layers in the GNN

  • -
  • sphere_channels (int) – Number of spherical channels (one set per resolution)

  • -
  • attn_hidden_channels (int) – Number of hidden channels used during SO(2) graph attention

  • -
  • num_heads (int) – Number of attention heads

  • -
  • attn_alpha_head (int) – Number of channels for alpha vector in each attention head

  • -
  • attn_value_head (int) – Number of channels for value vector in each attention head

  • -
  • ffn_hidden_channels (int) – Number of hidden channels used during feedforward network

  • -
  • norm_type (str) – Type of normalization layer ([‘layer_norm’, ‘layer_norm_sh’, ‘rms_norm_sh’])

  • -
  • lmax_list (int) – List of maximum degree of the spherical harmonics (1 to 10)

  • -
  • mmax_list (int) – List of maximum order of the spherical harmonics (0 to lmax)

  • -
  • grid_resolution (int) – Resolution of SO3_Grid

  • -
  • num_sphere_samples (int) – Number of samples used to approximate the integration of the sphere in the output blocks

  • -
  • edge_channels (int) – Number of channels for the edge invariant features

  • -
  • use_atom_edge_embedding (bool) – Whether to use atomic embedding along with relative distance for edge scalar features

  • -
  • share_atom_edge_embedding (bool) – Whether to share atom_edge_embedding across all blocks

  • -
  • use_m_share_rad (bool) – Whether all m components within a type-L vector of one channel share radial function weights

  • -
  • distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu") – Basis function used for distances

  • -
  • attn_activation (str) – Type of activation function for SO(2) graph attention

  • -
  • use_s2_act_attn (bool) – Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer

  • -
  • use_attn_renorm (bool) – Whether to re-normalize attention weights

  • -
  • ffn_activation (str) – Type of activation function for feedforward network

  • -
  • use_gate_act (bool) – If True, use gate activation. Otherwise, use S2 activation

  • -
  • use_grid_mlp (bool) – If True, use projecting to grids and performing MLPs for FFNs.

  • -
  • use_sep_s2_act (bool) – If True, use separable S2 activation when use_gate_act is False.

  • -
  • alpha_drop (float) – Dropout rate for attention weights

  • -
  • drop_path_rate (float) – Drop path rate

  • -
  • proj_drop (float) – Dropout rate for outputs of attention and FFN in Transformer blocks

  • -
  • weight_init (str) – [‘normal’, ‘uniform’] initialization of weights of linear layers except those in radial functions

  • -
  • enforce_max_neighbors_strictly (bool) – When edges are subselected based on the max_neighbors arg, arbitrarily select amongst equidistant / degenerate edges to have exactly the correct number.

  • -
  • avg_num_nodes (float) – Average number of nodes per graph

  • -
  • avg_degree (float) – Average degree of nodes in the graph

  • -
  • use_energy_lin_ref (bool) – Whether to add the per-atom energy references during prediction. -During training and validation, this should be kept False since we use the lin_ref parameter in the OC22 dataloader to subtract the per-atom linear references from the energy targets. -During prediction (where we don’t have energy targets), this can be set to True to add the per-atom linear references to the predicted energies.

  • -
  • load_energy_lin_ref (bool) – Whether to add nn.Parameters for the per-element energy references. -This additional flag is there to ensure compatibility when strict-loading checkpoints, since the use_energy_lin_ref flag can be either True or False even if the model is trained with linear references. -You can’t have use_energy_lin_ref = True and load_energy_lin_ref = False, since the model will not have the parameters for the linear references. All other combinations are fine.

  • -
-
-
-
-
-property num_params#
-
- -
-
-forward(data)#
-
- -
-
-_init_edge_rot_mat(data, edge_index, edge_distance_vec)#
-
- -
-
-_init_weights(m)#
-
- -
-
-_uniform_init_rad_func_linear_weights(m)#
-
- -
-
-_uniform_init_linear_weights(m)#
-
- -
-
-no_weight_decay() set#
-

Returns a list of parameters with no weight decay.

-
- -
- -
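As a quick sanity check on the lmax_list / mmax_list parameters documented above: a degree-l spherical harmonic has 2l + 1 orders, so a maximum degree lmax gives (lmax + 1) squared coefficients per channel (plain Python, illustrative):

lmax = 6
num_coefficients = sum(2 * l + 1 for l in range(lmax + 1))
assert num_coefficients == (lmax + 1) ** 2  # 49 coefficients for lmax = 6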
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/input_block/index.html b/autoapi/fairchem/core/models/equiformer_v2/input_block/index.html
deleted file mode 100644
index 6b927522e..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/input_block/index.html
+++ /dev/null
@@ -1,674 +0,0 @@
-fairchem.core.models.equiformer_v2.input_block — FAIR Chemistry Documentation

fairchem.core.models.equiformer_v2.input_block#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

EdgeDegreeEmbedding

param sphere_channels: Number of spherical channels

-
-
-class fairchem.core.models.equiformer_v2.input_block.EdgeDegreeEmbedding(sphere_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_rotation, mappingReduced, max_num_elements: int, edge_channels_list, use_atom_edge_embedding: bool, rescale_factor)#
-

Bases: torch.nn.Module

-
-
Parameters:
-
    -
  • sphere_channels (int) – Number of spherical channels

  • lmax_list (list of int) – List of degrees (l) for each resolution

  • mmax_list (list of int) – List of orders (m) for each resolution

  • SO3_rotation (SO3_Rotation) – Class to calculate Wigner-D matrices and rotate embeddings

  • mappingReduced (CoefficientMappingModule) – Class to convert l and m indices once node embedding is rotated

  • max_num_elements (int) – Maximum number of atomic numbers

  • edge_channels_list (list of int) – List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. The last one will be used as hidden size when use_atom_edge_embedding is True.

  • use_atom_edge_embedding (bool) – Whether to use atomic embedding along with relative distance for edge scalar features

  • rescale_factor (float) – Rescale the sum aggregation
-
-
-
-
-forward(atomic_numbers, edge_distance, edge_index)#
-
- -
- -
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/layer_norm/index.html b/autoapi/fairchem/core/models/equiformer_v2/layer_norm/index.html
deleted file mode 100644
index eaefb886c..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/layer_norm/index.html
+++ /dev/null
@@ -1,877 +0,0 @@
-fairchem.core.models.equiformer_v2.layer_norm — FAIR Chemistry Documentation

fairchem.core.models.equiformer_v2.layer_norm#

-

1. Normalize features of shape (N, sphere_basis, C), -with sphere_basis = (lmax + 1) ** 2.

-

2. The difference from layer_norm.py is that all type-L vectors have -the same number of channels and input features are of shape (N, sphere_basis, C).
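A plain-torch shape sketch of the statement above (illustrative only; the classes below differ in whether they center, how they balance degrees, and where the affine weights sit): features of shape (N, sphere_basis, C) are normalized jointly across the sphere_basis dimension, here with a per-channel RMS statistic.

import torch

N, lmax, C = 4, 3, 8
x = torch.randn(N, (lmax + 1) ** 2, C)               # (N, sphere_basis, C)

rms = x.pow(2).mean(dim=1, keepdim=True).sqrt()      # statistic over all (l, m) components
x_normed = x / (rms + 1e-5)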

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - -

EquivariantLayerNormArray

Base class for all neural network modules.

EquivariantLayerNormArraySphericalHarmonics

1. Normalize over L = 0.

EquivariantRMSNormArraySphericalHarmonics

1. Normalize across all m components from degrees L >= 0.

EquivariantRMSNormArraySphericalHarmonicsV2

1. Normalize across all m components from degrees L >= 0.

EquivariantDegreeLayerScale

1. Similar to Layer Scale used in CaiT (Going Deeper With Image Transformers (ICCV'21)), we scale the output of both attention and FFN.

-
-
-

Functions#

- - - - - - - - - -

get_normalization_layer(norm_type, lmax, num_channels)

get_l_to_all_m_expand_index(lmax)

-
-
-fairchem.core.models.equiformer_v2.layer_norm.get_normalization_layer(norm_type: str, lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component')#
-
- -
-
-fairchem.core.models.equiformer_v2.layer_norm.get_l_to_all_m_expand_index(lmax: int)#
-
- -
-
-class fairchem.core.models.equiformer_v2.layer_norm.EquivariantLayerNormArray(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component')#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-__repr__() str#
-

Return repr(self).

-
- -
-
-forward(node_input)#
-

Assume input is of shape [N, sphere_basis, C]

-
- -
- -
-
-class fairchem.core.models.equiformer_v2.layer_norm.EquivariantLayerNormArraySphericalHarmonics(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component', std_balance_degrees: bool = True)#
-

Bases: torch.nn.Module

-
  1. Normalize over L = 0.

  2. Normalize across all m components from degrees L > 0.

  3. Do not normalize separately for different L (L > 0).
-
-
-__repr__() str#
-

Return repr(self).

-
- -
-
-forward(node_input)#
-

Assume input is of shape [N, sphere_basis, C]

-
- -
- -
-
-class fairchem.core.models.equiformer_v2.layer_norm.EquivariantRMSNormArraySphericalHarmonics(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component')#
-

Bases: torch.nn.Module

-
  1. Normalize across all m components from degrees L >= 0.
-
-
-__repr__() str#
-

Return repr(self).

-
- -
-
-forward(node_input)#
-

Assume input is of shape [N, sphere_basis, C]

-
- -
- -
-
-class fairchem.core.models.equiformer_v2.layer_norm.EquivariantRMSNormArraySphericalHarmonicsV2(lmax: int, num_channels: int, eps: float = 1e-05, affine: bool = True, normalization: str = 'component', centering: bool = True, std_balance_degrees: bool = True)#
-

Bases: torch.nn.Module

-
  1. Normalize across all m components from degrees L >= 0.

  2. Expand weights and multiply with normalized feature to prevent slicing and concatenation.
-
-
-__repr__() str#
-

Return repr(self).

-
- -
-
-forward(node_input)#
-

Assume input is of shape [N, sphere_basis, C]

-
- -
- -
-
-class fairchem.core.models.equiformer_v2.layer_norm.EquivariantDegreeLayerScale(lmax: int, num_channels: int, scale_factor: float = 2.0)#
-

Bases: torch.nn.Module

-
  1. Similar to Layer Scale used in CaiT (Going Deeper With Image Transformers (ICCV'21)), we scale the output of both attention and FFN.

  2. For degree L > 0, we scale down by the square root of 2 * L, which is to emulate halving the number of channels when using higher L.
-
-
-__repr__() str#
-

Return repr(self).

-
- -
-
-forward(node_input)#
-
- -
- -
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/module_list/index.html b/autoapi/fairchem/core/models/equiformer_v2/module_list/index.html
deleted file mode 100644
index 4ca241b43..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/module_list/index.html
+++ /dev/null
@@ -1,676 +0,0 @@
-fairchem.core.models.equiformer_v2.module_list — FAIR Chemistry Documentation

fairchem.core.models.equiformer_v2.module_list#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

ModuleListInfo

Holds submodules in a list.

-
-
-class fairchem.core.models.equiformer_v2.module_list.ModuleListInfo(info_str, modules=None)#
-

Bases: torch.nn.ModuleList

-

Holds submodules in a list.

-

ModuleList can be indexed like a regular Python list, but -modules it contains are properly registered, and will be visible by all -Module methods.

-
-
Parameters:
-

modules (iterable, optional) – an iterable of modules to add

-
-
-

Example:

-
class MyModule(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
-
-    def forward(self, x):
-        # ModuleList can act as an iterable, or be indexed using ints
-        for i, l in enumerate(self.linears):
-            x = self.linears[i // 2](x) + l(x)
-        return x
-
-
-
-
-__repr__() str#
-

Return a custom repr for ModuleList that compresses repeated module representations.

-
- -
- -
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/radial_function/index.html b/autoapi/fairchem/core/models/equiformer_v2/radial_function/index.html
deleted file mode 100644
index 9d7f41f28..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/radial_function/index.html
+++ /dev/null
@@ -1,654 +0,0 @@
-fairchem.core.models.equiformer_v2.radial_function — FAIR Chemistry Documentation

fairchem.core.models.equiformer_v2.radial_function#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

RadialFunction

Construct a radial function (linear layers + layer normalization + SiLU) given a list of channels

-
-
-class fairchem.core.models.equiformer_v2.radial_function.RadialFunction(channels_list)#
-

Bases: torch.nn.Module

-

Construct a radial function (linear layers + layer normalization + SiLU) given a list of channels

-
-
-forward(inputs)#
-
- -
- -
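A hedged plain-torch sketch of the construction described above (Linear, LayerNorm, SiLU stages built from a list of channel sizes); this is illustrative and not the fairchem implementation:

import torch.nn as nn

def radial_function_sketch(channels_list: list[int]) -> nn.Sequential:
    layers: list[nn.Module] = []
    for c_in, c_out in zip(channels_list[:-1], channels_list[1:]):
        layers += [nn.Linear(c_in, c_out), nn.LayerNorm(c_out), nn.SiLU()]
    return nn.Sequential(*layers)

net = radial_function_sketch([128, 64, 64])  # e.g. an edge-channel list as in the docs above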
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/so2_ops/index.html b/autoapi/fairchem/core/models/equiformer_v2/so2_ops/index.html
deleted file mode 100644
index 913b55841..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/so2_ops/index.html
+++ /dev/null
@@ -1,738 +0,0 @@
-fairchem.core.models.equiformer_v2.so2_ops — FAIR Chemistry Documentation

fairchem.core.models.equiformer_v2.so2_ops#

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - -

SO2_m_Convolution

SO(2) Conv: Perform an SO(2) convolution on features corresponding to +- m

SO2_Convolution

SO(2) Block: Perform SO(2) convolutions for all m (orders)

SO2_Linear

SO(2) Linear: Perform SO(2) linear for all m (orders).

-
-
-class fairchem.core.models.equiformer_v2.so2_ops.SO2_m_Convolution(m: int, sphere_channels: int, m_output_channels: int, lmax_list: list[int], mmax_list: list[int])#
-

Bases: torch.nn.Module

-

SO(2) Conv: Perform an SO(2) convolution on features corresponding to +- m

-
-
Parameters:

  • m (int) – Order of the spherical harmonic coefficients

  • sphere_channels (int) – Number of spherical channels

  • m_output_channels (int) – Number of output channels used during the SO(2) conv

  • lmax_list (list[int]) – List of degrees (l) for each resolution

  • mmax_list (list[int]) – List of orders (m) for each resolution
-
-
-
-forward(x_m)#
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.so2_ops.SO2_Convolution(sphere_channels: int, m_output_channels: int, lmax_list: list[int], mmax_list: list[int], mappingReduced, internal_weights: bool = True, edge_channels_list: list[int] | None = None, extra_m0_output_channels: int | None = None)#
-

Bases: torch.nn.Module

-

SO(2) Block: Perform SO(2) convolutions for all m (orders)

-
-
Parameters:

  • sphere_channels (int) – Number of spherical channels

  • m_output_channels (int) – Number of output channels used during the SO(2) conv

  • lmax_list (list[int]) – List of degrees (l) for each resolution

  • mmax_list (list[int]) – List of orders (m) for each resolution

  • mappingReduced (CoefficientMappingModule) – Used to extract a subset of m components

  • internal_weights (bool) – If True, do not use a radial function to multiply the input features

  • edge_channels_list (list[int]) – List of sizes of the invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]

  • extra_m0_output_channels (int) – If not None, return out_embedding (SO3_Embedding) and extra_m0_features (Tensor)
-
-
-
-forward(x, x_edge)#
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.so2_ops.SO2_Linear(sphere_channels: int, m_output_channels: int, lmax_list: list[int], mmax_list: list[int], mappingReduced, internal_weights: bool = False, edge_channels_list: list[int] | None = None)#
-

Bases: torch.nn.Module

-

SO(2) Linear: Perform SO(2) linear for all m (orders).

-
-
Parameters:

  • sphere_channels (int) – Number of spherical channels

  • m_output_channels (int) – Number of output channels used during the SO(2) conv

  • lmax_list (list[int]) – List of degrees (l) for each resolution

  • mmax_list (list[int]) – List of orders (m) for each resolution

  • mappingReduced (CoefficientMappingModule) – Used to extract a subset of m components

  • internal_weights (bool) – If True, do not use a radial function to multiply the input features

  • edge_channels_list (list[int]) – List of sizes of the invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]
-
-
-
-forward(x, x_edge)#
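As a rough illustration of what an SO(2) convolution on a ±m feature pair does, the toy module below mixes the two components with two shared weight matrices, analogous to multiplication by a complex-valued weight. This is a conceptual sketch only; the actual SO2_m_Convolution additionally handles multiple resolutions, radial weighting, and channel bookkeeping.

import torch
from torch import nn

class ToySO2mConv(nn.Module):
    """Conceptual sketch: mix the (+m, -m) component pair with two weight matrices."""
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.w_r = nn.Linear(in_channels, out_channels, bias=False)  # "real" part
        self.w_i = nn.Linear(in_channels, out_channels, bias=False)  # "imaginary" part

    def forward(self, x_m):
        # x_m: (num_edges, 2, in_channels) holding the (+m, -m) pair
        x_r, x_i = x_m[:, 0], x_m[:, 1]
        y_r = self.w_r(x_r) - self.w_i(x_i)
        y_i = self.w_r(x_i) + self.w_i(x_r)
        return torch.stack([y_r, y_i], dim=1)

conv = ToySO2mConv(128, 64)
y = conv(torch.randn(32, 2, 128))  # -> (32, 2, 64)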
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/so3/index.html b/autoapi/fairchem/core/models/equiformer_v2/so3/index.html
deleted file mode 100644
index f71dbbac8..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/so3/index.html
+++ /dev/null
@@ -1,1029 +0,0 @@

fairchem.core.models.equiformer_v2.so3#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - - - - -

CoefficientMappingModule

Helper module for coefficients used to reshape lval <--> m and to get coefficients of specific degree or order

SO3_Embedding

Helper functions for performing operations on irreps embedding

SO3_Rotation

Helper functions for Wigner-D rotations

SO3_Grid

Helper functions for grid representation of the irreps

SO3_Linear

Base class for all neural network modules.

SO3_LinearV2

Base class for all neural network modules.

-
-
-class fairchem.core.models.equiformer_v2.so3.CoefficientMappingModule(lmax_list: list[int], mmax_list: list[int])#
-

Bases: torch.nn.Module

-

Helper module for coefficients used to reshape lval <--> m and to get coefficients of specific degree or order

-
-
Parameters:

  • lmax_list (list[int]) – List of maximum degree of the spherical harmonics

  • mmax_list (list[int]) – List of maximum order of the spherical harmonics
-
-
-
-complex_idx(m: int, lmax: int, m_complex, l_harmonic)#
-

Add m_complex and l_harmonic to the input arguments since we cannot use self.m_complex.

-
- -
-
-coefficient_idx(lmax: int, mmax: int)#
-
- -
-
-get_rotate_inv_rescale(lmax: int, mmax: int)#
-
- -
-
-__repr__() str#
-

Return repr(self).

-
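To make the lmax_list/mmax_list bookkeeping concrete, the small helper below counts how many spherical-harmonic coefficients are kept when orders are truncated to |m| <= min(l, mmax) for each resolution. It is an illustrative helper written for this document, not a fairchem API.

def num_sh_coefficients(lmax_list, mmax_list):
    """Count coefficients per resolution when orders are truncated to |m| <= min(l, mmax)."""
    total = 0
    for lmax, mmax in zip(lmax_list, mmax_list):
        for l in range(lmax + 1):
            total += 2 * min(l, mmax) + 1
    return total

print(num_sh_coefficients([6], [6]))  # 49, i.e. (6 + 1) ** 2, the full lmax = 6 basis
print(num_sh_coefficients([6], [2]))  # 29, the truncated mmax = 2 basis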
- -
- -
-
-class fairchem.core.models.equiformer_v2.so3.SO3_Embedding(length: int, lmax_list: list[int], num_channels: int, device: torch.device, dtype: torch.dtype)#
-

Helper functions for performing operations on irreps embedding

-
-
Parameters:
-
    -
  • length (int) – Batch size

  • -
  • lmax_list (list[int]) – List of maximum degree of the spherical harmonics

  • -
  • num_channels (int) – Number of channels

  • -
  • device – Device of the output

  • -
  • dtype – type of the output tensors

  • -
-
-
-
-
-clone() SO3_Embedding#
-
- -
-
-set_embedding(embedding) None#
-
- -
-
-set_lmax_mmax(lmax_list: list[int], mmax_list: list[int]) None#
-
- -
-
-_expand_edge(edge_index: torch.Tensor) None#
-
- -
-
-expand_edge(edge_index: torch.Tensor)#
-
- -
-
-_reduce_edge(edge_index: torch.Tensor, num_nodes: int)#
-
- -
-
-_m_primary(mapping)#
-
- -
-
-_l_primary(mapping)#
-
- -
-
-_rotate(SO3_rotation, lmax_list: list[int], mmax_list: list[int])#
-
- -
-
-_rotate_inv(SO3_rotation, mappingReduced)#
-
- -
-
-_grid_act(SO3_grid, act, mappingReduced)#
-
- -
-
-to_grid(SO3_grid, lmax=-1)#
-
- -
-
-_from_grid(x_grid, SO3_grid, lmax: int = -1)#
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.so3.SO3_Rotation(lmax: int)#
-

Bases: torch.nn.Module

-

Helper functions for Wigner-D rotations

-
-
Parameters:

lmax_list (list[int]) – List of maximum degree of the spherical harmonics

-
-
-
-
-set_wigner(rot_mat3x3)#
-
- -
-
-rotate(embedding, out_lmax: int, out_mmax: int)#
-
- -
-
-rotate_inv(embedding, in_lmax: int, in_mmax: int)#
-
- -
-
-RotationToWignerDMatrix(edge_rot_mat, start_lmax: int, end_lmax: int) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.so3.SO3_Grid(lmax: int, mmax: int, normalization: str = 'integral', resolution: int | None = None)#
-

Bases: torch.nn.Module

-

Helper functions for grid representation of the irreps

-
-
Parameters:
-
    -
  • lmax (int) – Maximum degree of the spherical harmonics

  • -
  • mmax (int) – Maximum order of the spherical harmonics

  • -
-
-
-
-
-get_to_grid_mat(device)#
-
- -
-
-get_from_grid_mat(device)#
-
- -
-
-to_grid(embedding, lmax: int, mmax: int)#
-
- -
-
-from_grid(grid, lmax: int, mmax: int)#
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.so3.SO3_Linear(in_features: int, out_features: int, lmax: int, bias: bool = True)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes:

import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their parameters converted too when you call to(), etc.

Note

As per the example above, an __init__() call to the parent class must be made before assignment on the child.

Variables:

training (bool) – Boolean represents whether this module is in training or evaluation mode.

-
-
-
-
-forward(input_embedding, output_scale=None)#
-
- -
-
-__repr__() str#
-

Return repr(self).

-
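For intuition, an equivariant linear layer on spherical-harmonic features typically applies one weight matrix per degree l, shared across the 2l+1 orders, with a bias only on the scalar (l = 0) part. The sketch below illustrates that pattern; it is not the fairchem SO3_Linear/SO3_LinearV2 implementation and ignores their exact initialization and layout conventions.

import torch
from torch import nn

class ToySO3Linear(nn.Module):
    """Illustrative degree-wise linear layer over (N, (lmax+1)**2, channels) features."""
    def __init__(self, in_features, out_features, lmax):
        super().__init__()
        self.lmax = lmax
        self.weight = nn.Parameter(
            torch.randn(lmax + 1, in_features, out_features) * in_features ** -0.5
        )
        self.bias = nn.Parameter(torch.zeros(out_features))

    def forward(self, x):
        # x: (N, (lmax + 1) ** 2, in_features), coefficients ordered degree by degree
        out, start = [], 0
        for l in range(self.lmax + 1):
            width = 2 * l + 1
            block = x[:, start:start + width] @ self.weight[l]
            if l == 0:
                block = block + self.bias  # bias only on the invariant l = 0 component
            out.append(block)
            start += width
        return torch.cat(out, dim=1)

layer = ToySO3Linear(16, 32, lmax=3)
y = layer(torch.randn(4, 16, 16))  # 16 coefficients for lmax = 3 -> (4, 16, 32)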
- -
- -
-
-class fairchem.core.models.equiformer_v2.so3.SO3_LinearV2(in_features: int, out_features: int, lmax: int, bias: bool = True)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes:

import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their parameters converted too when you call to(), etc.

Note

As per the example above, an __init__() call to the parent class must be made before assignment on the child.

Variables:

training (bool) – Boolean represents whether this module is in training or evaluation mode.

-
-
-
-
-forward(input_embedding)#
-
- -
-
-__repr__() str#
-

Return repr(self).

\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/trainers/energy_trainer/index.html b/autoapi/fairchem/core/models/equiformer_v2/trainers/energy_trainer/index.html
deleted file mode 100644
index 8c54d35a7..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/trainers/energy_trainer/index.html
+++ /dev/null
@@ -1,695 +0,0 @@

fairchem.core.models.equiformer_v2.trainers.energy_trainer#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

EquiformerV2EnergyTrainer

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to

-
-
-class fairchem.core.models.equiformer_v2.trainers.energy_trainer.EquiformerV2EnergyTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp')#
-

Bases: fairchem.core.trainers.OCPTrainer

-

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to Relaxed State (IS2RS) tasks.

Note

Examples of configurations for task, model, dataset and optimizer can be found in configs/ocp_s2ef and configs/ocp_is2rs.

-
-
-
Parameters:
-
    -
  • task (dict) – Task configuration.

  • -
  • model (dict) – Model configuration.

  • -
  • outputs (dict) – Output property configuration.

  • -
  • dataset (dict) – Dataset configuration. The dataset needs to be a SinglePointLMDB dataset.

  • -
  • optimizer (dict) – Optimizer configuration.

  • -
  • loss_fns (dict) – Loss function configuration.

  • -
  • eval_metrics (dict) – Evaluation metrics configuration.

  • -
  • identifier (str) – Experiment identifier that is appended to log directory.

  • -
  • run_dir (str, optional) – Path to the run directory where logs are to be saved. (default: None)

  • is_debug (bool, optional) – Run in debug mode. (default: False)

  • print_every (int, optional) – Frequency of printing logs. (default: 100)

  • seed (int, optional) – Random number seed. (default: None)

  • logger (str, optional) – Type of logger to be used. (default: wandb)

  • local_rank (int, optional) – Local rank of the process, only applicable for distributed training. (default: 0)

  • amp (bool, optional) – Run using automatic mixed precision. (default: False)

  • slurm (dict) – Slurm configuration. Currently just for keeping track. (default: {})

  • -
  • noddp (bool, optional) – Run model without DDP.

  • -
-
-
-
-
-load_extras()#
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/trainers/forces_trainer/index.html b/autoapi/fairchem/core/models/equiformer_v2/trainers/forces_trainer/index.html
deleted file mode 100644
index 8cf997c1b..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/trainers/forces_trainer/index.html
+++ /dev/null
@@ -1,695 +0,0 @@

fairchem.core.models.equiformer_v2.trainers.forces_trainer#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

EquiformerV2ForcesTrainer

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to

-
-
-class fairchem.core.models.equiformer_v2.trainers.forces_trainer.EquiformerV2ForcesTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp')#
-

Bases: fairchem.core.trainers.OCPTrainer

-

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to Relaxed State (IS2RS) tasks.

Note

Examples of configurations for task, model, dataset and optimizer can be found in configs/ocp_s2ef and configs/ocp_is2rs.

-
-
-
Parameters:
-
    -
  • task (dict) – Task configuration.

  • -
  • model (dict) – Model configuration.

  • -
  • outputs (dict) – Output property configuration.

  • -
  • dataset (dict) – Dataset configuration. The dataset needs to be a SinglePointLMDB dataset.

  • -
  • optimizer (dict) – Optimizer configuration.

  • -
  • loss_fns (dict) – Loss function configuration.

  • -
  • eval_metrics (dict) – Evaluation metrics configuration.

  • -
  • identifier (str) – Experiment identifier that is appended to log directory.

  • -
  • run_dir (str, optional) – Path to the run directory where logs are to be saved. (default: None)

  • is_debug (bool, optional) – Run in debug mode. (default: False)

  • print_every (int, optional) – Frequency of printing logs. (default: 100)

  • seed (int, optional) – Random number seed. (default: None)

  • logger (str, optional) – Type of logger to be used. (default: wandb)

  • local_rank (int, optional) – Local rank of the process, only applicable for distributed training. (default: 0)

  • amp (bool, optional) – Run using automatic mixed precision. (default: False)

  • slurm (dict) – Slurm configuration. Currently just for keeping track. (default: {})

  • -
  • noddp (bool, optional) – Run model without DDP.

  • -
-
-
-
-
-load_extras() None#
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/trainers/index.html b/autoapi/fairchem/core/models/equiformer_v2/trainers/index.html
deleted file mode 100644
index 1266ce3bd..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/trainers/index.html
+++ /dev/null
@@ -1,621 +0,0 @@

fairchem.core.models.equiformer_v2.trainers#

-
-

Submodules#

\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/trainers/lr_scheduler/index.html b/autoapi/fairchem/core/models/equiformer_v2/trainers/lr_scheduler/index.html
deleted file mode 100644
index 7b8a9c114..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/trainers/lr_scheduler/index.html
+++ /dev/null
@@ -1,796 +0,0 @@

fairchem.core.models.equiformer_v2.trainers.lr_scheduler#

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - -

CosineLRLambda

MultistepLRLambda

LRScheduler

Notes

-

-
-
-

Functions#

- - - - - - - - - - - - -

multiply(obj, num)

cosine_lr_lambda(current_step, scheduler_params)

multistep_lr_lambda(→ float)

-
-
-fairchem.core.models.equiformer_v2.trainers.lr_scheduler.multiply(obj, num)#
-
- -
-
-fairchem.core.models.equiformer_v2.trainers.lr_scheduler.cosine_lr_lambda(current_step: int, scheduler_params)#
-
- -
-
-class fairchem.core.models.equiformer_v2.trainers.lr_scheduler.CosineLRLambda(scheduler_params)#
-
-
-__call__(current_step: int)#
-
- -
- -
-
-fairchem.core.models.equiformer_v2.trainers.lr_scheduler.multistep_lr_lambda(current_step: int, scheduler_params) float#
-
- -
-
-class fairchem.core.models.equiformer_v2.trainers.lr_scheduler.MultistepLRLambda(scheduler_params)#
-
-
-__call__(current_step: int) float#
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.trainers.lr_scheduler.LRScheduler(optimizer, config)#
-

Notes

1. scheduler.step() is called for every step for OC20 training.

2. We use "scheduler_params" in .yml to specify scheduler parameters.

3. For cosine learning rate, we use LambdaLR with the lambda function being cosine:

   scheduler: LambdaLR
   scheduler_params:
     lambda_type: cosine
     ...

4. Following 3., if cosine is used, scheduler_params in .yml looks like:

   scheduler: LambdaLR
   scheduler_params:
     lambda_type: cosine
     warmup_epochs: ...
     warmup_factor: ...
     lr_min_factor: ...

5. Following 3., if multistep is used, scheduler_params in .yml looks like:

   scheduler: LambdaLR
   scheduler_params:
     lambda_type: multistep
     warmup_epochs: ...
     warmup_factor: ...
     decay_epochs: ... (list)
     decay_rate: ...
-
-
Parameters:
-
    -
  • optimizer (obj) – torch optim object

  • -
  • config (dict) – Optim dict from the input config

  • -
-
-
-
-
-step(metrics=None, epoch=None)#
-
- -
-
-filter_kwargs(config)#
-
- -
-
-get_lr() float | None#
-
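As a rough illustration of the cosine option above, the lambda below combines a linear warmup with a cosine decay down to lr_min_factor and is plugged into torch.optim.lr_scheduler.LambdaLR. The parameter names mirror the config keys shown in the notes, but the max_epochs key and the exact formula are assumptions made for this sketch; the fairchem implementation may differ.

import math
import torch
from torch.optim.lr_scheduler import LambdaLR

def toy_cosine_lr_lambda(current_step, scheduler_params):
    """Warmup + cosine decay sketch; steps stand in for epochs here."""
    warmup_steps = scheduler_params["warmup_epochs"]
    warmup_factor = scheduler_params["warmup_factor"]
    lr_min_factor = scheduler_params["lr_min_factor"]
    max_steps = scheduler_params["max_epochs"]          # assumed key for this sketch
    if current_step < warmup_steps:
        alpha = current_step / max(1, warmup_steps)
        return warmup_factor * (1 - alpha) + alpha       # linear warmup to 1.0
    progress = (current_step - warmup_steps) / max(1, max_steps - warmup_steps)
    return lr_min_factor + 0.5 * (1 - lr_min_factor) * (1 + math.cos(math.pi * progress))

params = {"warmup_epochs": 10, "warmup_factor": 0.2, "lr_min_factor": 0.01, "max_epochs": 100}
optimizer = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=5e-4)
scheduler = LambdaLR(optimizer, lr_lambda=lambda step: toy_cosine_lr_lambda(step, params))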
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/transformer_block/index.html b/autoapi/fairchem/core/models/equiformer_v2/transformer_block/index.html
deleted file mode 100644
index b76171796..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/transformer_block/index.html
+++ /dev/null
@@ -1,786 +0,0 @@

fairchem.core.models.equiformer_v2.transformer_block#

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - -

SO2EquivariantGraphAttention

SO2EquivariantGraphAttention: Perform MLP attention + non-linear message passing

FeedForwardNetwork

FeedForwardNetwork: Perform feedforward network with S2 activation or gate activation

TransBlockV2

param sphere_channels: Number of spherical channels

-
-
-class fairchem.core.models.equiformer_v2.transformer_block.SO2EquivariantGraphAttention(sphere_channels: int, hidden_channels: int, num_heads: int, attn_alpha_channels: int, attn_value_channels: int, output_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_rotation, mappingReduced, SO3_grid, max_num_elements: int, edge_channels_list, use_atom_edge_embedding: bool = True, use_m_share_rad: bool = False, activation='scaled_silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, use_gate_act: bool = False, use_sep_s2_act: bool = True, alpha_drop: float = 0.0)#
-

Bases: torch.nn.Module

-
-
SO2EquivariantGraphAttention: Perform MLP attention + non-linear message passing

SO(2) Convolution with radial function -> S2 Activation -> SO(2) Convolution -> attention weights and non-linear messages
attention weights * non-linear messages -> Linear

-
-
-
-
Parameters:
-
    -
  • sphere_channels (int) – Number of spherical channels

  • -
  • hidden_channels (int) – Number of hidden channels used during the SO(2) conv

  • -
  • num_heads (int) – Number of attention heads

  • -
  • attn_alpha_head (int) – Number of channels for alpha vector in each attention head

  • -
  • attn_value_head (int) – Number of channels for value vector in each attention head

  • -
  • output_channels (int) – Number of output channels

  • -
  • lmax_list (list[int]) – List of degrees (l) for each resolution

  • mmax_list (list[int]) – List of orders (m) for each resolution

  • SO3_rotation (list[SO3_Rotation]) – Class to calculate Wigner-D matrices and rotate embeddings

  • mappingReduced (CoefficientMappingModule) – Class to convert l and m indices once node embedding is rotated

  • SO3_grid (SO3_grid) – Class used to convert from grid the spherical harmonic representations

  • max_num_elements (int) – Maximum number of atomic numbers

  • edge_channels_list (list[int]) – List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. The last one will be used as hidden size when use_atom_edge_embedding is True.

  • -
  • use_atom_edge_embedding (bool) – Whether to use atomic embedding along with relative distance for edge scalar features

  • -
  • use_m_share_rad (bool) – Whether all m components within a type-L vector of one channel share radial function weights

  • -
  • activation (str) – Type of activation function

  • -
  • use_s2_act_attn (bool) – Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer

  • -
  • use_attn_renorm (bool) – Whether to re-normalize attention weights

  • -
  • use_gate_act (bool) – If True, use gate activation. Otherwise, use S2 activation.

  • -
  • use_sep_s2_act (bool) – If True, use separable S2 activation when use_gate_act is False.

  • -
  • alpha_drop (float) – Dropout rate for attention weights

  • -
-
-
-
-
-forward(x: torch.Tensor, atomic_numbers, edge_distance: torch.Tensor, edge_index)#
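The "attention weights * non-linear messages" step boils down to a per-edge softmax over edges sharing a destination node, followed by a weighted scatter-sum. The toy snippet below shows that aggregation pattern with plain tensors; it is not the SO(2)-equivariant attention itself, which additionally rotates features and operates on spherical-harmonic embeddings.

import torch

def segment_softmax(scores, index, num_nodes):
    """Softmax of per-edge scores grouped by destination node (illustrative)."""
    scores = (scores - scores.max()).exp()
    denom = torch.zeros(num_nodes).index_add_(0, index, scores)
    return scores / (denom[index] + 1e-12)

edge_dst = torch.tensor([0, 0, 1, 1, 2, 2])       # destination node of each edge
logits = torch.randn(6)                           # per-edge attention logits
messages = torch.randn(6, 8)                      # per-edge non-linear messages
alpha = segment_softmax(logits, edge_dst, num_nodes=3)
node_out = torch.zeros(3, 8).index_add_(0, edge_dst, alpha.unsqueeze(-1) * messages)
print(node_out.shape)  # -> torch.Size([3, 8])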
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.transformer_block.FeedForwardNetwork(sphere_channels: int, hidden_channels: int, output_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_grid, activation: str = 'scaled_silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True)#
-

Bases: torch.nn.Module

-

FeedForwardNetwork: Perform feedforward network with S2 activation or gate activation

-
-
Parameters:
-
    -
  • sphere_channels (int) – Number of spherical channels

  • -
  • hidden_channels (int) – Number of hidden channels used during feedforward network

  • -
  • output_channels (int) – Number of output channels

  • -
  • lmax_list (list[int]) – List of degrees (l) for each resolution

  • mmax_list (list[int]) – List of orders (m) for each resolution

  • -
  • SO3_grid (SO3_grid) – Class used to convert from grid the spherical harmonic representations

  • -
  • activation (str) – Type of activation function

  • -
  • use_gate_act (bool) – If True, use gate activation. Otherwise, use S2 activation

  • -
  • use_grid_mlp (bool) – If True, use projecting to grids and performing MLPs.

  • -
  • use_sep_s2_act (bool) – If True, use separable grid MLP when use_grid_mlp is True.

  • -
-
-
-
-
-forward(input_embedding)#
-
- -
- -
-
-class fairchem.core.models.equiformer_v2.transformer_block.TransBlockV2(sphere_channels: int, attn_hidden_channels: int, num_heads: int, attn_alpha_channels: int, attn_value_channels: int, ffn_hidden_channels: int, output_channels: int, lmax_list: list[int], mmax_list: list[int], SO3_rotation, mappingReduced, SO3_grid, max_num_elements: int, edge_channels_list: list[int], use_atom_edge_embedding: bool = True, use_m_share_rad: bool = False, attn_activation: str = 'silu', use_s2_act_attn: bool = False, use_attn_renorm: bool = True, ffn_activation: str = 'silu', use_gate_act: bool = False, use_grid_mlp: bool = False, use_sep_s2_act: bool = True, norm_type: str = 'rms_norm_sh', alpha_drop: float = 0.0, drop_path_rate: float = 0.0, proj_drop: float = 0.0)#
-

Bases: torch.nn.Module

-
-
Parameters:
-
    -
  • sphere_channels (int) – Number of spherical channels

  • -
  • attn_hidden_channels (int) – Number of hidden channels used during SO(2) graph attention

  • -
  • num_heads (int) – Number of attention heads

  • -
  • attn_alpha_head (int) – Number of channels for alpha vector in each attention head

  • -
  • attn_value_head (int) – Number of channels for value vector in each attention head

  • -
  • ffn_hidden_channels (int) – Number of hidden channels used during feedforward network

  • -
  • output_channels (int) – Number of output channels

  • -
  • lmax_list (list[int]) – List of degrees (l) for each resolution

  • mmax_list (list[int]) – List of orders (m) for each resolution

  • SO3_rotation (list[SO3_Rotation]) – Class to calculate Wigner-D matrices and rotate embeddings

  • mappingReduced (CoefficientMappingModule) – Class to convert l and m indices once node embedding is rotated

  • SO3_grid (SO3_grid) – Class used to convert from grid the spherical harmonic representations

  • max_num_elements (int) – Maximum number of atomic numbers

  • edge_channels_list (list[int]) – List of sizes of invariant edge embedding. For example, [input_channels, hidden_channels, hidden_channels]. The last one will be used as hidden size when use_atom_edge_embedding is True.

  • -
  • use_atom_edge_embedding (bool) – Whether to use atomic embedding along with relative distance for edge scalar features

  • -
  • use_m_share_rad (bool) – Whether all m components within a type-L vector of one channel share radial function weights

  • -
  • attn_activation (str) – Type of activation function for SO(2) graph attention

  • -
  • use_s2_act_attn (bool) – Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer

  • -
  • use_attn_renorm (bool) – Whether to re-normalize attention weights

  • -
  • ffn_activation (str) – Type of activation function for feedforward network

  • -
  • use_gate_act (bool) – If True, use gate activation. Otherwise, use S2 activation

  • -
  • use_grid_mlp (bool) – If True, use projecting to grids and performing MLPs for FFN.

  • -
  • use_sep_s2_act (bool) – If True, use separable S2 activation when use_gate_act is False.

  • -
  • norm_type (str) – Type of normalization layer ([‘layer_norm’, ‘layer_norm_sh’])

  • -
  • alpha_drop (float) – Dropout rate for attention weights

  • -
  • drop_path_rate (float) – Drop path rate

  • -
  • proj_drop (float) – Dropout rate for outputs of attention and FFN

  • -
-
-
-
-
-forward(x, atomic_numbers, edge_distance, edge_index, batch)#
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/equiformer_v2/wigner/index.html b/autoapi/fairchem/core/models/equiformer_v2/wigner/index.html
deleted file mode 100644
index 8dab41f43..000000000
--- a/autoapi/fairchem/core/models/equiformer_v2/wigner/index.html
+++ /dev/null
@@ -1,670 +0,0 @@

fairchem.core.models.equiformer_v2.wigner#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

wigner_D(→ torch.Tensor)

_z_rot_mat(→ torch.Tensor)

-
-
-

Attributes#

- - - - - - -

_Jd

-
-
-fairchem.core.models.equiformer_v2.wigner._Jd#
-
- -
-
-fairchem.core.models.equiformer_v2.wigner.wigner_D(lv: int, alpha: torch.Tensor, beta: torch.Tensor, gamma: torch.Tensor) torch.Tensor#
-
- -
-
-fairchem.core.models.equiformer_v2.wigner._z_rot_mat(angle: torch.Tensor, lv: int) torch.Tensor#
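For reference, a z-axis rotation in a real spherical-harmonic basis of degree lv is a (2*lv+1, 2*lv+1) block with cosines on the diagonal and sines on the anti-diagonal, indexed by the order frequencies. The sketch below follows the common e3nn-style layout for a single scalar angle; it is illustrative and may differ from the exact conventions used in this module.

import torch

def toy_z_rot_mat(angle: torch.Tensor, lv: int) -> torch.Tensor:
    """Illustrative (2*lv+1, 2*lv+1) z-rotation block for real spherical harmonics."""
    size = 2 * lv + 1
    freqs = torch.arange(lv, -lv - 1, -1, dtype=angle.dtype)  # l, l-1, ..., -l
    M = angle.new_zeros(size, size)
    idx = torch.arange(size)
    M[idx, idx.flip(0)] = torch.sin(freqs * angle)            # anti-diagonal first
    M[idx, idx] = torch.cos(freqs * angle)                    # then the diagonal
    return M

D = toy_z_rot_mat(torch.tensor(0.3), lv=2)
print(torch.allclose(D @ D.T, torch.eye(5), atol=1e-6))       # rotations are orthogonal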
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/escn/escn/index.html b/autoapi/fairchem/core/models/escn/escn/index.html
deleted file mode 100644
index e9edd2717..000000000
--- a/autoapi/fairchem/core/models/escn/escn/index.html
+++ /dev/null
@@ -1,941 +0,0 @@

fairchem.core.models.escn.escn#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - - - - - - - - - - -

eSCN

Equivariant Spherical Channel Network

LayerBlock

Layer block: Perform one layer (message passing and aggregation) of the GNN

MessageBlock

Message block: Perform message passing

SO2Block

SO(2) Block: Perform SO(2) convolutions for all m (orders)

SO2Conv

SO(2) Conv: Perform an SO(2) convolution

EdgeBlock

Edge Block: Compute invariant edge representation from edge distances and atomic numbers

EnergyBlock

Energy Block: Output block computing the energy

ForceBlock

Force Block: Output block computing the per atom forces

-
-
-class fairchem.core.models.escn.escn.eSCN(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_neighbors: int = 40, cutoff: float = 8.0, max_num_elements: int = 90, num_layers: int = 8, lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, sphere_channels: int = 128, hidden_channels: int = 256, edge_channels: int = 128, use_grid: bool = True, num_sphere_samples: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False)#
-

Bases: fairchem.core.models.base.BaseModel

-

Equivariant Spherical Channel Network
Paper: Reducing SO(3) Convolutions to SO(2) for Efficient Equivariant GNNs

-
-
Parameters:
-
    -
  • use_pbc (bool) – Use periodic boundary conditions

  • -
  • regress_forces (bool) – Compute forces

  • -
  • otf_graph (bool) – Compute graph On The Fly (OTF)

  • -
  • max_neighbors (int) – Maximum number of neighbors per atom

  • -
  • cutoff (float) – Maximum distance between neighboring atoms in Angstroms

  • -
  • max_num_elements (int) – Maximum atomic number

  • -
  • num_layers (int) – Number of layers in the GNN

  • -
  • lmax_list (int) – List of maximum degree of the spherical harmonics (1 to 10)

  • -
  • mmax_list (int) – List of maximum order of the spherical harmonics (0 to lmax)

  • -
  • sphere_channels (int) – Number of spherical channels (one set per resolution)

  • -
  • hidden_channels (int) – Number of hidden units in message passing

  • -
  • num_sphere_samples (int) – Number of samples used to approximate the integration of the sphere in the output blocks

  • -
  • edge_channels (int) – Number of channels for the edge invariant features

  • -
  • distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu") – Basis function used for distances

  • -
  • basis_width_scalar (float) – Width of distance basis function

  • -
  • distance_resolution (float) – Distance between distance basis functions in Angstroms

  • -
  • show_timing_info (bool) – Show timing and memory info

  • -
-
-
-
-
-property num_params: int#
-
- -
-
-forward(data)#
-
- -
-
-_init_edge_rot_mat(data, edge_index, edge_distance_vec)#
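To illustrate how the distance_function, distance_resolution and basis_width_scalar options fit together, the toy Gaussian expansion below places evenly spaced Gaussians between 0 and the cutoff and evaluates them on a batch of distances. It is a generic sketch, not the fairchem implementation.

import torch
from torch import nn

class ToyGaussianBasis(nn.Module):
    """Illustrative Gaussian distance expansion with evenly spaced centers."""
    def __init__(self, cutoff=8.0, resolution=0.02, width_scalar=1.0):
        super().__init__()
        self.register_buffer("centers", torch.arange(0.0, cutoff + resolution, resolution))
        self.coeff = -0.5 / (width_scalar * resolution) ** 2

    def forward(self, distances):
        diff = distances.unsqueeze(-1) - self.centers
        return torch.exp(self.coeff * diff.pow(2))

basis = ToyGaussianBasis()
features = basis(torch.tensor([1.2, 3.4]))  # -> (2, num_basis_functions)
print(features.shape)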
-
- -
- -
-
-class fairchem.core.models.escn.escn.LayerBlock(layer_idx: int, sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], distance_expansion, max_num_elements: int, SO3_grid: fairchem.core.models.escn.so3.SO3_Grid, act)#
-

Bases: torch.nn.Module

-

Layer block: Perform one layer (message passing and aggregation) of the GNN

-
-
Parameters:
-
    -
  • layer_idx (int) – Layer number

  • -
  • sphere_channels (int) – Number of spherical channels

  • -
  • hidden_channels (int) – Number of hidden channels used during the SO(2) conv

  • -
  • edge_channels (int) – Size of invariant edge embedding

  • -
  • lmax_list (list[int]) – List of degrees (l) for each resolution

  • mmax_list (list[int]) – List of orders (m) for each resolution

  • -
  • distance_expansion (func) – Function used to compute distance embedding

  • -
  • max_num_elements (int) – Maximum number of atomic numbers

  • -
  • SO3_grid (SO3_grid) – Class used to convert from grid the spherical harmonic representations

  • -
  • act (function) – Non-linear activation function

  • -
-
-
-
-
-forward(x, atomic_numbers, edge_distance, edge_index, SO3_edge_rot, mappingReduced)#
-
- -
- -
-
-class fairchem.core.models.escn.escn.MessageBlock(layer_idx: int, sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], distance_expansion, max_num_elements: int, SO3_grid: fairchem.core.models.escn.so3.SO3_Grid, act)#
-

Bases: torch.nn.Module

-

Message block: Perform message passing

-
-
Parameters:
-
    -
  • layer_idx (int) – Layer number

  • -
  • sphere_channels (int) – Number of spherical channels

  • -
  • hidden_channels (int) – Number of hidden channels used during the SO(2) conv

  • -
  • edge_channels (int) – Size of invariant edge embedding

  • -
  • lmax_list (list[int]) – List of degrees (l) for each resolution

  • mmax_list (list[int]) – List of orders (m) for each resolution

  • -
  • distance_expansion (func) – Function used to compute distance embedding

  • -
  • max_num_elements (int) – Maximum number of atomic numbers

  • -
  • SO3_grid (SO3_grid) – Class used to convert from grid the spherical harmonic representations

  • -
  • act (function) – Non-linear activation function

  • -
-
-
-
-
-forward(x, atomic_numbers, edge_distance, edge_index, SO3_edge_rot, mappingReduced)#
-
- -
- -
-
-class fairchem.core.models.escn.escn.SO2Block(sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], act)#
-

Bases: torch.nn.Module

-

SO(2) Block: Perform SO(2) convolutions for all m (orders)

-
-
Parameters:
-
    -
  • sphere_channels (int) – Number of spherical channels

  • -
  • hidden_channels (int) – Number of hidden channels used during the SO(2) conv

  • -
  • edge_channels (int) – Size of invariant edge embedding

  • -
  • lmax_list (list[int]) – List of degrees (l) for each resolution

  • mmax_list (list[int]) – List of orders (m) for each resolution

  • -
  • act (function) – Non-linear activation function

  • -
-
-
-
-
-forward(x, x_edge, mappingReduced)#
-
- -
- -
-
-class fairchem.core.models.escn.escn.SO2Conv(m: int, sphere_channels: int, hidden_channels: int, edge_channels: int, lmax_list: list[int], mmax_list: list[int], act)#
-

Bases: torch.nn.Module

-

SO(2) Conv: Perform an SO(2) convolution

-
-
Parameters:
-
    -
  • m (int) – Order of the spherical harmonic coefficients

  • -
  • sphere_channels (int) – Number of spherical channels

  • -
  • hidden_channels (int) – Number of hidden channels used during the SO(2) conv

  • -
  • edge_channels (int) – Size of invariant edge embedding

  • -
  • lmax_list (list[int]) – List of degrees (l) for each resolution

  • mmax_list (list[int]) – List of orders (m) for each resolution

  • -
  • act (function) – Non-linear activation function

  • -
-
-
-
-
-forward(x_m, x_edge) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.escn.escn.EdgeBlock(edge_channels, distance_expansion, max_num_elements, act)#
-

Bases: torch.nn.Module

-

Edge Block: Compute invariant edge representation from edge distances and atomic numbers

-
-
Parameters:
-
    -
  • edge_channels (int) – Size of invariant edge embedding

  • -
  • distance_expansion (func) – Function used to compute distance embedding

  • -
  • max_num_elements (int) – Maximum number of atomic numbers

  • -
  • act (function) – Non-linear activation function

  • -
-
-
-
-
-forward(edge_distance, source_element, target_element)#
-
- -
- -
-
-class fairchem.core.models.escn.escn.EnergyBlock(num_channels: int, num_sphere_samples: int, act)#
-

Bases: torch.nn.Module

-

Energy Block: Output block computing the energy

-
-
Parameters:
-
    -
  • num_channels (int) – Number of channels

  • -
  • num_sphere_samples (int) – Number of samples used to approximate the integral on the sphere

  • -
  • act (function) – Non-linear activation function

  • -
-
-
-
-
-forward(x_pt) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.escn.escn.ForceBlock(num_channels: int, num_sphere_samples: int, act)#
-

Bases: torch.nn.Module

-

Force Block: Output block computing the per atom forces

-
-
Parameters:
-
    -
  • num_channels (int) – Number of channels

  • -
  • num_sphere_samples (int) – Number of samples used to approximate the integral on the sphere

  • -
  • act (function) – Non-linear activation function

  • -
-
-
-
-
-forward(x_pt, sphere_points) torch.Tensor#
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/escn/index.html b/autoapi/fairchem/core/models/escn/index.html
deleted file mode 100644
index a89ce90fa..000000000
--- a/autoapi/fairchem/core/models/escn/index.html
+++ /dev/null
@@ -1,703 +0,0 @@

fairchem.core.models.escn#

-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - -

eSCN

Equivariant Spherical Channel Network

-
-
-class fairchem.core.models.escn.eSCN(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_neighbors: int = 40, cutoff: float = 8.0, max_num_elements: int = 90, num_layers: int = 8, lmax_list: list[int] | None = None, mmax_list: list[int] | None = None, sphere_channels: int = 128, hidden_channels: int = 256, edge_channels: int = 128, use_grid: bool = True, num_sphere_samples: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False)#
-

Bases: fairchem.core.models.base.BaseModel

-

Equivariant Spherical Channel Network
Paper: Reducing SO(3) Convolutions to SO(2) for Efficient Equivariant GNNs

-
-
Parameters:
-
    -
  • use_pbc (bool) – Use periodic boundary conditions

  • -
  • regress_forces (bool) – Compute forces

  • -
  • otf_graph (bool) – Compute graph On The Fly (OTF)

  • -
  • max_neighbors (int) – Maximum number of neighbors per atom

  • -
  • cutoff (float) – Maximum distance between neighboring atoms in Angstroms

  • -
  • max_num_elements (int) – Maximum atomic number

  • -
  • num_layers (int) – Number of layers in the GNN

  • -
  • lmax_list (int) – List of maximum degree of the spherical harmonics (1 to 10)

  • -
  • mmax_list (int) – List of maximum order of the spherical harmonics (0 to lmax)

  • -
  • sphere_channels (int) – Number of spherical channels (one set per resolution)

  • -
  • hidden_channels (int) – Number of hidden units in message passing

  • -
  • num_sphere_samples (int) – Number of samples used to approximate the integration of the sphere in the output blocks

  • -
  • edge_channels (int) – Number of channels for the edge invariant features

  • -
  • distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu") – Basis function used for distances

  • -
  • basis_width_scalar (float) – Width of distance basis function

  • -
  • distance_resolution (float) – Distance between distance basis functions in Angstroms

  • -
  • show_timing_info (bool) – Show timing and memory info

  • -
-
-
-
-
-property num_params: int#
-
- -
-
-forward(data)#
-
- -
-
-_init_edge_rot_mat(data, edge_index, edge_distance_vec)#
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/escn/so3/index.html b/autoapi/fairchem/core/models/escn/so3/index.html
deleted file mode 100644
index 1aaf76474..000000000
--- a/autoapi/fairchem/core/models/escn/so3/index.html
+++ /dev/null
@@ -1,934 +0,0 @@

fairchem.core.models.escn.so3#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - -

CoefficientMapping

Helper functions for coefficients used to reshape l<-->m and to get coefficients of specific degree or order

SO3_Embedding

Helper functions for irreps embedding

SO3_Rotation

Helper functions for Wigner-D rotations

SO3_Grid

Helper functions for grid representation of the irreps

-
-
-

Attributes#

- - - - - - -

_Jd

-
-
-fairchem.core.models.escn.so3._Jd#
-
- -
-
-class fairchem.core.models.escn.so3.CoefficientMapping(lmax_list: list[int], mmax_list: list[int], device)#
-

Helper functions for coefficients used to reshape l<-->m and to get coefficients of specific degree or order

-
-
Parameters:
-
    -
  • lmax_list (list[int]) – List of maximum degree of the spherical harmonics

  • mmax_list (list[int]) – List of maximum order of the spherical harmonics

  • -
  • device – Device of the output

  • -
-
-
-
-
-complex_idx(m, lmax: int = -1)#
-
- -
-
-coefficient_idx(lmax: int, mmax: int) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.escn.so3.SO3_Embedding(length: int, lmax_list: list[int], num_channels: int, device: torch.device, dtype: torch.dtype)#
-

Bases: torch.nn.Module

-

Helper functions for irreps embedding

-
-
Parameters:
-
    -
  • length (int) – Batch size

  • -
  • lmax_list (list[int]) – List of maximum degree of the spherical harmonics

  • -
  • num_channels (int) – Number of channels

  • -
  • device – Device of the output

  • -
  • dtype – type of the output tensors

  • -
-
-
-
-
-clone() SO3_Embedding#
-
- -
-
-set_embedding(embedding) None#
-
- -
-
-set_lmax_mmax(lmax_list, mmax_list) None#
-
- -
-
-_expand_edge(edge_index) None#
-
- -
-
-expand_edge(edge_index) SO3_Embedding#
-
- -
-
-_reduce_edge(edge_index, num_nodes: int) None#
-
- -
-
-_m_primary(mapping) None#
-
- -
-
-_l_primary(mapping) None#
-
- -
-
-_rotate(SO3_rotation, lmax_list, mmax_list) None#
-
- -
-
-_rotate_inv(SO3_rotation, mappingReduced) None#
-
- -
-
-_grid_act(SO3_grid, act, mappingReduced) None#
-
- -
-
-to_grid(SO3_grid, lmax: int = -1) torch.Tensor#
-
- -
-
-_from_grid(x_grid, SO3_grid, lmax: int = -1) None#
-
- -
- -
-
-class fairchem.core.models.escn.so3.SO3_Rotation(rot_mat3x3: torch.Tensor, lmax: list[int])#
-

Bases: torch.nn.Module

-

Helper functions for Wigner-D rotations

-
-
Parameters:
-
    -
  • rot_mat3x3 (tensor) – Rotation matrix

  • -
  • lmax_list (list[int]) – List of maximum degree of the spherical harmonics

  • -
-
-
-
-
-set_lmax(lmax) None#
-
- -
-
-rotate(embedding, out_lmax, out_mmax) torch.Tensor#
-
- -
-
-rotate_inv(embedding, in_lmax, in_mmax) torch.Tensor#
-
- -
-
-RotationToWignerDMatrix(edge_rot_mat: torch.Tensor, start_lmax: int, end_lmax: int) torch.Tensor#
-
- -
-
-wigner_D(lval, alpha, beta, gamma)#
-
- -
-
-_z_rot_mat(angle: torch.Tensor, lv: int) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.escn.so3.SO3_Grid(lmax: int, mmax: int)#
-

Bases: torch.nn.Module

-

Helper functions for grid representation of the irreps

-
-
Parameters:
-
    -
  • lmax (int) – Maximum degree of the spherical harmonics

  • -
  • mmax (int) – Maximum order of the spherical harmonics

  • -
-
-
-
-
-_initialize(device: torch.device) None#
-
- -
-
-get_to_grid_mat(device: torch.device)#
-
- -
-
-get_from_grid_mat(device: torch.device)#
-
- -
-
-to_grid(embedding: torch.Tensor, lmax: int, mmax: int) torch.Tensor#
-
- -
-
-from_grid(grid: torch.Tensor, lmax: int, mmax: int) torch.Tensor#
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet/gemnet/index.html b/autoapi/fairchem/core/models/gemnet/gemnet/index.html
deleted file mode 100644
index 4ac8e919e..000000000
--- a/autoapi/fairchem/core/models/gemnet/gemnet/index.html
+++ /dev/null
@@ -1,752 +0,0 @@

fairchem.core.models.gemnet.gemnet#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

GemNetT

GemNet-T, triplets-only variant of GemNet

-
-
-class fairchem.core.models.gemnet.gemnet.GemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', num_elements: int = 83, scale_file: str | None = None)#
-

Bases: fairchem.core.models.base.BaseModel

-

GemNet-T, triplets-only variant of GemNet

-
-
Parameters:
-
    -
  • num_atoms (int)

  • bond_feat_dim (int)

  • -
  • num_targets (int) – Number of prediction targets.

  • -
  • num_spherical (int) – Controls maximum frequency.

  • -
  • num_radial (int) – Controls maximum frequency.

  • -
  • num_blocks (int) – Number of building blocks to be stacked.

  • -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_trip (int) – (Down-projected) Embedding size in the triplet message passing block.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • emb_size_bil_trip (int) – Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.

  • -
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • -
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • -
  • num_concat (int) – Number of residual blocks after the concatenation.

  • -
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • -
  • regress_forces (bool) – Whether to predict forces. Default: True

  • -
  • direct_forces (bool) – If True, predict forces based on aggregation of interatomic directions. If False, predict forces based on the negative gradient of the energy potential.

  • cutoff (float) – Embedding cutoff for interatomic directions in Angstrom.

  • -
  • rbf (dict) – Name and hyperparameters of the radial basis function.

  • -
  • envelope (dict) – Name and hyperparameters of the envelope function.

  • -
  • cbf (dict) – Name and hyperparameters of the cosine basis function.

  • -
  • extensive (bool) – Whether the output should be extensive (proportional to the number of atoms)

  • -
  • output_init (str) – Initialization method for the final dense layer.

  • -
  • activation (str) – Name of the activation function.

  • -
  • scale_file (str) – Path to the json file containing the scaling factors.

  • -
-
-
-
-
-property num_params#
-
- -
-
-get_triplets(edge_index, num_atoms)#
-

Get all b->a for each edge c->a. It is possible that b=c, as long as the edges are distinct.

-
-
Returns:
-

    -
  • id3_ba (torch.Tensor, shape (num_triplets,)) – Indices of input edge b->a of each triplet b->a<-c

  • -
  • id3_ca (torch.Tensor, shape (num_triplets,)) – Indices of output edge c->a of each triplet b->a<-c

  • -
  • id3_ragged_idx (torch.Tensor, shape (num_triplets,)) – Indices enumerating the copies of id3_ca for creating a padded matrix

  • -
-

-
-
-
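To make the triplet definition above concrete, the brute-force helper below enumerates, for every target atom a, all ordered pairs of distinct edges b->a and c->a. It is a readability-first sketch written for this document; the real get_triplets is vectorized and also returns the ragged indices used for padding.

import torch

def toy_triplets(edge_index, num_atoms):
    """Enumerate (b->a, c->a) edge pairs sharing target a, with distinct edges."""
    src, dst = edge_index
    id3_ba, id3_ca = [], []
    for a in range(num_atoms):
        edges_into_a = (dst == a).nonzero(as_tuple=True)[0]
        for i in edges_into_a.tolist():
            for j in edges_into_a.tolist():
                if i != j:               # edges must be distinct (atoms b and c may coincide)
                    id3_ba.append(i)
                    id3_ca.append(j)
    return torch.tensor(id3_ba), torch.tensor(id3_ca)

edge_index = torch.tensor([[1, 2, 0], [0, 0, 2]])  # edges: 1->0, 2->0, 0->2
print(toy_triplets(edge_index, num_atoms=3))       # (tensor([0, 1]), tensor([1, 0]))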
- -
-
-select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, inverse_neg) torch.Tensor#
-
- -
-
-reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector)#
-

Reorder edges to make finding counter-directional edges easier.

-

Some edges are only present in one direction in the data, since every atom has a maximum number of neighbors. Since we only use i->j edges here, we lose some j->i edges and add others by making it symmetric. We could fix this by merging edge_index with its counter-edges, including the cell_offsets, and then running torch.unique. But this does not seem worth it.

-
- -
-
-select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None)#
-
- -
-
-generate_interaction_graph(data)#
-
- -
-
-forward(data)#
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet/index.html b/autoapi/fairchem/core/models/gemnet/index.html
deleted file mode 100644
index b70ec06e6..000000000
--- a/autoapi/fairchem/core/models/gemnet/index.html
+++ /dev/null
@@ -1,781 +0,0 @@

fairchem.core.models.gemnet#

-
-

Subpackages#

- -
-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - -

GemNetT

GemNet-T, triplets-only variant of GemNet

-
-
-class fairchem.core.models.gemnet.GemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', num_elements: int = 83, scale_file: str | None = None)#
-

Bases: fairchem.core.models.base.BaseModel

-

GemNet-T, triplets-only variant of GemNet

-
-
Parameters:
-
    -
  • num_atoms (int)

  • bond_feat_dim (int)

  • -
  • num_targets (int) – Number of prediction targets.

  • -
  • num_spherical (int) – Controls maximum frequency.

  • -
  • num_radial (int) – Controls maximum frequency.

  • -
  • num_blocks (int) – Number of building blocks to be stacked.

  • -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_trip (int) – (Down-projected) Embedding size in the triplet message passing block.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • emb_size_bil_trip (int) – Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.

  • -
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • -
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • -
  • num_concat (int) – Number of residual blocks after the concatenation.

  • -
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • -
  • regress_forces (bool) – Whether to predict forces. Default: True

  • -
  • direct_forces (bool) – If True, predict forces based on aggregation of interatomic directions. If False, predict forces based on the negative gradient of the energy potential.

  • cutoff (float) – Embedding cutoff for interatomic directions in Angstrom.

  • -
  • rbf (dict) – Name and hyperparameters of the radial basis function.

  • -
  • envelope (dict) – Name and hyperparameters of the envelope function.

  • -
  • cbf (dict) – Name and hyperparameters of the cosine basis function.

  • -
  • extensive (bool) – Whether the output should be extensive (proportional to the number of atoms)

  • -
  • output_init (str) – Initialization method for the final dense layer.

  • -
  • activation (str) – Name of the activation function.

  • -
  • scale_file (str) – Path to the json file containing the scaling factors.

  • -
-
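To make the parameter list concrete, the following sketch instantiates a small GemNetT directly from the signature above. The hyperparameter values are illustrative only (not recommended settings), and whether the model builds cleanly in a given environment may depend on scaling-factor files and other package state:

from fairchem.core.models.gemnet import GemNetT

# Illustrative values only -- chosen small for readability, not for accuracy.
model = GemNetT(
    num_atoms=None,
    bond_feat_dim=0,
    num_targets=1,
    num_spherical=7,
    num_radial=64,
    num_blocks=3,
    emb_size_atom=128,
    emb_size_edge=128,
    emb_size_trip=64,
    emb_size_rbf=16,
    emb_size_cbf=16,
    emb_size_bil_trip=64,
    num_before_skip=1,
    num_after_skip=1,
    num_concat=1,
    num_atom=2,
    regress_forces=True,
    direct_forces=True,
    cutoff=6.0,
    max_neighbors=50,
    otf_graph=True,
)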
-
-
-
-property num_params#
-
- -
-
-get_triplets(edge_index, num_atoms)#
-

Get all b->a for each edge c->a. It is possible that b = c, as long as the edges are distinct.

-
-
Returns:
-

    -
  • id3_ba (torch.Tensor, shape (num_triplets,)) – Indices of input edge b->a of each triplet b->a<-c

  • -
  • id3_ca (torch.Tensor, shape (num_triplets,)) – Indices of output edge c->a of each triplet b->a<-c

  • -
  • id3_ragged_idx (torch.Tensor, shape (num_triplets,)) – Indices enumerating the copies of id3_ca for creating a padded matrix

  • -
-
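The triplet indices described above can be illustrated with a small brute-force sketch (quadratic in the number of edges, so for intuition only; the library builds them more efficiently, and the function name here is mine):

import torch

def triplets_bruteforce(edge_index: torch.Tensor):
    # edge_index has shape (2, nEdges); each column is an edge c -> a (source, target).
    idx_t = edge_index[1]                                # target atom a of every edge
    same_target = idx_t[:, None] == idx_t[None, :]       # pairs of edges sharing the target atom
    same_target.fill_diagonal_(False)                    # an edge does not pair with itself
    id3_ba, id3_ca = same_target.nonzero(as_tuple=True)  # input edge b->a, output edge c->a
    return id3_ba, id3_ca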

-
-
-
- -
-
-select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, inverse_neg) torch.Tensor#
-
- -
-
-reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector)#
-

Reorder edges to make finding counter-directional edges easier.

-

Some edges are only present in one direction in the data, since every atom has a maximum number of neighbors. Since we only use i->j edges here, we lose some j->i edges and add others by making it symmetric. We could fix this by merging edge_index with its counter-edges, including the cell_offsets, and then running torch.unique. But this does not seem worth it.

-
- -
-
-select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None)#
-
- -
-
-generate_interaction_graph(data)#
-
- -
-
-forward(data)#
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet/initializers/index.html b/autoapi/fairchem/core/models/gemnet/initializers/index.html
deleted file mode 100644
index d32a4b879..000000000
--- a/autoapi/fairchem/core/models/gemnet/initializers/index.html
+++ /dev/null
@@ -1,661 +0,0 @@
(deleted HTML page: fairchem.core.models.gemnet.initializers — FAIR Chemistry Documentation; navigation, sidebar, and footer markup omitted)

fairchem.core.models.gemnet.initializers#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

_standardize(kernel)

Makes sure that N*Var(W) = 1 and E[W] = 0

he_orthogonal_init(→ torch.Tensor)

Generate a weight matrix with variance according to He (Kaiming) initialization.

-
-
-fairchem.core.models.gemnet.initializers._standardize(kernel)#
-

Makes sure that N*Var(W) = 1 and E[W] = 0

-
- -
-
-fairchem.core.models.gemnet.initializers.he_orthogonal_init(tensor: torch.Tensor) torch.Tensor#
-

Generate a weight matrix with variance according to He (Kaiming) initialization. Based on a random (semi-)orthogonal matrix, neural networks are expected to learn better when features are decorrelated (as stated by e.g. “Reducing overfitting in deep networks by decorrelating representations”, “Dropout: a simple way to prevent neural networks from overfitting”, “Exact solutions to the nonlinear dynamics of learning in deep linear neural networks”).
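A rough sketch of what such an initializer can look like is given below; the exact standardization used by the library may differ, and the function name is mine:

import math
import torch

def he_orthogonal_init_sketch(tensor: torch.Tensor) -> torch.Tensor:
    # Sketch only: orthogonal init, then centre and rescale so that fan_in * Var(W) ~ 1.
    torch.nn.init.orthogonal_(tensor)
    fan_in = tensor.shape[1]
    with torch.no_grad():
        tensor.sub_(tensor.mean())
        tensor.div_(tensor.std() * math.sqrt(fan_in))
    return tensor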

\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet/layers/atom_update_block/index.html b/autoapi/fairchem/core/models/gemnet/layers/atom_update_block/index.html
deleted file mode 100644
index 2521578a3..000000000
--- a/autoapi/fairchem/core/models/gemnet/layers/atom_update_block/index.html
+++ /dev/null
@@ -1,736 +0,0 @@
(deleted HTML page: fairchem.core.models.gemnet.layers.atom_update_block — FAIR Chemistry Documentation; navigation, sidebar, and footer markup omitted)

fairchem.core.models.gemnet.layers.atom_update_block#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

AtomUpdateBlock

Aggregate the message embeddings of the atoms

OutputBlock

Combines the atom update block and subsequent final dense layer.

-
-
-class fairchem.core.models.gemnet.layers.atom_update_block.AtomUpdateBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, activation=None, name: str = 'atom_update')#
-

Bases: torch.nn.Module

-

Aggregate the message embeddings of the atoms

-
-
Parameters:
-
    -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • nHidden (int) – Number of residual blocks.

  • -
  • activation (callable/str) – Name of the activation function to use in the dense layers.

  • -
-
-
-
-
-get_mlp(units_in, units, nHidden, activation)#
-
- -
-
-forward(h, m, rbf, id_j)#
-
-
Returns:
-

h – Atom embedding.

-
-
Return type:
-

torch.Tensor, shape=(nAtoms, emb_size_atom)

-
-
-
- -
- -
-
-class fairchem.core.models.gemnet.layers.atom_update_block.OutputBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, num_targets: int, activation=None, direct_forces: bool = True, output_init: str = 'HeOrthogonal', name: str = 'output', **kwargs)#
-

Bases: AtomUpdateBlock

-

Combines the atom update block and subsequent final dense layer.

-
-
Parameters:
-
    -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • nHidden (int) – Number of residual blocks.

  • -
  • num_targets (int) – Number of targets.

  • -
  • activation (str) – Name of the activation function to use in the dense layers except for the final dense layer.

  • -
  • direct_forces (bool) – If true directly predict forces without taking the gradient of the energy potential.

  • -
  • output_init (int) – Kernel initializer of the final dense layer.

  • -
-
-
-
-
-reset_parameters() None#
-
- -
-
-forward(h, m, rbf, id_j)#
-
-
Returns:
-

    -
  • (E, F) (tuple)

  • -
  • - E (torch.Tensor, shape=(nAtoms, num_targets))

  • -
  • - F (torch.Tensor, shape=(nEdges, num_targets))

  • -
  • Energy and force prediction

  • -
-

-
-
-
- -
- -
-
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - - - - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/core/models/gemnet/layers/base_layers/index.html b/autoapi/fairchem/core/models/gemnet/layers/base_layers/index.html deleted file mode 100644 index c26241bfd..000000000 --- a/autoapi/fairchem/core/models/gemnet/layers/base_layers/index.html +++ /dev/null @@ -1,811 +0,0 @@ - - - - - - - - - - - fairchem.core.models.gemnet.layers.base_layers — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - -
-

fairchem.core.models.gemnet.layers.base_layers

- -
- -
-
- - - - -
- -
-

fairchem.core.models.gemnet.layers.base_layers#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - -

Dense

Combines dense layer with scaling for swish activation.

ScaledSiLU

Base class for all neural network modules.

SiQU

Base class for all neural network modules.

ResidualLayer

Residual block with output scaled by 1/sqrt(2).

-
-
-class fairchem.core.models.gemnet.layers.base_layers.Dense(in_features, out_features, bias: bool = False, activation=None)#
-

Bases: torch.nn.Module

-

Combines dense layer with scaling for swish activation.

-
-
Parameters:
-
    -
  • units (int) – Output embedding size.

  • -
  • activation (str) – Name of the activation function to use.

  • -
  • bias (bool) – True if use bias.

  • -
-
-
-
-
-reset_parameters(initializer=he_orthogonal_init) None#
-
- -
-
-forward(x)#
-
- -
- -
-
-class fairchem.core.models.gemnet.layers.base_layers.ScaledSiLU#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x)#
-
- -
- -
-
-class fairchem.core.models.gemnet.layers.base_layers.SiQU#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x)#
-
- -
- -
-
-class fairchem.core.models.gemnet.layers.base_layers.ResidualLayer(units: int, nLayers: int = 2, layer=Dense, **layer_kwargs)#
-

Bases: torch.nn.Module

-

Residual block with output scaled by 1/sqrt(2).

-
-
Parameters:
-
    -
  • units (int) – Output embedding size.

  • -
  • nLayers (int) – Number of dense layers.

  • -
  • layer_kwargs (str) – Keyword arguments for initializing the layers.

  • -
-
-
-
-
-forward(input)#
-
- -
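The 1/sqrt(2) scaling mentioned above keeps the variance of x + f(x) roughly constant when x and f(x) are independent with equal variance. A minimal sketch under that assumption (not the library class, which builds on its own Dense layer and activation):

import math
import torch
from torch import nn

class ResidualLayerSketch(nn.Module):
    def __init__(self, units: int, n_layers: int = 2):
        super().__init__()
        self.mlp = nn.Sequential(*[nn.Linear(units, units, bias=False) for _ in range(n_layers)])
        self.inv_sqrt_2 = 1.0 / math.sqrt(2.0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Residual connection whose output is scaled by 1/sqrt(2).
        return (x + self.mlp(x)) * self.inv_sqrt_2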
- -
-
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - - - - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/core/models/gemnet/layers/basis_utils/index.html b/autoapi/fairchem/core/models/gemnet/layers/basis_utils/index.html deleted file mode 100644 index 85f117e64..000000000 --- a/autoapi/fairchem/core/models/gemnet/layers/basis_utils/index.html +++ /dev/null @@ -1,783 +0,0 @@ - - - - - - - - - - - fairchem.core.models.gemnet.layers.basis_utils — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - -
-

fairchem.core.models.gemnet.layers.basis_utils

- -
- -
-
- - - - -
- -
-

fairchem.core.models.gemnet.layers.basis_utils#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - -

Jn(r, n)

numerical spherical bessel functions of order n

Jn_zeros(n, k)

Compute the first k zeros of the spherical bessel functions up to order n (excluded)

spherical_bessel_formulas(n)

Computes the sympy formulas for the spherical bessel functions up to order n (excluded)

bessel_basis(n, k)

Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to

sph_harm_prefactor(l_degree, m_order)

Computes the constant pre-factor for the spherical harmonic of degree l and order m.

associated_legendre_polynomials(L_maxdegree[, ...])

Computes string formulas of the associated legendre polynomials up to degree L (excluded).

real_sph_harm(L_maxdegree, use_theta[, use_phi, ...])

Computes formula strings of the the real part of the spherical harmonics up to degree L (excluded).

-
-
-fairchem.core.models.gemnet.layers.basis_utils.Jn(r: float, n: int)#
-

numerical spherical bessel functions of order n

-
- -
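For intuition, a numerical spherical Bessel function of order n can be evaluated with SciPy; whether the library computes it this way is an assumption, and the function name is mine:

from scipy import special

def Jn_sketch(r: float, n: int) -> float:
    # Numerical spherical Bessel function of the first kind, order n.
    return special.spherical_jn(n, r)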
-
-fairchem.core.models.gemnet.layers.basis_utils.Jn_zeros(n: int, k: int)#
-

Compute the first k zeros of the spherical bessel functions up to order n (excluded)

-
- -
-
-fairchem.core.models.gemnet.layers.basis_utils.spherical_bessel_formulas(n: int)#
-

Computes the sympy formulas for the spherical bessel functions up to order n (excluded)

-
- -
-
-fairchem.core.models.gemnet.layers.basis_utils.bessel_basis(n: int, k: int)#
-

Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to order n (excluded) and maximum frequency k (excluded).

-
-
Returns:
-

-
list

Bessel basis formulas taking in a single argument x. The result has length n, where each element has length k, so n*k formulas in total.

-
-
-

-
-
Return type:
-

bess_basis

-
-
-
- -
-
-fairchem.core.models.gemnet.layers.basis_utils.sph_harm_prefactor(l_degree: int, m_order: int)#
-

Computes the constant pre-factor for the spherical harmonic of degree l and order m.

-
-
Parameters:
-
    -
  • l_degree (int) – Degree of the spherical harmonic. l >= 0

  • -
  • m_order (int) – Order of the spherical harmonic. -l <= m <= l

  • -
-
-
Returns:
-

factor

-
-
Return type:
-

float

-
-
-
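Assuming the usual convention for real spherical harmonics (an assumption about the library's normalization), the prefactor in question is

N_l^m = \sqrt{\frac{2l+1}{4\pi}\,\frac{(l-m)!}{(l+m)!}}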
- -
-
-fairchem.core.models.gemnet.layers.basis_utils.associated_legendre_polynomials(L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True)#
-

Computes string formulas of the associated legendre polynomials up to degree L (excluded).

-
-
Parameters:
-
    -
  • L_maxdegree (int) – Degree up to which to calculate the associated legendre polynomials (degree L is excluded).

  • -
  • zero_m_only (bool) – If True only calculate the polynomials for the polynomials where m=0.

  • -
  • pos_m_only (bool) – If True only calculate the polynomials for the polynomials where m>=0. Overwritten by zero_m_only.

  • -
-
-
Returns:
-

polynomials – Contains the sympy functions of the polynomials (in total L many if zero_m_only is True else L^2 many).

-
-
Return type:
-

list

-
-
-
- -
-
-fairchem.core.models.gemnet.layers.basis_utils.real_sph_harm(L_maxdegree: int, use_theta: bool, use_phi: bool = True, zero_m_only: bool = True)#
-

Computes formula strings of the real part of the spherical harmonics up to degree L (excluded). Variables are either spherical coordinates phi and theta (or Cartesian coordinates x, y, z) on the UNIT SPHERE.

-
-
Parameters:
-
    -
  • L_maxdegree (int) – Degree up to which to calculate the spherical harmonics (degree L is excluded).

  • -
  • use_theta (bool) –

      -
    • True: Expects the input of the formula strings to contain theta.

    • -
    • False: Expects the input of the formula strings to contain z.

    • -
    -

  • -
  • use_phi (bool) –

      -
    • True: Expects the input of the formula strings to contain phi.

    • -
    • False: Expects the input of the formula strings to contain x and y.

    • -
    -

    Does nothing if zero_m_only is True

    -

  • -
  • zero_m_only (bool) – If True only calculate the harmonics where m=0.

  • -
-
-
Returns:
-

Y_lm_real – Formula strings of the real part of the spherical harmonics up to degree L. In total, L^2 spherical harmonics exist up to degree L (excluded); however, if zero_m_only is True, the total count is reduced to only L.

-
-
Return type:
-

list

-
-
-
- -
-
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - - - - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/core/models/gemnet/layers/efficient/index.html b/autoapi/fairchem/core/models/gemnet/layers/efficient/index.html deleted file mode 100644 index 8b6478307..000000000 --- a/autoapi/fairchem/core/models/gemnet/layers/efficient/index.html +++ /dev/null @@ -1,743 +0,0 @@ - - - - - - - - - - - fairchem.core.models.gemnet.layers.efficient — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - - - - - - -
- -
-

fairchem.core.models.gemnet.layers.efficient#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

EfficientInteractionDownProjection

Down projection in the efficient reformulation.

EfficientInteractionBilinear

Efficient reformulation of the bilinear layer and subsequent summation.

-
-
-class fairchem.core.models.gemnet.layers.efficient.EfficientInteractionDownProjection(num_spherical: int, num_radial: int, emb_size_interm: int)#
-

Bases: torch.nn.Module

-

Down projection in the efficient reformulation.

-
-
Parameters:
-
    -
  • emb_size_interm (int) – Intermediate embedding size (down-projection size).

  • -
  • kernel_initializer (callable) – Initializer of the weight matrix.

  • -
-
-
-
-
-reset_parameters() None#
-
- -
-
-forward(rbf, sph, id_ca, id_ragged_idx)#
-
-
Parameters:
-
    -
  • rbf (torch.Tensor, shape=(1, nEdges, num_radial))

  • -
  • sph (torch.Tensor, shape=(nEdges, Kmax, num_spherical))

  • -
  • id_ca

  • -
  • id_ragged_idx

  • -
-
-
Returns:
-

    -
  • rbf_W1 (torch.Tensor, shape=(nEdges, emb_size_interm, num_spherical))

  • -
  • sph (torch.Tensor, shape=(nEdges, Kmax, num_spherical)) – Kmax = maximum number of neighbors of the edges

  • -
-

-
-
-
- -
- -
-
-class fairchem.core.models.gemnet.layers.efficient.EfficientInteractionBilinear(emb_size: int, emb_size_interm: int, units_out: int)#
-

Bases: torch.nn.Module

-

Efficient reformulation of the bilinear layer and subsequent summation.

-
-
Parameters:
-
    -
  • units_out (int) – Embedding output size of the bilinear layer.

  • -
  • kernel_initializer (callable) – Initializer of the weight matrix.

  • -
-
-
-
-
-reset_parameters() None#
-
- -
-
-forward(basis, m, id_reduce, id_ragged_idx) torch.Tensor#
-
-
Parameters:
-
    -
  • basis

  • -
  • m (quadruplets: m = m_db , triplets: m = m_ba)

  • -
  • id_reduce

  • -
  • id_ragged_idx

  • -
-
-
Returns:
-

m_ca – Edge embeddings.

-
-
Return type:
-

torch.Tensor, shape=(nEdges, units_out)

-
-
-
- -
- -
-
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - - - - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/core/models/gemnet/layers/embedding_block/index.html b/autoapi/fairchem/core/models/gemnet/layers/embedding_block/index.html deleted file mode 100644 index 99ca22306..000000000 --- a/autoapi/fairchem/core/models/gemnet/layers/embedding_block/index.html +++ /dev/null @@ -1,717 +0,0 @@ - - - - - - - - - - - fairchem.core.models.gemnet.layers.embedding_block — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - -
-

fairchem.core.models.gemnet.layers.embedding_block

- -
- -
-
- - - - -
- -
-

fairchem.core.models.gemnet.layers.embedding_block#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

AtomEmbedding

Initial atom embeddings based on the atom type

EdgeEmbedding

Edge embedding based on the concatenation of atom embeddings and subsequent dense layer.

-
-
-class fairchem.core.models.gemnet.layers.embedding_block.AtomEmbedding(emb_size, num_elements: int)#
-

Bases: torch.nn.Module

-

Initial atom embeddings based on the atom type

-
-
Parameters:
-

emb_size (int) – Atom embeddings size

-
-
-
-
-forward(Z)#
-
-
Returns:
-

h – Atom embeddings.

-
-
Return type:
-

torch.Tensor, shape=(nAtoms, emb_size)

-
-
-
- -
- -
-
-class fairchem.core.models.gemnet.layers.embedding_block.EdgeEmbedding(atom_features, edge_features, out_features, activation=None)#
-

Bases: torch.nn.Module

-

Edge embedding based on the concatenation of atom embeddings and subsequent dense layer.

-
-
Parameters:
-
    -
  • emb_size (int) – Embedding size after the dense layer.

  • -
  • activation (str) – Activation function used in the dense layer.

  • -
-
-
-
-
-forward(h, m_rbf, idx_s, idx_t)#
-
-
Parameters:
-
    -
  • h

  • -
  • m_rbf (shape (nEdges, nFeatures)) – in embedding block: m_rbf = rbf ; In interaction block: m_rbf = m_st

  • -
  • idx_s

  • -
  • idx_t

  • -
-
-
Returns:
-

m_st – Edge embeddings.

-
-
Return type:
-

torch.Tensor, shape=(nEdges, emb_size)

-
-
-
- -
- -
-
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - - - - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/core/models/gemnet/layers/index.html b/autoapi/fairchem/core/models/gemnet/layers/index.html deleted file mode 100644 index 4e79dea68..000000000 --- a/autoapi/fairchem/core/models/gemnet/layers/index.html +++ /dev/null @@ -1,626 +0,0 @@ - - - - - - - - - - - fairchem.core.models.gemnet.layers — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - -
-

fairchem.core.models.gemnet.layers

- -
-
- -
-

Contents

-
- -
-
-
- - - - -
- -
-

fairchem.core.models.gemnet.layers#

-
-

Submodules#

- -
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - -
- - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/core/models/gemnet/layers/interaction_block/index.html b/autoapi/fairchem/core/models/gemnet/layers/interaction_block/index.html deleted file mode 100644 index 6659731be..000000000 --- a/autoapi/fairchem/core/models/gemnet/layers/interaction_block/index.html +++ /dev/null @@ -1,726 +0,0 @@ - - - - - - - - - - - fairchem.core.models.gemnet.layers.interaction_block — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - -
-

fairchem.core.models.gemnet.layers.interaction_block

- -
- -
-
- - - - -
- -
-

fairchem.core.models.gemnet.layers.interaction_block#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

InteractionBlockTripletsOnly

Interaction block for GemNet-T/dT.

TripletInteraction

Triplet-based message passing block.

-
-
-class fairchem.core.models.gemnet.layers.interaction_block.InteractionBlockTripletsOnly(emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, activation: str | None = None, name: str = 'Interaction')#
-

Bases: torch.nn.Module

-

Interaction block for GemNet-T/dT.

-
-
Parameters:
-
    -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_trip (int) – (Down-projected) Embedding size in the triplet message passing block.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • emb_size_bil_trip (int) – Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.

  • -
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • -
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • -
  • num_concat (int) – Number of residual blocks after the concatenation.

  • -
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • -
  • activation (str) – Name of the activation function to use in the dense layers except for the final dense layer.

  • -
-
-
-
-
-forward(h: torch.Tensor, m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca, rbf_h, idx_s, idx_t)#
-
-
Returns:
-

    -
  • h (torch.Tensor, shape=(nEdges, emb_size_atom)) – Atom embeddings.

  • -
  • m (torch.Tensor, shape=(nEdges, emb_size_edge)) – Edge embeddings (c->a).

  • -
-

-
-
-
- -
- -
-
-class fairchem.core.models.gemnet.layers.interaction_block.TripletInteraction(emb_size_edge: int, emb_size_trip: int, emb_size_bilinear: int, emb_size_rbf: int, emb_size_cbf: int, activation: str | None = None, name: str = 'TripletInteraction', **kwargs)#
-

Bases: torch.nn.Module

-

Triplet-based message passing block.

-
-
Parameters:
-
    -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_trip (int) – (Down-projected) Embedding size of the edge embeddings after the hadamard product with rbf.

  • -
  • emb_size_bilinear (int) – Embedding size of the edge embeddings after the bilinear layer.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • activation (str) – Name of the activation function to use in the dense layers except for the final dense layer.

  • -
-
-
-
-
-forward(m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca)#
-
-
Returns:
-

m – Edge embeddings (c->a).

-
-
Return type:
-

torch.Tensor, shape=(nEdges, emb_size_edge)

-
-
-
- -
- -
-
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - - - - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/core/models/gemnet/layers/radial_basis/index.html b/autoapi/fairchem/core/models/gemnet/layers/radial_basis/index.html deleted file mode 100644 index 0d95efd7c..000000000 --- a/autoapi/fairchem/core/models/gemnet/layers/radial_basis/index.html +++ /dev/null @@ -1,792 +0,0 @@ - - - - - - - - - - - fairchem.core.models.gemnet.layers.radial_basis — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - - - - - - -
- -
-

fairchem.core.models.gemnet.layers.radial_basis#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - -

PolynomialEnvelope

Polynomial envelope function that ensures a smooth cutoff.

ExponentialEnvelope

Exponential envelope function that ensures a smooth cutoff,

SphericalBesselBasis

1D spherical Bessel basis

BernsteinBasis

Bernstein polynomial basis,

RadialBasis

-
param num_radial:
-

Controls maximum frequency.

-
-
-

-
-
-class fairchem.core.models.gemnet.layers.radial_basis.PolynomialEnvelope(exponent: int)#
-

Bases: torch.nn.Module

-

Polynomial envelope function that ensures a smooth cutoff.

-
-
Parameters:
-

exponent (int) – Exponent of the envelope function.

-
-
-
-
-forward(d_scaled: torch.Tensor) torch.Tensor#
-
- -
- -
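A sketch of a DimeNet-style polynomial envelope with the documented exponent parameter follows; the exact coefficients used by the library class are an assumption, and the function name is mine:

import torch

def polynomial_envelope_sketch(d_scaled: torch.Tensor, exponent: int = 5) -> torch.Tensor:
    # Smooth cutoff: close to 1 near d_scaled = 0 and decays to 0 (with zero slope) at d_scaled = 1.
    p = exponent
    a = -(p + 1) * (p + 2) / 2
    b = p * (p + 2)
    c = -p * (p + 1) / 2
    env = 1 + a * d_scaled**p + b * d_scaled ** (p + 1) + c * d_scaled ** (p + 2)
    return torch.where(d_scaled < 1.0, env, torch.zeros_like(d_scaled))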
-
-class fairchem.core.models.gemnet.layers.radial_basis.ExponentialEnvelope#
-

Bases: torch.nn.Module

-

Exponential envelope function that ensures a smooth cutoff, as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. SpookyNet: Learning Force Fields with Electronic Degrees of Freedom and Nonlocal Effects

-
-
-forward(d_scaled: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.gemnet.layers.radial_basis.SphericalBesselBasis(num_radial: int, cutoff: float)#
-

Bases: torch.nn.Module

-

1D spherical Bessel basis

-
-
Parameters:
-
    -
  • num_radial (int) – Controls maximum frequency.

  • -
  • cutoff (float) – Cutoff distance in Angstrom.

  • -
-
-
-
-
-forward(d_scaled: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.gemnet.layers.radial_basis.BernsteinBasis(num_radial: int, pregamma_initial: float = 0.45264)#
-

Bases: torch.nn.Module

-

Bernstein polynomial basis, as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. SpookyNet: Learning Force Fields with Electronic Degrees of Freedom and Nonlocal Effects

-
-
Parameters:
-
    -
  • num_radial (int) – Controls maximum frequency.

  • -
  • pregamma_initial (float) – Initial value of the exponential coefficient gamma. Default: gamma = 0.5 * a_0**-1 = 0.94486; inverse softplus -> pregamma = log(e**gamma - 1) = 0.45264

  • -
-
-
-
-
-forward(d_scaled: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.gemnet.layers.radial_basis.RadialBasis(num_radial: int, cutoff: float, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None)#
-

Bases: torch.nn.Module

-
-
Parameters:
-
    -
  • num_radial (int) – Controls maximum frequency.

  • -
  • cutoff (float) – Cutoff distance in Angstrom.

  • -
  • rbf (dict = {"name": "gaussian"}) – Basis function and its hyperparameters.

  • -
  • envelope (dict = {"name": "polynomial", "exponent": 5}) – Envelope function and its hyperparameters.

  • -
-
-
-
-
-forward(d)#
-
- -
- -
-
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - - - - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/core/models/gemnet/layers/spherical_basis/index.html b/autoapi/fairchem/core/models/gemnet/layers/spherical_basis/index.html deleted file mode 100644 index f40c1a7b7..000000000 --- a/autoapi/fairchem/core/models/gemnet/layers/spherical_basis/index.html +++ /dev/null @@ -1,667 +0,0 @@ - - - - - - - - - - - fairchem.core.models.gemnet.layers.spherical_basis — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - -
-

fairchem.core.models.gemnet.layers.spherical_basis

- -
-
- -
-

Contents

-
- -
-
-
- - - - -
- -
-

fairchem.core.models.gemnet.layers.spherical_basis#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

CircularBasisLayer

2D Fourier Bessel Basis

-
-
-class fairchem.core.models.gemnet.layers.spherical_basis.CircularBasisLayer(num_spherical: int, radial_basis: fairchem.core.models.gemnet.layers.radial_basis.RadialBasis, cbf, efficient: bool = False)#
-

Bases: torch.nn.Module

-

2D Fourier Bessel Basis

-
-
Parameters:
-
    -
  • num_spherical (int) – Controls maximum frequency.

  • -
  • radial_basis (RadialBasis) – Radial basis functions

  • -
  • cbf (dict) – Name and hyperparameters of the cosine basis function

  • -
  • efficient (bool) – Whether to use the “efficient” summation order

  • -
-
-
-
-
-forward(D_ca, cosφ_cab, id3_ca)#
-
- -
- -
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet/utils/index.html b/autoapi/fairchem/core/models/gemnet/utils/index.html
deleted file mode 100644
index acf36ebf1..000000000
--- a/autoapi/fairchem/core/models/gemnet/utils/index.html
+++ /dev/null
@@ -1,786 +0,0 @@
(deleted HTML page: fairchem.core.models.gemnet.utils — FAIR Chemistry Documentation; navigation, sidebar, and footer markup omitted)

fairchem.core.models.gemnet.utils#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

read_json(path)

update_json(→ None)

write_json(→ None)

read_value_json(path, key)

ragged_range(→ torch.Tensor)

Multiple concatenated ranges.

repeat_blocks(→ torch.Tensor)

Repeat blocks of indices.

calculate_interatomic_vectors(→ tuple[torch.Tensor, ...)

Calculate the vectors connecting the given atom pairs,

inner_product_normalized(→ torch.Tensor)

Calculate the inner product between the given normalized vectors,

mask_neighbors(→ torch.Tensor)

-
-
-fairchem.core.models.gemnet.utils.read_json(path: str)#
-
- -
-
-fairchem.core.models.gemnet.utils.update_json(path: str, data) None#
-
- -
-
-fairchem.core.models.gemnet.utils.write_json(path: str, data) None#
-
- -
-
-fairchem.core.models.gemnet.utils.read_value_json(path: str, key: str)#
-
- -
-
-fairchem.core.models.gemnet.utils.ragged_range(sizes: torch.Tensor) torch.Tensor#
-

Multiple concatenated ranges.

-

Examples

-

sizes = [1 4 2 3]
Return: [0 0 1 2 3 0 1 0 1 2]
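The behaviour shown in the example corresponds to concatenating torch.arange(s) for every s in sizes. A loop-based sketch of that behaviour (the library version is vectorized, and the function name is mine):

import torch

def ragged_range_sketch(sizes: torch.Tensor) -> torch.Tensor:
    # E.g. sizes = [1, 4, 2, 3] -> [0, 0 1 2 3, 0 1, 0 1 2]
    return torch.cat([torch.arange(int(s)) for s in sizes])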

-
- -
-
-fairchem.core.models.gemnet.utils.repeat_blocks(sizes: torch.Tensor, repeats: int | torch.Tensor, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) torch.Tensor#
-

Repeat blocks of indices. Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements

-

continuous_indexing: Whether to keep increasing the index after each block
start_idx: Starting index
block_inc: Number to increment by after each block, either global or per block. Shape: len(sizes) - 1
repeat_inc: Number to increment by after each repetition, either global or per block

-
-
-

Examples

-

sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False
Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; repeat_inc = 4
Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; start_idx = 5
Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; block_inc = 1
Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7]
sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 1 2 0 1 2 3 4 3 4 3 4]
sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True
Return: [0 1 0 1 5 6 5 6]

-
- -
-
-fairchem.core.models.gemnet.utils.calculate_interatomic_vectors(R: torch.Tensor, id_s: torch.Tensor, id_t: torch.Tensor, offsets_st: torch.Tensor) tuple[torch.Tensor, torch.Tensor]#
-

Calculate the vectors connecting the given atom pairs, considering offsets from periodic boundary conditions (PBC).

-
-
Parameters:
-
    -
  • R (Tensor, shape = (nAtoms, 3)) – Atom positions.

  • -
  • id_s (Tensor, shape = (nEdges,)) – Indices of the source atom of the edges.

  • -
  • id_t (Tensor, shape = (nEdges,)) – Indices of the target atom of the edges.

  • -
  • offsets_st (Tensor, shape = (nEdges,)) – PBC offsets of the edges. -Subtract this from the correct direction.

  • -
-
-
Returns:
-

(D_st, V_st)

-
-
D_st: Tensor, shape = (nEdges,)

Distance from atom t to s.

-
-
V_st: Tensor, shape = (nEdges,)

Unit direction from atom t to s.

-
-
-

-
-
Return type:
-

tuple

-
-
-
- -
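A sketch of the documented behaviour follows; the sign convention for the PBC offset correction is an assumption, and the function name is mine:

import torch

def interatomic_vectors_sketch(R: torch.Tensor, id_s: torch.Tensor, id_t: torch.Tensor,
                               offsets_st: torch.Tensor):
    # Vectors from atom t to atom s, corrected by the PBC offsets as described above.
    V = R[id_s] - R[id_t] - offsets_st   # assumed sign of the offset correction
    D = V.norm(dim=-1)                   # distances, shape (nEdges,)
    return D, V / D[:, None]             # distances and unit directions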
-
-fairchem.core.models.gemnet.utils.inner_product_normalized(x: torch.Tensor, y: torch.Tensor) torch.Tensor#
-

Calculate the inner product between the given normalized vectors, giving a result between -1 and 1.

-
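For already-normalized row vectors this is a row-wise dot product, typically clamped for numerical safety; a minimal sketch (the clamping is an assumption, and the function name is mine):

import torch

def inner_product_normalized_sketch(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Row-wise dot product of unit vectors, clamped to [-1, 1].
    return torch.clamp(torch.sum(x * y, dim=-1), min=-1.0, max=1.0)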
- -
-
-fairchem.core.models.gemnet.utils.mask_neighbors(neighbors: torch.Tensor, edge_mask: torch.Tensor) torch.Tensor#
-
- -
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_gp/gemnet/index.html b/autoapi/fairchem/core/models/gemnet_gp/gemnet/index.html
deleted file mode 100644
index e18d9951d..000000000
--- a/autoapi/fairchem/core/models/gemnet_gp/gemnet/index.html
+++ /dev/null
@@ -1,752 +0,0 @@
(deleted HTML page: fairchem.core.models.gemnet_gp.gemnet — FAIR Chemistry Documentation; navigation, sidebar, and footer markup omitted)

fairchem.core.models.gemnet_gp.gemnet#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

GraphParallelGemNetT

GemNet-T, triplets-only variant of GemNet

-
-
-class fairchem.core.models.gemnet_gp.gemnet.GraphParallelGemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', scale_num_blocks: bool = False, scatter_atoms: bool = True, scale_file: str | None = None)#
-

Bases: fairchem.core.models.base.BaseModel

-

GemNet-T, triplets-only variant of GemNet

-
-
Parameters:
-
    -
  • (int) (bond_feat_dim)

  • -
  • (int)

  • -
  • num_targets (int) – Number of prediction targets.

  • -
  • num_spherical (int) – Controls maximum frequency.

  • -
  • num_radial (int) – Controls maximum frequency.

  • -
  • num_blocks (int) – Number of building blocks to be stacked.

  • -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_trip (int) – (Down-projected) Embedding size in the triplet message passing block.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • emb_size_bil_trip (int) – Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.

  • -
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • -
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • -
  • num_concat (int) – Number of residual blocks after the concatenation.

  • -
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • -
  • regress_forces (bool) – Whether to predict forces. Default: True

  • -
  • direct_forces (bool) – If True predict forces based on aggregation of interatomic directions. -If False predict forces based on negative gradient of energy potential.

  • -
  • cutoff (float) – Embedding cutoff for interactomic directions in Angstrom.

  • -
  • rbf (dict) – Name and hyperparameters of the radial basis function.

  • -
  • envelope (dict) – Name and hyperparameters of the envelope function.

  • -
  • cbf (dict) – Name and hyperparameters of the cosine basis function.

  • -
  • extensive (bool) – Whether the output should be extensive (proportional to the number of atoms)

  • -
  • output_init (str) – Initialization method for the final dense layer.

  • -
  • activation (str) – Name of the activation function.

  • -
  • scale_file (str) – Path to the json file containing the scaling factors.

  • -
-
-
-
-
-property num_params#
-
- -
-
-get_triplets(edge_index, num_atoms)#
-

Get all b->a for each edge c->a. -It is possible that b=c, as long as the edges are distinct.

-
-
Returns:
-

    -
  • id3_ba (torch.Tensor, shape (num_triplets,)) – Indices of input edge b->a of each triplet b->a<-c

  • -
  • id3_ca (torch.Tensor, shape (num_triplets,)) – Indices of output edge c->a of each triplet b->a<-c

  • -
  • id3_ragged_idx (torch.Tensor, shape (num_triplets,)) – Indices enumerating the copies of id3_ca for creating a padded matrix

  • -
-

-
-
-
- -
-
-select_symmetric_edges(tensor: torch.Tensor, mask, reorder_idx, inverse_neg) torch.Tensor#
-
- -
-
-reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector)#
-

Reorder edges to make finding counter-directional edges easier.

-

Some edges are only present in one direction in the data, since every atom has a maximum number of neighbors. Since we only use i->j edges here, we lose some j->i edges and add others by making it symmetric. We could fix this by merging edge_index with its counter-edges, including the cell_offsets, and then running torch.unique. But this does not seem worth it.

-
- -
-
-select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None)#
-
- -
-
-generate_interaction_graph(data)#
-
- -
-
-forward(data)#
-
- -
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_gp/index.html b/autoapi/fairchem/core/models/gemnet_gp/index.html
deleted file mode 100644
index 59bc8fb06..000000000
--- a/autoapi/fairchem/core/models/gemnet_gp/index.html
+++ /dev/null
@@ -1,781 +0,0 @@
(deleted HTML page: fairchem.core.models.gemnet_gp — FAIR Chemistry Documentation; navigation, sidebar, and footer markup omitted)

fairchem.core.models.gemnet_gp#

-
-

Subpackages#

- -
-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - -

GraphParallelGemNetT

GemNet-T, triplets-only variant of GemNet

-
-
-class fairchem.core.models.gemnet_gp.GraphParallelGemNetT(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, regress_forces: bool = True, direct_forces: bool = False, cutoff: float = 6.0, max_neighbors: int = 50, rbf: dict | None = None, envelope: dict | None = None, cbf: dict | None = None, extensive: bool = True, otf_graph: bool = False, use_pbc: bool = True, output_init: str = 'HeOrthogonal', activation: str = 'swish', scale_num_blocks: bool = False, scatter_atoms: bool = True, scale_file: str | None = None)#
-

Bases: fairchem.core.models.base.BaseModel

-

GemNet-T, triplets-only variant of GemNet

-
-
Parameters:
-
    -
  • (int) (bond_feat_dim)

  • -
  • (int)

  • -
  • num_targets (int) – Number of prediction targets.

  • -
  • num_spherical (int) – Controls maximum frequency.

  • -
  • num_radial (int) – Controls maximum frequency.

  • -
  • num_blocks (int) – Number of building blocks to be stacked.

  • -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_trip (int) – (Down-projected) Embedding size in the triplet message passing block.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • emb_size_bil_trip (int) – Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.

  • -
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • -
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • -
  • num_concat (int) – Number of residual blocks after the concatenation.

  • -
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • -
  • regress_forces (bool) – Whether to predict forces. Default: True

  • -
  • direct_forces (bool) – If True predict forces based on aggregation of interatomic directions. -If False predict forces based on negative gradient of energy potential.

  • -
  • cutoff (float) – Embedding cutoff for interactomic directions in Angstrom.

  • -
  • rbf (dict) – Name and hyperparameters of the radial basis function.

  • -
  • envelope (dict) – Name and hyperparameters of the envelope function.

  • -
  • cbf (dict) – Name and hyperparameters of the cosine basis function.

  • -
  • extensive (bool) – Whether the output should be extensive (proportional to the number of atoms)

  • -
  • output_init (str) – Initialization method for the final dense layer.

  • -
  • activation (str) – Name of the activation function.

  • -
  • scale_file (str) – Path to the json file containing the scaling factors.

  • -
-
-
-
-
-property num_params#
-
- -
-
-get_triplets(edge_index, num_atoms)#
-

Get all b->a for each edge c->a. -It is possible that b=c, as long as the edges are distinct.

-
-
Returns:
-

    -
  • id3_ba (torch.Tensor, shape (num_triplets,)) – Indices of input edge b->a of each triplet b->a<-c

  • -
  • id3_ca (torch.Tensor, shape (num_triplets,)) – Indices of output edge c->a of each triplet b->a<-c

  • -
  • id3_ragged_idx (torch.Tensor, shape (num_triplets,)) – Indices enumerating the copies of id3_ca for creating a padded matrix

  • -
-

-
-
-
- -
-
-select_symmetric_edges(tensor: torch.Tensor, mask, reorder_idx, inverse_neg) torch.Tensor#
-
- -
-
-reorder_symmetric_edges(edge_index, cell_offsets, neighbors, edge_dist, edge_vector)#
-

Reorder edges to make finding counter-directional edges easier.

-

Some edges are only present in one direction in the data, since every atom has a maximum number of neighbors. Since we only use i->j edges here, we lose some j->i edges and add others by making it symmetric. We could fix this by merging edge_index with its counter-edges, including the cell_offsets, and then running torch.unique. But this does not seem worth it.

-
- -
-
-select_edges(data, edge_index, cell_offsets, neighbors, edge_dist, edge_vector, cutoff=None)#
-
- -
-
-generate_interaction_graph(data)#
-
- -
-
-forward(data)#
-
- -
- -
-
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - - - - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/core/models/gemnet_gp/initializers/index.html b/autoapi/fairchem/core/models/gemnet_gp/initializers/index.html deleted file mode 100644 index 7b871a8d5..000000000 --- a/autoapi/fairchem/core/models/gemnet_gp/initializers/index.html +++ /dev/null @@ -1,661 +0,0 @@ - - - - - - - - - - - fairchem.core.models.gemnet_gp.initializers — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - -
-

fairchem.core.models.gemnet_gp.initializers

- -
-
- -
-

Contents

-
- -
-
-
- - - - -
- -
-

fairchem.core.models.gemnet_gp.initializers#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

_standardize(kernel)

Makes sure that N*Var(W) = 1 and E[W] = 0

he_orthogonal_init(→ torch.Tensor)

Generate a weight matrix with variance according to He (Kaiming) initialization.

-
-
-fairchem.core.models.gemnet_gp.initializers._standardize(kernel)#
-

Makes sure that N*Var(W) = 1 and E[W] = 0

-
- -
-
-fairchem.core.models.gemnet_gp.initializers.he_orthogonal_init(tensor: torch.Tensor) torch.Tensor#
-

Generate a weight matrix with variance according to He (Kaiming) initialization. Based on a random (semi-)orthogonal matrix, neural networks are expected to learn better when features are decorrelated (as stated by e.g. “Reducing overfitting in deep networks by decorrelating representations”, “Dropout: a simple way to prevent neural networks from overfitting”, “Exact solutions to the nonlinear dynamics of learning in deep linear neural networks”).

-
- -
-
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - -
- - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/core/models/gemnet_gp/layers/atom_update_block/index.html b/autoapi/fairchem/core/models/gemnet_gp/layers/atom_update_block/index.html deleted file mode 100644 index ae16a4633..000000000 --- a/autoapi/fairchem/core/models/gemnet_gp/layers/atom_update_block/index.html +++ /dev/null @@ -1,777 +0,0 @@ - - - - - - - - - - - fairchem.core.models.gemnet_gp.layers.atom_update_block — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - - - - - - -
- -
-

fairchem.core.models.gemnet_gp.layers.atom_update_block#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

AtomUpdateBlock

Aggregate the message embeddings of the atoms

OutputBlock

Combines the atom update block and subsequent final dense layer.

-
-
-

Functions#

- - - - - - -

scatter_sum(→ torch.Tensor)

Clone of torch_scatter.scatter_sum but without in-place operations

-
-
-fairchem.core.models.gemnet_gp.layers.atom_update_block.scatter_sum(src: torch.Tensor, index: torch.Tensor, dim: int = -1, out: torch.Tensor | None = None, dim_size: int | None = None) torch.Tensor#
-

Clone of torch_scatter.scatter_sum but without in-place operations

-
- -
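An out-of-place scatter-sum can be sketched with Tensor.index_add (the non-underscore, out-of-place variant). This is an illustration of the idea, not the library helper itself, and the simplified signature is mine:

import torch

def scatter_sum_sketch(src: torch.Tensor, index: torch.Tensor, dim_size: int) -> torch.Tensor:
    # Sum rows of src into dim_size buckets given by index, without in-place ops on src.
    out = torch.zeros(dim_size, *src.shape[1:], dtype=src.dtype, device=src.device)
    return out.index_add(0, index, src)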
-
-class fairchem.core.models.gemnet_gp.layers.atom_update_block.AtomUpdateBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, activation: str | None = None, name: str = 'atom_update')#
-

Bases: torch.nn.Module

-

Aggregate the message embeddings of the atoms

-
-
Parameters:
-
    -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • nHidden (int) – Number of residual blocks.

  • -
  • activation (callable/str) – Name of the activation function to use in the dense layers.

  • -
-
-
-
-
-get_mlp(units_in: int, units: int, nHidden: int, activation: str | None)#
-
- -
-
-forward(nAtoms: int, m: int, rbf, id_j)#
-
-
Returns:
-

h – Atom embedding.

-
-
Return type:
-

torch.Tensor, shape=(nAtoms, emb_size_atom)

-
-
-
- -
- -
-
-class fairchem.core.models.gemnet_gp.layers.atom_update_block.OutputBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, num_targets: int, activation: str | None = None, direct_forces: bool = True, output_init: str = 'HeOrthogonal', name: str = 'output', **kwargs)#
-

Bases: AtomUpdateBlock

-

Combines the atom update block and subsequent final dense layer.

-
-
Parameters:
-
    -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • nHidden (int) – Number of residual blocks.

  • -
  • num_targets (int) – Number of targets.

  • -
  • activation (str) – Name of the activation function to use in the dense layers except for the final dense layer.

  • -
  • direct_forces (bool) – If true directly predict forces without taking the gradient of the energy potential.

  • -
  • output_init (int) – Kernel initializer of the final dense layer.

  • -
-
-
-
-
-dense_rbf_F: fairchem.core.models.gemnet_gp.layers.base_layers.Dense#
-
- -
-
-out_forces: fairchem.core.models.gemnet_gp.layers.base_layers.Dense#
-
- -
-
-out_energy: fairchem.core.models.gemnet_gp.layers.base_layers.Dense#
-
- -
-
-reset_parameters() None#
-
- -
-
-forward(nAtoms: int, m, rbf, id_j: torch.Tensor)#
-
-
Returns:
-

    -
  • (E, F) (tuple)

  • -
  • - E (torch.Tensor, shape=(nAtoms, num_targets))

  • -
  • - F (torch.Tensor, shape=(nEdges, num_targets))

  • -
  • Energy and force prediction

  • -
-

-
-
-
- -
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_gp/layers/base_layers/index.html b/autoapi/fairchem/core/models/gemnet_gp/layers/base_layers/index.html
deleted file mode 100644
index 64515d61d..000000000
--- a/autoapi/fairchem/core/models/gemnet_gp/layers/base_layers/index.html
+++ /dev/null
@@ -1,811 +0,0 @@

fairchem.core.models.gemnet_gp.layers.base_layers#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - -

Dense

Combines dense layer with scaling for swish activation.

ScaledSiLU

Base class for all neural network modules.

SiQU

Base class for all neural network modules.

ResidualLayer

Residual block with output scaled by 1/sqrt(2).

-
-
-class fairchem.core.models.gemnet_gp.layers.base_layers.Dense(num_in_features: int, num_out_features: int, bias: bool = False, activation: str | None = None)#
-

Bases: torch.nn.Module

-

Combines dense layer with scaling for swish activation.

Parameters:

  • units (int) – Output embedding size.

  • activation (str) – Name of the activation function to use.

  • bias (bool) – Whether to use a bias term.

reset_parameters(initializer=he_orthogonal_init) → None#

forward(x: torch.Tensor) → torch.Tensor#
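Putting the signature and summary together, this layer is a linear transform followed by the activation selected by name. A minimal sketch with illustrative names (the swish scaling itself is shown under the ScaledSiLU sketch further down):

import torch
import torch.nn as nn

class DenseSketch(nn.Module):
    """Linear transform followed by an activation; SiLU stands in for the selected one."""

    def __init__(self, num_in_features: int, num_out_features: int, bias: bool = False):
        super().__init__()
        self.linear = nn.Linear(num_in_features, num_out_features, bias=bias)
        # The real layer rescales the swish/SiLU output; see the ScaledSiLU sketch below.
        self.act = nn.SiLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.act(self.linear(x))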
-class fairchem.core.models.gemnet_gp.layers.base_layers.ScaledSiLU#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
forward(x: torch.Tensor) → torch.Tensor#
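The class name indicates a SiLU (swish) activation rescaled by a constant so the output variance stays close to that of the input. A minimal sketch, assuming the 1/0.6 rescaling factor used by several GemNet implementations (treat the exact constant as an assumption):

import torch
import torch.nn as nn

class ScaledSiLUSketch(nn.Module):
    def __init__(self):
        super().__init__()
        self.scale_factor = 1 / 0.6  # assumed rescaling constant
        self._activation = nn.SiLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self._activation(x) * self.scale_factor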
-class fairchem.core.models.gemnet_gp.layers.base_layers.SiQU#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.gemnet_gp.layers.base_layers.ResidualLayer(units: int, nLayers: int = 2, layer=Dense, **layer_kwargs)#
-

Bases: torch.nn.Module

-

Residual block with output scaled by 1/sqrt(2).

Parameters:

  • units (int) – Output embedding size.

  • nLayers (int) – Number of dense layers.

  • layer_kwargs – Keyword arguments passed when initializing the layers.

forward(input: torch.Tensor) → torch.Tensor#
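A minimal sketch of the pattern described above: a small stack of dense layers whose output is added to the input, with the sum scaled by 1/sqrt(2) to keep the variance roughly constant. Plain Linear + SiLU stand in for the library's Dense layer:

import math
import torch
import torch.nn as nn

class ResidualLayerSketch(nn.Module):
    def __init__(self, units: int, n_layers: int = 2):
        super().__init__()
        self.dense_mlp = nn.Sequential(
            *[nn.Sequential(nn.Linear(units, units, bias=False), nn.SiLU())
              for _ in range(n_layers)]
        )
        self.inv_sqrt_2 = 1 / math.sqrt(2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Residual connection, rescaled so the sum of two unit-variance terms
        # keeps roughly unit variance.
        return (x + self.dense_mlp(x)) * self.inv_sqrt_2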
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_gp/layers/basis_utils/index.html b/autoapi/fairchem/core/models/gemnet_gp/layers/basis_utils/index.html
deleted file mode 100644
index 1642b9e78..000000000
--- a/autoapi/fairchem/core/models/gemnet_gp/layers/basis_utils/index.html
+++ /dev/null
@@ -1,783 +0,0 @@

fairchem.core.models.gemnet_gp.layers.basis_utils#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - -

Jn(r, n)

numerical spherical bessel functions of order n

Jn_zeros(n, k)

Compute the first k zeros of the spherical bessel functions up to order n (excluded)

spherical_bessel_formulas(n)

Computes the sympy formulas for the spherical bessel functions up to order n (excluded)

bessel_basis(n, k)

Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to

sph_harm_prefactor(→ float)

Computes the constant pre-factor for the spherical harmonic of degree l and order m.

associated_legendre_polynomials(L_maxdegree[, ...])

Computes string formulas of the associated legendre polynomials up to degree L (excluded).

real_sph_harm(L_maxdegree, use_theta[, use_phi, ...])

Computes formula strings of the real part of the spherical harmonics up to degree L (excluded).

-
-
-fairchem.core.models.gemnet_gp.layers.basis_utils.Jn(r: float, n: int)#
-

numerical spherical bessel functions of order n

-
- -
-
-fairchem.core.models.gemnet_gp.layers.basis_utils.Jn_zeros(n: int, k: int)#
-

Compute the first k zeros of the spherical bessel functions up to order n (excluded)
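A quick numerical illustration of what these two helpers compute, using SciPy as a stand-in (an assumption about tooling, not a statement about the library's implementation):

from scipy import optimize, special

def jn(r, n: int):
    """Numerical spherical Bessel function of the first kind, order n."""
    return special.spherical_jn(n, r)

# The first zero of j_0(r) = sin(r)/r lies at pi; bracket it and solve numerically,
# which is essentially what a Jn_zeros-style helper has to do for each order.
first_zero = optimize.brentq(lambda r: jn(r, 0), 1.0, 4.0)
print(round(first_zero, 6))  # ~3.141593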

-
- -
-
-fairchem.core.models.gemnet_gp.layers.basis_utils.spherical_bessel_formulas(n)#
-

Computes the sympy formulas for the spherical bessel functions up to order n (excluded)

-
- -
-
-fairchem.core.models.gemnet_gp.layers.basis_utils.bessel_basis(n: int, k: int)#
-

Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to -order n (excluded) and maximum frequency k (excluded).

-
-
Returns:
-

-
list

Bessel basis formulas taking in a single argument x. -Has length n where each element has length k. -> In total n*k many.

-
-
-

-
-
Return type:
-

bess_basis

-
-
-
- -
-
-fairchem.core.models.gemnet_gp.layers.basis_utils.sph_harm_prefactor(l_degree: int, m_order: int) float#
-

Computes the constant pre-factor for the spherical harmonic of degree l and order m.

-
-
Parameters:
-
    -
  • l_degree (int) – Degree of the spherical harmonic. l >= 0

  • -
  • m_order (int) – Order of the spherical harmonic. -l <= m <= l

  • -
-
-
Returns:
-

factor

-
-
Return type:
-

float

-
-
-
- -
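The constant being described is the standard normalization sqrt((2l+1)/(4·pi) · (l−|m|)!/(l+|m|)!); a small check of that formula (illustrative only):

import math

def sph_harm_prefactor_sketch(l_degree: int, m_order: int) -> float:
    return math.sqrt(
        (2 * l_degree + 1)
        / (4 * math.pi)
        * math.factorial(l_degree - abs(m_order))
        / math.factorial(l_degree + abs(m_order))
    )

print(round(sph_harm_prefactor_sketch(0, 0), 6))  # 1/sqrt(4*pi) ~ 0.282095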
-
-fairchem.core.models.gemnet_gp.layers.basis_utils.associated_legendre_polynomials(L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True)#
-

Computes string formulas of the associated legendre polynomials up to degree L (excluded).

-
-
Parameters:
-
    -
  • L_maxdegree (int) – Degree up to which to calculate the associated legendre polynomials (degree L is excluded).

  • -
  • zero_m_only (bool) – If True only calculate the polynomials for the polynomials where m=0.

  • -
  • pos_m_only (bool) – If True only calculate the polynomials for the polynomials where m>=0. Overwritten by zero_m_only.

  • -
-
-
Returns:
-

polynomials – Contains the sympy functions of the polynomials (in total L many if zero_m_only is True else L^2 many).

-
-
Return type:
-

list

-
-
-
- -
-
-fairchem.core.models.gemnet_gp.layers.basis_utils.real_sph_harm(L_maxdegree: int, use_theta: bool, use_phi: bool = True, zero_m_only: bool = True)#
-

Computes formula strings of the real part of the spherical harmonics up to degree L (excluded). Variables are either spherical coordinates phi and theta (or cartesian coordinates x, y, z) on the UNIT SPHERE.

-
-
Parameters:
-
    -
  • L_maxdegree (int) – Degree up to which to calculate the spherical harmonics (degree L is excluded).

  • -
  • use_theta (bool) –

      -
    • True: Expects the input of the formula strings to contain theta.

    • -
    • False: Expects the input of the formula strings to contain z.

    • -
    -

  • -
  • use_phi (bool) –

      -
    • True: Expects the input of the formula strings to contain phi.

    • -
    • False: Expects the input of the formula strings to contain x and y.

    • -
    -

    Does nothing if zero_m_only is True

    -

  • -
  • zero_m_only (bool) – If True only calculate the harmonics where m=0.

  • -
-
-
Returns:
-

Y_lm_real – Formula strings of the real part of the spherical harmonics up to degree L (where degree L is not excluded). In total L^2 many spherical harmonics exist up to degree L (excluded). However, if zero_m_only is True, the total count is reduced to only L many.

-
-
Return type:
-

list

-
-
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_gp/layers/efficient/index.html b/autoapi/fairchem/core/models/gemnet_gp/layers/efficient/index.html
deleted file mode 100644
index 7abd6e1d2..000000000
--- a/autoapi/fairchem/core/models/gemnet_gp/layers/efficient/index.html
+++ /dev/null
@@ -1,743 +0,0 @@

fairchem.core.models.gemnet_gp.layers.efficient#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

EfficientInteractionDownProjection

Down projection in the efficient reformulation.

EfficientInteractionBilinear

Efficient reformulation of the bilinear layer and subsequent summation.

-
-
-class fairchem.core.models.gemnet_gp.layers.efficient.EfficientInteractionDownProjection(num_spherical: int, num_radial: int, emb_size_interm: int)#
-

Bases: torch.nn.Module

-

Down projection in the efficient reformulation.

-
-
Parameters:
-
    -
  • emb_size_interm (int) – Intermediate embedding size (down-projection size).

  • -
  • kernel_initializer (callable) – Initializer of the weight matrix.

  • -
-
-
-
-
-reset_parameters() None#
-
- -
-
-forward(rbf: torch.Tensor, sph: torch.Tensor, id_ca, id_ragged_idx, Kmax: int) tuple[torch.Tensor, torch.Tensor]#
-
-
Parameters:
-
    -
  • rbf (torch.Tensor, shape=(1, nEdges, num_radial))

  • -
  • sph (torch.Tensor, shape=(nEdges, Kmax, num_spherical))

  • -
  • id_ca

  • -
  • id_ragged_idx

  • -
-
-
Returns:
-

    -
  • rbf_W1 (torch.Tensor, shape=(nEdges, emb_size_interm, num_spherical))

  • -
  • sph (torch.Tensor, shape=(nEdges, Kmax, num_spherical)) – Kmax = maximum number of neighbors of the edges

  • -
-

-
-
-
- -
- -
-
-class fairchem.core.models.gemnet_gp.layers.efficient.EfficientInteractionBilinear(emb_size: int, emb_size_interm: int, units_out: int)#
-

Bases: torch.nn.Module

-

Efficient reformulation of the bilinear layer and subsequent summation.

-
-
Parameters:
-
    -
  • units_out (int) – Embedding output size of the bilinear layer.

  • -
  • kernel_initializer (callable) – Initializer of the weight matrix.

  • -
-
-
-
-
-reset_parameters() None#
-
- -
-
-forward(basis: tuple[torch.Tensor, torch.Tensor], m, id_reduce, id_ragged_idx, edge_offset, Kmax: int) torch.Tensor#
-
-
Parameters:
-
    -
  • basis

  • -
  • m (quadruplets: m = m_db , triplets: m = m_ba)

  • -
  • id_reduce

  • -
  • id_ragged_idx

  • -
-
-
Returns:
-

m_ca – Edge embeddings.

-
-
Return type:
-

torch.Tensor, shape=(nEdges, units_out)

-
-
-
- -
- -
-
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - - - - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/core/models/gemnet_gp/layers/embedding_block/index.html b/autoapi/fairchem/core/models/gemnet_gp/layers/embedding_block/index.html deleted file mode 100644 index 74af5aecf..000000000 --- a/autoapi/fairchem/core/models/gemnet_gp/layers/embedding_block/index.html +++ /dev/null @@ -1,717 +0,0 @@ - - - - - - - - - - - fairchem.core.models.gemnet_gp.layers.embedding_block — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - -
-

fairchem.core.models.gemnet_gp.layers.embedding_block

- -
- -
-
- - - - -
- -
-

fairchem.core.models.gemnet_gp.layers.embedding_block#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

AtomEmbedding

Initial atom embeddings based on the atom type

EdgeEmbedding

Edge embedding based on the concatenation of atom embeddings and subsequent dense layer.

-
-
-class fairchem.core.models.gemnet_gp.layers.embedding_block.AtomEmbedding(emb_size: int)#
-

Bases: torch.nn.Module

-

Initial atom embeddings based on the atom type

-
-
Parameters:
-

emb_size (int) – Atom embeddings size

-
-
-
-
-forward(Z) torch.Tensor#
-
-
Returns:
-

h – Atom embeddings.

-
-
Return type:
-

torch.Tensor, shape=(nAtoms, emb_size)

-
-
-
- -
- -
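A sketch of the lookup this block performs: one learned vector per element, indexed by atomic number. The Z−1 offset and the table size are illustrative assumptions:

import torch
import torch.nn as nn

class AtomEmbeddingSketch(nn.Module):
    def __init__(self, emb_size: int, num_elements: int = 83):
        super().__init__()
        self.embeddings = nn.Embedding(num_elements, emb_size)

    def forward(self, Z: torch.Tensor) -> torch.Tensor:
        # Atomic numbers start at 1, embedding rows at 0 (assumed offset).
        return self.embeddings(Z - 1)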
-
-class fairchem.core.models.gemnet_gp.layers.embedding_block.EdgeEmbedding(atom_features: int, edge_features: int, num_out_features: int, activation: str | None = None)#
-

Bases: torch.nn.Module

-

Edge embedding based on the concatenation of atom embeddings and subsequent dense layer.

-
-
Parameters:
-
    -
  • emb_size (int) – Embedding size after the dense layer.

  • -
  • activation (str) – Activation function used in the dense layer.

  • -
-
-
-
-
-forward(h, m_rbf, idx_s, idx_t) torch.Tensor#
-
-
Parameters:
-
    -
  • h

  • -
  • m_rbf (shape (nEdges, nFeatures)) – in embedding block: m_rbf = rbf ; In interaction block: m_rbf = m_st

  • -
  • idx_s

  • -
  • idx_t

  • -
-
-
Returns:
-

m_st – Edge embeddings.

-
-
Return type:
-

torch.Tensor, shape=(nEdges, emb_size)

-
-
-
- -
- -
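A sketch of the concatenate-then-project pattern described here: the source-atom embedding, the target-atom embedding, and the edge features are concatenated and passed through a dense layer. Plain Linear + SiLU stand in for the library's Dense layer:

import torch
import torch.nn as nn

class EdgeEmbeddingSketch(nn.Module):
    def __init__(self, atom_features: int, edge_features: int, num_out_features: int):
        super().__init__()
        self.dense = nn.Sequential(
            nn.Linear(2 * atom_features + edge_features, num_out_features, bias=False),
            nn.SiLU(),
        )

    def forward(self, h, m_rbf, idx_s, idx_t) -> torch.Tensor:
        m_st = torch.cat([h[idx_s], h[idx_t], m_rbf], dim=-1)  # (nEdges, 2*atom + edge feats)
        return self.dense(m_st)                                 # (nEdges, num_out_features)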
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_gp/layers/index.html b/autoapi/fairchem/core/models/gemnet_gp/layers/index.html
deleted file mode 100644
index 21b19fb54..000000000
--- a/autoapi/fairchem/core/models/gemnet_gp/layers/index.html
+++ /dev/null
@@ -1,626 +0,0 @@

fairchem.core.models.gemnet_gp.layers#

-
-

Submodules#

  • fairchem.core.models.gemnet_gp.layers.atom_update_block

  • fairchem.core.models.gemnet_gp.layers.base_layers

  • fairchem.core.models.gemnet_gp.layers.basis_utils

  • fairchem.core.models.gemnet_gp.layers.efficient

  • fairchem.core.models.gemnet_gp.layers.embedding_block

  • fairchem.core.models.gemnet_gp.layers.interaction_block

  • fairchem.core.models.gemnet_gp.layers.radial_basis

  • fairchem.core.models.gemnet_gp.layers.spherical_basis

\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_gp/layers/interaction_block/index.html b/autoapi/fairchem/core/models/gemnet_gp/layers/interaction_block/index.html
deleted file mode 100644
index 09764e24e..000000000
--- a/autoapi/fairchem/core/models/gemnet_gp/layers/interaction_block/index.html
+++ /dev/null
@@ -1,729 +0,0 @@

fairchem.core.models.gemnet_gp.layers.interaction_block#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

InteractionBlockTripletsOnly

Interaction block for GemNet-T/dT.

TripletInteraction

Triplet-based message passing block.

-
-
-class fairchem.core.models.gemnet_gp.layers.interaction_block.InteractionBlockTripletsOnly(emb_size_atom: int, emb_size_edge: int, emb_size_trip: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_bil_trip: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, activation: str | None = None, name: str = 'Interaction')#
-

Bases: torch.nn.Module

-

Interaction block for GemNet-T/dT.

-
-
Parameters:
-
    -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_trip (int) – (Down-projected) Embedding size in the triplet message passing block.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • emb_size_bil_trip (int) – Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.

  • -
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • -
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • -
  • num_concat (int) – Number of residual blocks after the concatenation.

  • -
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • -
  • activation (str) – Name of the activation function to use in the dense layers except for the final dense layer.

  • -
-
-
-
-
-forward(h: torch.Tensor, m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca, rbf_h, idx_s, idx_t, edge_offset, Kmax, nAtoms)#
-
-
Returns:
-

    -
  • h (torch.Tensor, shape=(nEdges, emb_size_atom)) – Atom embeddings.

  • -
  • m (torch.Tensor, shape=(nEdges, emb_size_edge)) – Edge embeddings (c->a).

  • -
  • Node (h)

  • -
  • Edge (m, rbf3, id_swap, rbf_h, idx_s, idx_t, cbf3[0], cbf3[1] (dense))

  • -
  • Triplet (id3_ragged_idx, id3_ba, id3_ca)

  • -
-

-
-
-
- -
- -
-
-class fairchem.core.models.gemnet_gp.layers.interaction_block.TripletInteraction(emb_size_edge: int, emb_size_trip: int, emb_size_bilinear: int, emb_size_rbf: int, emb_size_cbf: int, activation: str | None = None, name: str = 'TripletInteraction', **kwargs)#
-

Bases: torch.nn.Module

-

Triplet-based message passing block.

-
-
Parameters:
-
    -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_trip (int) – (Down-projected) Embedding size of the edge embeddings after the hadamard product with rbf.

  • -
  • emb_size_bilinear (int) – Embedding size of the edge embeddings after the bilinear layer.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • activation (str) – Name of the activation function to use in the dense layers except for the final dense layer.

  • -
-
-
-
-
-forward(m: torch.Tensor, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca, edge_offset, Kmax)#
-
-
Returns:
-

m – Edge embeddings (c->a).

-
-
Return type:
-

torch.Tensor, shape=(nEdges, emb_size_edge)

-
-
-
- -
- -
-
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - - - - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/core/models/gemnet_gp/layers/radial_basis/index.html b/autoapi/fairchem/core/models/gemnet_gp/layers/radial_basis/index.html deleted file mode 100644 index e677361cc..000000000 --- a/autoapi/fairchem/core/models/gemnet_gp/layers/radial_basis/index.html +++ /dev/null @@ -1,792 +0,0 @@ - - - - - - - - - - - fairchem.core.models.gemnet_gp.layers.radial_basis — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
-
-
-
-
- - - - -
-
- - - -
- - - - - - - - - - - - - -
- -
- - - -
- -
-
- -
-
- -
- -
- -
- - -
- -
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- -
-
- - - - - - - - -
- -
-

fairchem.core.models.gemnet_gp.layers.radial_basis#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - -

PolynomialEnvelope

Polynomial envelope function that ensures a smooth cutoff.

ExponentialEnvelope

Exponential envelope function that ensures a smooth cutoff,

SphericalBesselBasis

1D spherical Bessel basis

BernsteinBasis

Bernstein polynomial basis,

RadialBasis

param num_radial: Controls maximum frequency.

-
-
-class fairchem.core.models.gemnet_gp.layers.radial_basis.PolynomialEnvelope(exponent: int)#
-

Bases: torch.nn.Module

-

Polynomial envelope function that ensures a smooth cutoff.

-
-
Parameters:
-

exponent (int) – Exponent of the envelope function.

-
-
-
-
-forward(d_scaled: torch.Tensor) torch.Tensor#
-
- -
- -
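A sketch of the DimeNet-style polynomial envelope such a class typically implements, u(d) = 1 − ((p+1)(p+2)/2)·d^p + p(p+2)·d^(p+1) − (p(p+1)/2)·d^(p+2) for scaled distances d < 1 and 0 otherwise; treat the exact form as an assumption about the implementation:

import torch

def polynomial_envelope_sketch(d_scaled: torch.Tensor, exponent: int = 5) -> torch.Tensor:
    p = exponent
    a = -(p + 1) * (p + 2) / 2
    b = p * (p + 2)
    c = -p * (p + 1) / 2
    env = 1 + a * d_scaled**p + b * d_scaled**(p + 1) + c * d_scaled**(p + 2)
    # The envelope is only applied inside the cutoff (d_scaled < 1); it reaches 0 at d_scaled = 1.
    return torch.where(d_scaled < 1, env, torch.zeros_like(d_scaled))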
-
-class fairchem.core.models.gemnet_gp.layers.radial_basis.ExponentialEnvelope#
-

Bases: torch.nn.Module

-

Exponential envelope function that ensures a smooth cutoff, -as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. -SpookyNet: Learning Force Fields with Electronic Degrees of Freedom -and Nonlocal Effects

-
-
-forward(d_scaled) torch.Tensor#
-
- -
- -
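For reference, the SpookyNet envelope cited above has the closed form u(d) = exp(−d² / ((1 − d)(1 + d))) for scaled distances d < 1 and 0 otherwise; a sketch under that assumption:

import torch

def exponential_envelope_sketch(d_scaled: torch.Tensor) -> torch.Tensor:
    env = torch.exp(-(d_scaled**2) / ((1 - d_scaled) * (1 + d_scaled)))
    # Zero outside the cutoff; the exponential itself already decays to 0 as d_scaled -> 1.
    return torch.where(d_scaled < 1, env, torch.zeros_like(d_scaled))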
-
-class fairchem.core.models.gemnet_gp.layers.radial_basis.SphericalBesselBasis(num_radial: int, cutoff: float)#
-

Bases: torch.nn.Module

-

1D spherical Bessel basis

-
-
Parameters:
-
    -
  • num_radial (int) – Controls maximum frequency.

  • -
  • cutoff (float) – Cutoff distance in Angstrom.

  • -
-
-
-
-
-forward(d_scaled)#
-
- -
- -
-
-class fairchem.core.models.gemnet_gp.layers.radial_basis.BernsteinBasis(num_radial: int, pregamma_initial: float = 0.45264)#
-

Bases: torch.nn.Module

-

Bernstein polynomial basis, -as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. -SpookyNet: Learning Force Fields with Electronic Degrees of Freedom -and Nonlocal Effects

-
-
Parameters:
-
    -
  • num_radial (int) – Controls maximum frequency.

  • -
  • pregamma_initial (float) – Initial value of exponential coefficient gamma. -Default: gamma = 0.5 * a_0**-1 = 0.94486, -inverse softplus -> pregamma = log e**gamma - 1 = 0.45264

  • -
-
-
-
-
-forward(d_scaled) torch.Tensor#
-
- -
- -
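A quick check of the arithmetic quoted for the default pregamma, assuming the parametrization gamma = softplus(pregamma) with gamma ≈ 0.5/a_0 and a_0 the Bohr radius in Angstrom:

import math

a0 = 0.529177                              # Bohr radius in Angstrom
gamma = 0.5 / a0                           # ~0.94486
pregamma = math.log(math.exp(gamma) - 1)   # inverse softplus -> ~0.45264
print(round(gamma, 5), round(pregamma, 5))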
-
-class fairchem.core.models.gemnet_gp.layers.radial_basis.RadialBasis(num_radial: int, cutoff: float, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None)#
-

Bases: torch.nn.Module

-
-
Parameters:
-
    -
  • num_radial (int) – Controls maximum frequency.

  • -
  • cutoff (float) – Cutoff distance in Angstrom.

  • -
  • rbf (dict = {"name": "gaussian"}) – Basis function and its hyperparameters.

  • -
  • envelope (dict = {"name": "polynomial", "exponent": 5}) – Envelope function and its hyperparameters.

  • -
-
-
-
-
-forward(d)#
-
- -
- -
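A sketch of how such a radial basis is typically assembled: scale distances by the cutoff, evaluate the basis functions, and multiply by the envelope. The Gaussian centers/widths and the raised-cosine envelope are illustrative stand-ins, not the library's defaults:

import math
import torch

def radial_basis_sketch(d: torch.Tensor, num_radial: int, cutoff: float) -> torch.Tensor:
    d_scaled = d / cutoff                                    # (nEdges,)
    # Gaussian basis with centers spread over [0, 1]; the width factor is illustrative.
    offsets = torch.linspace(0.0, 1.0, num_radial)
    gauss = torch.exp(-((d_scaled[:, None] - offsets) ** 2) * num_radial**2)
    # Any smooth cutoff works for the sketch; a raised cosine keeps it self-contained.
    env = torch.where(
        d_scaled < 1,
        0.5 * (torch.cos(math.pi * d_scaled) + 1),
        torch.zeros_like(d_scaled),
    )
    return env[:, None] * gauss                              # (nEdges, num_radial)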
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_gp/layers/spherical_basis/index.html b/autoapi/fairchem/core/models/gemnet_gp/layers/spherical_basis/index.html
deleted file mode 100644
index 6c220a622..000000000
--- a/autoapi/fairchem/core/models/gemnet_gp/layers/spherical_basis/index.html
+++ /dev/null
@@ -1,667 +0,0 @@

fairchem.core.models.gemnet_gp.layers.spherical_basis#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

CircularBasisLayer

2D Fourier Bessel Basis

-
-
-class fairchem.core.models.gemnet_gp.layers.spherical_basis.CircularBasisLayer(num_spherical: int, radial_basis: fairchem.core.models.gemnet_gp.layers.radial_basis.RadialBasis, cbf, efficient: bool = False)#
-

Bases: torch.nn.Module

-

2D Fourier Bessel Basis

-
-
Parameters:
-
    -
  • num_spherical (int) – Controls maximum frequency.

  • -
  • radial_basis (RadialBasis) – Radial basis functions

  • -
  • cbf (dict) – Name and hyperparameters of the cosine basis function

  • -
  • efficient (bool) – Whether to use the “efficient” summation order

  • -
-
-
-
-
-forward(D_ca, cosφ_cab, id3_ca)#
-
- -
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_gp/utils/index.html b/autoapi/fairchem/core/models/gemnet_gp/utils/index.html
deleted file mode 100644
index 62b8455b0..000000000
--- a/autoapi/fairchem/core/models/gemnet_gp/utils/index.html
+++ /dev/null
@@ -1,786 +0,0 @@

fairchem.core.models.gemnet_gp.utils#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

read_json(path)

update_json(→ None)

write_json(→ None)

read_value_json(path, key)

ragged_range(sizes)

Multiple concatenated ranges.

repeat_blocks(→ torch.Tensor)

Repeat blocks of indices.

calculate_interatomic_vectors(→ tuple[torch.Tensor, ...)

Calculate the vectors connecting the given atom pairs,

inner_product_normalized(→ torch.Tensor)

Calculate the inner product between the given normalized vectors,

mask_neighbors(neighbors, edge_mask)

-
-
-fairchem.core.models.gemnet_gp.utils.read_json(path: str)#
-
- -
-
-fairchem.core.models.gemnet_gp.utils.update_json(path: str, data) None#
-
- -
-
-fairchem.core.models.gemnet_gp.utils.write_json(path: str, data) None#
-
- -
-
-fairchem.core.models.gemnet_gp.utils.read_value_json(path: str, key)#
-
- -
-
-fairchem.core.models.gemnet_gp.utils.ragged_range(sizes)#
-

Multiple concatenated ranges.

-

Examples

-

sizes = [1 4 2 3]
Return: [0 0 1 2 3 0 1 0 1 2]

-
- -
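A reference one-liner that reproduces the example above (fine for illustration; the library presumably uses a fully vectorized formulation):

import torch

def ragged_range_sketch(sizes: torch.Tensor) -> torch.Tensor:
    # Concatenate torch.arange(s) for every s in sizes.
    return torch.cat([torch.arange(int(s)) for s in sizes])

print(ragged_range_sketch(torch.tensor([1, 4, 2, 3])).tolist())
# [0, 0, 1, 2, 3, 0, 1, 0, 1, 2]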
-
-fairchem.core.models.gemnet_gp.utils.repeat_blocks(sizes: torch.Tensor, repeats, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) torch.Tensor#
-

Repeat blocks of indices. -Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements

-

continuous_indexing: Whether to keep increasing the index after each block
start_idx: Starting index
block_inc: Number to increment by after each block,
    either global or per block. Shape: len(sizes) - 1
repeat_inc: Number to increment by after each repetition,
    either global or per block

Examples

-

sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False
Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; repeat_inc = 4
Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; start_idx = 5
Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; block_inc = 1
Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7]
sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 1 2 0 1 2 3 4 3 4 3 4]
sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True
Return: [0 1 0 1 5 6 5 6]

-
- -
-
-fairchem.core.models.gemnet_gp.utils.calculate_interatomic_vectors(R: torch.Tensor, id_s: torch.Tensor, id_t: torch.Tensor, offsets_st: torch.Tensor | None) tuple[torch.Tensor, torch.Tensor]#
-

Calculate the vectors connecting the given atom pairs, -considering offsets from periodic boundary conditions (PBC).

-
-
Parameters:
-
    -
  • R (Tensor, shape = (nAtoms, 3)) – Atom positions.

  • -
  • id_s (Tensor, shape = (nEdges,)) – Indices of the source atom of the edges.

  • -
  • id_t (Tensor, shape = (nEdges,)) – Indices of the target atom of the edges.

  • -
  • offsets_st (Tensor, shape = (nEdges,)) – PBC offsets of the edges. -Subtract this from the correct direction.

  • -
-
-
Returns:
-

(D_st, V_st)

-
-
D_st: Tensor, shape = (nEdges,)

Distance from atom t to s.

-
-
V_st: Tensor, shape = (nEdges,)

Unit direction from atom t to s.

-
-
-

-
-
Return type:
-

tuple

-
-
-
- -
-
-fairchem.core.models.gemnet_gp.utils.inner_product_normalized(x: torch.Tensor, y: torch.Tensor) torch.Tensor#
-

Calculate the inner product between the given normalized vectors, -giving a result between -1 and 1.

-
- -
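A minimal sketch of the clamped dot product being described (the clamp guards against values drifting slightly outside [−1, 1] due to floating-point error):

import torch

def inner_product_normalized_sketch(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # x, y are assumed to be unit vectors along the last dimension.
    return torch.sum(x * y, dim=-1).clamp(min=-1, max=1)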
-
-fairchem.core.models.gemnet_gp.utils.mask_neighbors(neighbors: torch.Tensor, edge_mask: torch.Tensor)#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/gemnet_oc/index.html b/autoapi/fairchem/core/models/gemnet_oc/gemnet_oc/index.html
deleted file mode 100644
index 3913a9fba..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/gemnet_oc/index.html
+++ /dev/null
@@ -1,878 +0,0 @@

fairchem.core.models.gemnet_oc.gemnet_oc#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

GemNetOC

-
param num_atoms (int):
-

-
-

-
-
-class fairchem.core.models.gemnet_oc.gemnet_oc.GemNetOC(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_quad_in: int, emb_size_quad_out: int, emb_size_aint_in: int, emb_size_aint_out: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_sbf: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, num_output_afteratom: int, num_atom_emb_layers: int = 0, num_global_out_layers: int = 2, regress_forces: bool = True, direct_forces: bool = False, use_pbc: bool = True, scale_backprop_forces: bool = False, cutoff: float = 6.0, cutoff_qint: float | None = None, cutoff_aeaint: float | None = None, cutoff_aint: float | None = None, max_neighbors: int = 50, max_neighbors_qint: int | None = None, max_neighbors_aeaint: int | None = None, max_neighbors_aint: int | None = None, enforce_max_neighbors_strictly: bool = True, rbf: dict[str, str] | None = None, rbf_spherical: dict | None = None, envelope: dict[str, str | int] | None = None, cbf: dict[str, str] | None = None, sbf: dict[str, str] | None = None, extensive: bool = True, forces_coupled: bool = False, output_init: str = 'HeOrthogonal', activation: str = 'silu', quad_interaction: bool = False, atom_edge_interaction: bool = False, edge_atom_interaction: bool = False, atom_interaction: bool = False, scale_basis: bool = False, qint_tags: list | None = None, num_elements: int = 83, otf_graph: bool = False, scale_file: str | None = None, **kwargs)#
-

Bases: fairchem.core.models.base.BaseModel

-
-
Parameters:
-
    -
  • num_atoms (int)

  • bond_feat_dim (int)

  • -
  • num_targets (int) – Number of prediction targets.

  • -
  • num_spherical (int) – Controls maximum frequency.

  • -
  • num_radial (int) – Controls maximum frequency.

  • -
  • num_blocks (int) – Number of building blocks to be stacked.

  • -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_trip_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings -before the bilinear layer.

  • -
  • emb_size_trip_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings -after the bilinear layer.

  • -
  • emb_size_quad_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings -before the bilinear layer.

  • -
  • emb_size_quad_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings -after the bilinear layer.

  • -
  • emb_size_aint_in (int) – Embedding size in the atom interaction before the bilinear layer.

  • -
  • emb_size_aint_out (int) – Embedding size in the atom interaction after the bilinear layer.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • emb_size_sbf (int) – Embedding size of the spherical basis transformation (two angles).

  • -
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • -
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • -
  • num_concat (int) – Number of residual blocks after the concatenation.

  • -
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • -
  • num_output_afteratom (int) – Number of residual blocks in the output blocks -after adding the atom embedding.

  • -
  • num_atom_emb_layers (int) – Number of residual blocks for transforming atom embeddings.

  • -
  • num_global_out_layers (int) – Number of final residual blocks before the output.

  • -
  • regress_forces (bool) – Whether to predict forces. Default: True

  • -
  • direct_forces (bool) – If True predict forces based on aggregation of interatomic directions. -If False predict forces based on negative gradient of energy potential.

  • -
  • use_pbc (bool) – Whether to use periodic boundary conditions.

  • -
  • scale_backprop_forces (bool) – Whether to scale up the energy and then scales down the forces -to prevent NaNs and infs in backpropagated forces.

  • -
  • cutoff (float) – Embedding cutoff for interatomic connections and embeddings in Angstrom.

  • -
  • cutoff_qint (float) – Quadruplet interaction cutoff in Angstrom. -Optional. Uses cutoff per default.

  • -
  • cutoff_aeaint (float) – Edge-to-atom and atom-to-edge interaction cutoff in Angstrom. -Optional. Uses cutoff per default.

  • -
  • cutoff_aint (float) – Atom-to-atom interaction cutoff in Angstrom. -Optional. Uses maximum of all other cutoffs per default.

  • -
  • max_neighbors (int) – Maximum number of neighbors for interatomic connections and embeddings.

  • -
  • max_neighbors_qint (int) – Maximum number of quadruplet interactions per embedding. -Optional. Uses max_neighbors per default.

  • -
  • max_neighbors_aeaint (int) – Maximum number of edge-to-atom and atom-to-edge interactions per embedding. -Optional. Uses max_neighbors per default.

  • -
  • max_neighbors_aint (int) – Maximum number of atom-to-atom interactions per atom. -Optional. Uses maximum of all other neighbors per default.

  • -
  • enforce_max_neighbors_strictly (bool) – When subselected edges based on max_neighbors args, arbitrarily -select amongst degenerate edges to have exactly the correct number.

  • -
  • rbf (dict) – Name and hyperparameters of the radial basis function.

  • -
  • rbf_spherical (dict) – Name and hyperparameters of the radial basis function used as part of the -circular and spherical bases. -Optional. Uses rbf per default.

  • -
  • envelope (dict) – Name and hyperparameters of the envelope function.

  • -
  • cbf (dict) – Name and hyperparameters of the circular basis function.

  • -
  • sbf (dict) – Name and hyperparameters of the spherical basis function.

  • -
  • extensive (bool) – Whether the output should be extensive (proportional to the number of atoms)

  • -
  • forces_coupled (bool) – If True, enforce that |F_st| = |F_ts|. No effect if direct_forces is False.

  • -
  • output_init (str) – Initialization method for the final dense layer.

  • -
  • activation (str) – Name of the activation function.

  • -
  • scale_file (str) – Path to the pytorch file containing the scaling factors.

  • -
  • quad_interaction (bool) – Whether to use quadruplet interactions (with dihedral angles)

  • -
  • atom_edge_interaction (bool) – Whether to use atom-to-edge interactions

  • -
  • edge_atom_interaction (bool) – Whether to use edge-to-atom interactions

  • -
  • atom_interaction (bool) – Whether to use atom-to-atom interactions

  • -
  • scale_basis (bool) – Whether to use a scaling layer in the raw basis function for better -numerical stability.

  • -
  • qint_tags (list) – Which atom tags to use quadruplet interactions for. -0=sub-surface bulk, 1=surface, 2=adsorbate atoms.

  • -
-
-
-
-
-property num_params: int#
-
- -
-
-set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint)#
-
- -
-
-set_max_neighbors(max_neighbors, max_neighbors_qint, max_neighbors_aeaint, max_neighbors_aint)#
-
- -
-
-init_basis_functions(num_radial, num_spherical, rbf, rbf_spherical, envelope, cbf, sbf, scale_basis)#
-
- -
-
-init_shared_basis_layers(num_radial, num_spherical, emb_size_rbf, emb_size_cbf, emb_size_sbf)#
-
- -
-
-calculate_quad_angles(V_st, V_qint_st, quad_idx)#
-

Calculate angles for quadruplet-based message passing.

-
-
Parameters:
-
    -
  • V_st (Tensor, shape = (nAtoms, 3)) – Normalized directions from s to t

  • -
  • V_qint_st (Tensor, shape = (nAtoms, 3)) – Normalized directions from s to t for the quadruplet -interaction graph

  • -
  • quad_idx (dict of torch.Tensor) – Indices relevant for quadruplet interactions.

  • -
-
-
Returns:
-

    -
  • cosφ_cab (Tensor, shape = (num_triplets_inint,)) – Cosine of angle between atoms c -> a <- b.

  • -
  • cosφ_abd (Tensor, shape = (num_triplets_qint,)) – Cosine of angle between atoms a -> b -> d.

  • -
  • angle_cabd (Tensor, shape = (num_quadruplets,)) – Dihedral angle between atoms c <- a-b -> d.

  • -
-

-
-
-
- -
-
-select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, opposite_neg) torch.Tensor#
-

Use a mask to remove values of removed edges and then -duplicate the values for the correct edge direction.

-
-
Parameters:
-
    -
  • tensor (torch.Tensor) – Values to symmetrize for the new tensor.

  • -
  • mask (torch.Tensor) – Mask defining which edges go in the correct direction.

  • -
  • reorder_idx (torch.Tensor) – Indices defining how to reorder the tensor values after -concatenating the edge values of both directions.

  • -
  • opposite_neg (bool) – Whether the edge in the opposite direction should use the -negative tensor value.

  • -
-
-
Returns:
-

tensor_ordered – A tensor with symmetrized values.

-
-
Return type:
-

torch.Tensor

-
-
-
- -
-
-symmetrize_edges(graph, batch_idx)#
-

Symmetrize edges to ensure existence of counter-directional edges.

-

Some edges are only present in one direction in the data, -since every atom has a maximum number of neighbors. -We only use i->j edges here. So we lose some j->i edges -and add others by making it symmetric.

-
- -
-
-subselect_edges(data, graph, cutoff=None, max_neighbors=None)#
-

Subselect edges using a stricter cutoff and max_neighbors.

-
- -
-
-generate_graph_dict(data, cutoff, max_neighbors)#
-

Generate a radius/nearest neighbor graph.

-
- -
-
-subselect_graph(data, graph, cutoff, max_neighbors, cutoff_orig, max_neighbors_orig)#
-

If the new cutoff and max_neighbors is different from the original, -subselect the edges of a given graph.

-
- -
-
-get_graphs_and_indices(data)#
-

“Generate embedding and interaction graphs and indices.

-
- -
-
-get_bases(main_graph, a2a_graph, a2ee2a_graph, qint_graph, trip_idx_e2e, trip_idx_a2e, trip_idx_e2a, quad_idx, num_atoms)#
-

Calculate and transform basis functions.

-
- -
-
-forward(data)#
-
- -
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/index.html b/autoapi/fairchem/core/models/gemnet_oc/index.html
deleted file mode 100644
index 734dff08d..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/index.html
+++ /dev/null
@@ -1,909 +0,0 @@

fairchem.core.models.gemnet_oc#

-
-

Subpackages#

- -
-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - -

GemNetOC

-
param num_atoms (int):
-

-
-

-
-
-class fairchem.core.models.gemnet_oc.GemNetOC(num_atoms: int | None, bond_feat_dim: int, num_targets: int, num_spherical: int, num_radial: int, num_blocks: int, emb_size_atom: int, emb_size_edge: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_quad_in: int, emb_size_quad_out: int, emb_size_aint_in: int, emb_size_aint_out: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_sbf: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, num_output_afteratom: int, num_atom_emb_layers: int = 0, num_global_out_layers: int = 2, regress_forces: bool = True, direct_forces: bool = False, use_pbc: bool = True, scale_backprop_forces: bool = False, cutoff: float = 6.0, cutoff_qint: float | None = None, cutoff_aeaint: float | None = None, cutoff_aint: float | None = None, max_neighbors: int = 50, max_neighbors_qint: int | None = None, max_neighbors_aeaint: int | None = None, max_neighbors_aint: int | None = None, enforce_max_neighbors_strictly: bool = True, rbf: dict[str, str] | None = None, rbf_spherical: dict | None = None, envelope: dict[str, str | int] | None = None, cbf: dict[str, str] | None = None, sbf: dict[str, str] | None = None, extensive: bool = True, forces_coupled: bool = False, output_init: str = 'HeOrthogonal', activation: str = 'silu', quad_interaction: bool = False, atom_edge_interaction: bool = False, edge_atom_interaction: bool = False, atom_interaction: bool = False, scale_basis: bool = False, qint_tags: list | None = None, num_elements: int = 83, otf_graph: bool = False, scale_file: str | None = None, **kwargs)#
-

Bases: fairchem.core.models.base.BaseModel

-
-
Parameters:
-
    -
  • num_atoms (int)

  • bond_feat_dim (int)

  • -
  • num_targets (int) – Number of prediction targets.

  • -
  • num_spherical (int) – Controls maximum frequency.

  • -
  • num_radial (int) – Controls maximum frequency.

  • -
  • num_blocks (int) – Number of building blocks to be stacked.

  • -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_trip_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings -before the bilinear layer.

  • -
  • emb_size_trip_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings -after the bilinear layer.

  • -
  • emb_size_quad_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings -before the bilinear layer.

  • -
  • emb_size_quad_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings -after the bilinear layer.

  • -
  • emb_size_aint_in (int) – Embedding size in the atom interaction before the bilinear layer.

  • -
  • emb_size_aint_out (int) – Embedding size in the atom interaction after the bilinear layer.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • emb_size_sbf (int) – Embedding size of the spherical basis transformation (two angles).

  • -
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • -
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • -
  • num_concat (int) – Number of residual blocks after the concatenation.

  • -
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • -
  • num_output_afteratom (int) – Number of residual blocks in the output blocks -after adding the atom embedding.

  • -
  • num_atom_emb_layers (int) – Number of residual blocks for transforming atom embeddings.

  • -
  • num_global_out_layers (int) – Number of final residual blocks before the output.

  • -
  • regress_forces (bool) – Whether to predict forces. Default: True

  • -
  • direct_forces (bool) – If True predict forces based on aggregation of interatomic directions. -If False predict forces based on negative gradient of energy potential.

  • -
  • use_pbc (bool) – Whether to use periodic boundary conditions.

  • -
  • scale_backprop_forces (bool) – Whether to scale up the energy and then scales down the forces -to prevent NaNs and infs in backpropagated forces.

  • -
  • cutoff (float) – Embedding cutoff for interatomic connections and embeddings in Angstrom.

  • -
  • cutoff_qint (float) – Quadruplet interaction cutoff in Angstrom. -Optional. Uses cutoff per default.

  • -
  • cutoff_aeaint (float) – Edge-to-atom and atom-to-edge interaction cutoff in Angstrom. -Optional. Uses cutoff per default.

  • -
  • cutoff_aint (float) – Atom-to-atom interaction cutoff in Angstrom. -Optional. Uses maximum of all other cutoffs per default.

  • -
  • max_neighbors (int) – Maximum number of neighbors for interatomic connections and embeddings.

  • -
  • max_neighbors_qint (int) – Maximum number of quadruplet interactions per embedding. -Optional. Uses max_neighbors per default.

  • -
  • max_neighbors_aeaint (int) – Maximum number of edge-to-atom and atom-to-edge interactions per embedding. -Optional. Uses max_neighbors per default.

  • -
  • max_neighbors_aint (int) – Maximum number of atom-to-atom interactions per atom. -Optional. Uses maximum of all other neighbors per default.

  • -
  • enforce_max_neighbors_strictly (bool) – When subselected edges based on max_neighbors args, arbitrarily -select amongst degenerate edges to have exactly the correct number.

  • -
  • rbf (dict) – Name and hyperparameters of the radial basis function.

  • -
  • rbf_spherical (dict) – Name and hyperparameters of the radial basis function used as part of the -circular and spherical bases. -Optional. Uses rbf per default.

  • -
  • envelope (dict) – Name and hyperparameters of the envelope function.

  • -
  • cbf (dict) – Name and hyperparameters of the circular basis function.

  • -
  • sbf (dict) – Name and hyperparameters of the spherical basis function.

  • -
  • extensive (bool) – Whether the output should be extensive (proportional to the number of atoms)

  • -
  • forces_coupled (bool) – If True, enforce that |F_st| = |F_ts|. No effect if direct_forces is False.

  • -
  • output_init (str) – Initialization method for the final dense layer.

  • -
  • activation (str) – Name of the activation function.

  • -
  • scale_file (str) – Path to the pytorch file containing the scaling factors.

  • -
  • quad_interaction (bool) – Whether to use quadruplet interactions (with dihedral angles)

  • -
  • atom_edge_interaction (bool) – Whether to use atom-to-edge interactions

  • -
  • edge_atom_interaction (bool) – Whether to use edge-to-atom interactions

  • -
  • atom_interaction (bool) – Whether to use atom-to-atom interactions

  • -
  • scale_basis (bool) – Whether to use a scaling layer in the raw basis function for better -numerical stability.

  • -
  • qint_tags (list) – Which atom tags to use quadruplet interactions for. -0=sub-surface bulk, 1=surface, 2=adsorbate atoms.

  • -
-
-
-
-
-property num_params: int#
-
- -
-
-set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint)#
-
- -
-
-set_max_neighbors(max_neighbors, max_neighbors_qint, max_neighbors_aeaint, max_neighbors_aint)#
-
- -
-
-init_basis_functions(num_radial, num_spherical, rbf, rbf_spherical, envelope, cbf, sbf, scale_basis)#
-
- -
-
-init_shared_basis_layers(num_radial, num_spherical, emb_size_rbf, emb_size_cbf, emb_size_sbf)#
-
- -
-
-calculate_quad_angles(V_st, V_qint_st, quad_idx)#
-

Calculate angles for quadruplet-based message passing.

-
-
Parameters:
-
    -
  • V_st (Tensor, shape = (nAtoms, 3)) – Normalized directions from s to t

  • -
  • V_qint_st (Tensor, shape = (nAtoms, 3)) – Normalized directions from s to t for the quadruplet -interaction graph

  • -
  • quad_idx (dict of torch.Tensor) – Indices relevant for quadruplet interactions.

  • -
-
-
Returns:
-

    -
  • cosφ_cab (Tensor, shape = (num_triplets_inint,)) – Cosine of angle between atoms c -> a <- b.

  • -
  • cosφ_abd (Tensor, shape = (num_triplets_qint,)) – Cosine of angle between atoms a -> b -> d.

  • -
  • angle_cabd (Tensor, shape = (num_quadruplets,)) – Dihedral angle between atoms c <- a-b -> d.

  • -
-

-
-
-
- -
-
-select_symmetric_edges(tensor: torch.Tensor, mask: torch.Tensor, reorder_idx: torch.Tensor, opposite_neg) torch.Tensor#
-

Use a mask to remove values of removed edges and then -duplicate the values for the correct edge direction.

-
-
Parameters:
-
    -
  • tensor (torch.Tensor) – Values to symmetrize for the new tensor.

  • -
  • mask (torch.Tensor) – Mask defining which edges go in the correct direction.

  • -
  • reorder_idx (torch.Tensor) – Indices defining how to reorder the tensor values after -concatenating the edge values of both directions.

  • -
  • opposite_neg (bool) – Whether the edge in the opposite direction should use the -negative tensor value.

  • -
-
-
Returns:
-

tensor_ordered – A tensor with symmetrized values.

-
-
Return type:
-

torch.Tensor

-
-
-
- -
-
-symmetrize_edges(graph, batch_idx)#
-

Symmetrize edges to ensure existence of counter-directional edges.

-

Some edges are only present in one direction in the data, -since every atom has a maximum number of neighbors. -We only use i->j edges here. So we lose some j->i edges -and add others by making it symmetric.

-
- -
-
-subselect_edges(data, graph, cutoff=None, max_neighbors=None)#
-

Subselect edges using a stricter cutoff and max_neighbors.

-
- -
-
-generate_graph_dict(data, cutoff, max_neighbors)#
-

Generate a radius/nearest neighbor graph.

-
- -
-
-subselect_graph(data, graph, cutoff, max_neighbors, cutoff_orig, max_neighbors_orig)#
-

If the new cutoff and max_neighbors is different from the original, -subselect the edges of a given graph.

-
- -
-
-get_graphs_and_indices(data)#
-

“Generate embedding and interaction graphs and indices.

-
- -
-
-get_bases(main_graph, a2a_graph, a2ee2a_graph, qint_graph, trip_idx_e2e, trip_idx_a2e, trip_idx_e2a, quad_idx, num_atoms)#
-

Calculate and transform basis functions.

-
- -
-
-forward(data)#
-
- -
- -
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/initializers/index.html b/autoapi/fairchem/core/models/gemnet_oc/initializers/index.html
deleted file mode 100644
index ba25843ad..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/initializers/index.html
+++ /dev/null
@@ -1,693 +0,0 @@

fairchem.core.models.gemnet_oc.initializers#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - -

_standardize(kernel)

Makes sure that N*Var(W) = 1 and E[W] = 0

he_orthogonal_init(→ torch.Tensor)

Generate a weight matrix with variance according to He (Kaiming) initialization.

grid_init(→ torch.Tensor)

Generate a weight matrix so that each input value corresponds to one value on a regular grid between start and end.

log_grid_init(→ torch.Tensor)

Generate a weight matrix so that each input value corresponds to one value on a regular logarithmic grid between 10^start and 10^end.

get_initializer(name, **init_kwargs)

-
-
-fairchem.core.models.gemnet_oc.initializers._standardize(kernel)#
-

Makes sure that N*Var(W) = 1 and E[W] = 0

-
- -
-
-fairchem.core.models.gemnet_oc.initializers.he_orthogonal_init(tensor: torch.Tensor) torch.Tensor#
-

Generate a weight matrix with variance according to He (Kaiming) initialization. Based on a random (semi-)orthogonal matrix, neural networks are expected to learn better when features are decorrelated (as stated by e.g. “Reducing overfitting in deep networks by decorrelating representations”, “Dropout: a simple way to prevent neural networks from overfitting”, “Exact solutions to the nonlinear dynamics of learning in deep linear neural networks”).

-
- -
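A sketch of the combination described above for a 2D weight matrix: draw a random (semi-)orthogonal matrix, then standardize it so the entries have zero mean and variance 1/fan_in. Illustrative only, not the library's exact code:

import torch

def he_orthogonal_init_sketch(tensor: torch.Tensor) -> torch.Tensor:
    # Expects a 2D weight matrix of shape (fan_out, fan_in).
    with torch.no_grad():
        torch.nn.init.orthogonal_(tensor)
        fan_in = tensor.shape[1]
        # Standardize so that (approximately) E[W] = 0 and fan_in * Var(W) = 1.
        tensor -= tensor.mean()
        tensor /= tensor.std() * fan_in**0.5
    return tensor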
-
-fairchem.core.models.gemnet_oc.initializers.grid_init(tensor: torch.Tensor, start: int = -1, end: int = 1) torch.Tensor#
-

Generate a weight matrix so that each input value corresponds to one value on a regular grid between start and end.

-
- -
-
-fairchem.core.models.gemnet_oc.initializers.log_grid_init(tensor: torch.Tensor, start: int = -4, end: int = 0) torch.Tensor#
-

Generate a weight matrix so that each input value corresponds to one value on a regular logarithmic grid between 10^start and 10^end.

-
- -
-
-fairchem.core.models.gemnet_oc.initializers.get_initializer(name, **init_kwargs)#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/interaction_indices/index.html b/autoapi/fairchem/core/models/gemnet_oc/interaction_indices/index.html
deleted file mode 100644
index 95574b886..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/interaction_indices/index.html
+++ /dev/null
@@ -1,772 +0,0 @@

fairchem.core.models.gemnet_oc.interaction_indices#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - -

get_triplets(graph, num_atoms)

Get all input edges b->a for each output edge c->a.

get_mixed_triplets(graph_in, graph_out, num_atoms[, ...])

Get all output edges (ingoing or outgoing) for each incoming edge.

get_quadruplets(main_graph, qint_graph, num_atoms)

Get all d->b for each edge c->a and connection b->a

-
-
-fairchem.core.models.gemnet_oc.interaction_indices.get_triplets(graph, num_atoms: int)#
-

Get all input edges b->a for each output edge c->a. -It is possible that b=c, as long as the edges are distinct -(i.e. atoms b and c stem from different unit cells).

-
-
Parameters:
-
    -
  • graph (dict of torch.Tensor) – Contains the graph’s edge_index.

  • -
  • num_atoms (int) – Total number of atoms.

  • -
-
-
Returns:
-

-
in: torch.Tensor, shape (num_triplets,)

Indices of input edge b->a of each triplet b->a<-c

-
-
out: torch.Tensor, shape (num_triplets,)

Indices of output edge c->a of each triplet b->a<-c

-
-
out_agg: torch.Tensor, shape (num_triplets,)

Indices enumerating the intermediate edges of each output edge. -Used for creating a padded matrix and aggregating via matmul.

-
-
-

-
-
Return type:
-

Dictionary containing the entries

-
-
-
- -
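To make the triplet indexing concrete, here is a toy, brute-force version of the idea described above (purely illustrative, not the fairchem implementation, which works on batched tensors and handles periodic images): for every pair of distinct edges that share the same target atom a, record the input edge b->a and the output edge c->a.

import torch

edge_index = torch.tensor([[1, 2, 0, 0],   # source atoms (b or c)
                           [0, 0, 1, 2]])  # target atoms (a)
src, tgt = edge_index
idx_in, idx_out = [], []
for e_out in range(tgt.numel()):
    for e_in in range(tgt.numel()):
        if e_in != e_out and tgt[e_in] == tgt[e_out]:
            idx_in.append(e_in)
            idx_out.append(e_out)
print(idx_in, idx_out)  # edge pairs meeting at the same atom a: b->a (input) and c->a (output)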
-
-fairchem.core.models.gemnet_oc.interaction_indices.get_mixed_triplets(graph_in, graph_out, num_atoms, to_outedge=False, return_adj=False, return_agg_idx=False)#
-

Get all output edges (ingoing or outgoing) for each incoming edge. -It is possible that in atom=out atom, as long as the edges are distinct -(i.e. they stem from different unit cells). In edges and out edges stem -from separate graphs (hence “mixed”) with shared atoms.

-
-
Parameters:
-
    -
  • graph_in (dict of torch.Tensor) – Contains the input graph’s edge_index and cell_offset.

  • -
  • graph_out (dict of torch.Tensor) – Contains the output graph’s edge_index and cell_offset. -Input and output graphs use the same atoms, but different edges.

  • -
  • num_atoms (int) – Total number of atoms.

  • -
  • to_outedge (bool) – Whether to map the output to the atom’s outgoing edges a->c -instead of the ingoing edges c->a.

  • -
  • return_adj (bool) – Whether to output the adjacency (incidence) matrix between output -edges and atoms adj_edges.

  • -
  • return_agg_idx (bool) – Whether to output the indices enumerating the intermediate edges -of each output edge.

  • -
-
-
Returns:
-

-
in: torch.Tensor, shape (num_triplets,)

Indices of input edges

-
-
out: torch.Tensor, shape (num_triplets,)

Indices of output edges

-
-
adj_edges: SparseTensor, shape (num_edges, num_atoms)

Adjacency (incidence) matrix between output edges and atoms, -with values specifying the input edges. -Only returned if return_adj is True.

-
-
out_agg: torch.Tensor, shape (num_triplets,)

Indices enumerating the intermediate edges of each output edge. -Used for creating a padded matrix and aggregating via matmul. -Only returned if return_agg_idx is True.

-
-
-

-
-
Return type:
-

Dictionary containing the entries

-
-
-
- -
-
-fairchem.core.models.gemnet_oc.interaction_indices.get_quadruplets(main_graph, qint_graph, num_atoms)#
-

Get all d->b for each edge c->a and connection b->a -Careful about periodic images! -Separate interaction cutoff not supported.

-
-
Parameters:
-
    -
  • main_graph (dict of torch.Tensor) – Contains the main graph’s edge_index and cell_offset. -The main graph defines which edges are embedded.

  • -
  • qint_graph (dict of torch.Tensor) – Contains the quadruplet interaction graph’s edge_index and -cell_offset. main_graph and qint_graph use the same atoms, -but different edges.

  • -
  • num_atoms (int) – Total number of atoms.

  • -
-
-
Returns:
-

-
triplet_in[‘in’]: torch.Tensor, shape (nTriplets,)

Indices of input edge d->b in triplet d->b->a.

-
-
triplet_in[‘out’]: torch.Tensor, shape (nTriplets,)

Interaction indices of output edge b->a in triplet d->b->a.

-
-
triplet_out[‘in’]: torch.Tensor, shape (nTriplets,)

Interaction indices of input edge b->a in triplet c->a<-b.

-
-
triplet_out[‘out’]: torch.Tensor, shape (nTriplets,)

Indices of output edge c->a in triplet c->a<-b.

-
-
out: torch.Tensor, shape (nQuadruplets,)

Indices of output edge c->a in quadruplet

-
-
trip_in_to_quad: torch.Tensor, shape (nQuadruplets,)

Indices to map from input triplet d->b->a -to quadruplet d->b->a<-c.

-
-
trip_out_to_quad: torch.Tensor, shape (nQuadruplets,)

Indices to map from output triplet c->a<-b -to quadruplet d->b->a<-c.

-
-
out_agg: torch.Tensor, shape (num_triplets,)

Indices enumerating the intermediate edges of each output edge. -Used for creating a padded matrix and aggregating via matmul.

-
-
-

-
-
Return type:
-

Dictionary containing the entries

-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/layers/atom_update_block/index.html b/autoapi/fairchem/core/models/gemnet_oc/layers/atom_update_block/index.html
deleted file mode 100644
index e699ec4fc..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/layers/atom_update_block/index.html
+++ /dev/null
@@ -1,729 +0,0 @@
-fairchem.core.models.gemnet_oc.layers.atom_update_block — FAIR Chemistry Documentation
fairchem.core.models.gemnet_oc.layers.atom_update_block#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

AtomUpdateBlock

Aggregate the message embeddings of the atoms

OutputBlock

Combines the atom update block and subsequent final dense layer.

-
-
-class fairchem.core.models.gemnet_oc.layers.atom_update_block.AtomUpdateBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, activation=None)#
-

Bases: torch.nn.Module

-

Aggregate the message embeddings of the atoms

-
-
Parameters:
-
    -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis.

  • -
  • nHidden (int) – Number of residual blocks.

  • -
  • activation (callable/str) – Name of the activation function to use in the dense layers.

  • -
-
-
-
-
-get_mlp(units_in: int, units: int, nHidden: int, activation)#
-
- -
-
-forward(h: torch.Tensor, m, basis_rad, idx_atom)#
-
-
Returns:
-

h – Atom embedding.

-
-
Return type:
-

torch.Tensor, shape=(nAtoms, emb_size_atom)

-
-
-
- -
- -
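A rough sketch of the aggregation this block performs, as far as the signatures above suggest (assumptions: messages m are summed onto their atoms via idx_atom and passed through an MLP; the radial-basis weighting and residual stacks are omitted here):

import torch

num_atoms, emb_size_edge, emb_size_atom = 3, 8, 4
m = torch.randn(5, emb_size_edge)            # one message per edge
idx_atom = torch.tensor([0, 0, 1, 2, 2])     # atom receiving each message
agg = torch.zeros(num_atoms, emb_size_edge).index_add_(0, idx_atom, m)
mlp = torch.nn.Sequential(torch.nn.Linear(emb_size_edge, emb_size_atom), torch.nn.SiLU())
h = mlp(agg)                                 # (num_atoms, emb_size_atom) atom embeddings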
-
-class fairchem.core.models.gemnet_oc.layers.atom_update_block.OutputBlock(emb_size_atom: int, emb_size_edge: int, emb_size_rbf: int, nHidden: int, nHidden_afteratom: int, activation: str | None = None, direct_forces: bool = True)#
-

Bases: AtomUpdateBlock

-

Combines the atom update block and subsequent final dense layer.

-
-
Parameters:
-
    -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis.

  • -
  • nHidden (int) – Number of residual blocks before adding the atom embedding.

  • -
  • nHidden_afteratom (int) – Number of residual blocks after adding the atom embedding.

  • -
  • activation (str) – Name of the activation function to use in the dense layers.

  • -
  • direct_forces (bool) – If true directly predict forces, i.e. without taking the gradient -of the energy potential.

  • -
-
-
-
-
-forward(h: torch.Tensor, m: torch.Tensor, basis_rad, idx_atom)#
-
-
Returns:
-

    -
  • torch.Tensor, shape=(nAtoms, emb_size_atom) – Output atom embeddings.

  • -
  • torch.Tensor, shape=(nEdges, emb_size_edge) – Output edge embeddings.

  • -
-

-
-
-
- -
- -
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/layers/base_layers/index.html b/autoapi/fairchem/core/models/gemnet_oc/layers/base_layers/index.html
deleted file mode 100644
index eee5f3ee4..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/layers/base_layers/index.html
+++ /dev/null
@@ -1,760 +0,0 @@
-fairchem.core.models.gemnet_oc.layers.base_layers — FAIR Chemistry Documentation
fairchem.core.models.gemnet_oc.layers.base_layers#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - -

Dense

Combines dense layer with scaling for silu activation.

ScaledSiLU

Base class for all neural network modules.

ResidualLayer

Residual block with output scaled by 1/sqrt(2).

-
-
-class fairchem.core.models.gemnet_oc.layers.base_layers.Dense(in_features: int, out_features: int, bias: bool = False, activation: str | None = None)#
-

Bases: torch.nn.Module

-

Combines dense layer with scaling for silu activation.

-
-
Parameters:
-
    -
  • in_features (int) – Input embedding size.

  • -
  • out_features (int) – Output embedding size.

  • -
  • bias (bool) – True if use bias.

  • -
  • activation (str) – Name of the activation function to use.

  • -
-
-
-
-
-reset_parameters(initializer=he_orthogonal_init) None#
-
- -
-
-forward(x)#
-
- -
- -
-
-class fairchem.core.models.gemnet_oc.layers.base_layers.ScaledSiLU#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x)#
-
- -
- -
-
-class fairchem.core.models.gemnet_oc.layers.base_layers.ResidualLayer(units: int, nLayers: int = 2, layer=Dense, **layer_kwargs)#
-

Bases: torch.nn.Module

-

Residual block with output scaled by 1/sqrt(2).

-
-
Parameters:
-
    -
  • units (int) – Input and output embedding size.

  • -
  • nLayers (int) – Number of dense layers.

  • -
  • layer (torch.nn.Module) – Class for the layers inside the residual block.

  • -
  • layer_kwargs (str) – Keyword arguments for initializing the layers.

  • -
-
-
-
-
-forward(input)#
-
- -
- -
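Sketch of the residual pattern named above (assumptions: n dense layers of equal width with SiLU, and the 1/sqrt(2) factor applied to the sum to keep its variance roughly constant; not the fairchem implementation):

import math
import torch

class ResidualSketch(torch.nn.Module):
    def __init__(self, units: int, n_layers: int = 2):
        super().__init__()
        layers = []
        for _ in range(n_layers):
            layers += [torch.nn.Linear(units, units, bias=False), torch.nn.SiLU()]
        self.mlp = torch.nn.Sequential(*layers)
        self.inv_sqrt_2 = 1 / math.sqrt(2.0)

    def forward(self, x):
        return (x + self.mlp(x)) * self.inv_sqrt_2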
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/layers/basis_utils/index.html b/autoapi/fairchem/core/models/gemnet_oc/layers/basis_utils/index.html
deleted file mode 100644
index 117c13ca6..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/layers/basis_utils/index.html
+++ /dev/null
@@ -1,800 +0,0 @@
-fairchem.core.models.gemnet_oc.layers.basis_utils — FAIR Chemistry Documentation
fairchem.core.models.gemnet_oc.layers.basis_utils#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - - - - -

Jn(r, n)

numerical spherical bessel functions of order n

Jn_zeros(n, k)

Compute the first k zeros of the spherical bessel functions

spherical_bessel_formulas(n)

Computes the sympy formulas for the spherical bessel functions

bessel_basis(n, k)

Compute the sympy formulas for the normalized and rescaled spherical bessel

sph_harm_prefactor(l_degree, m_order)

Computes the constant pre-factor for the spherical harmonic

associated_legendre_polynomials(L_maxdegree[, ...])

Computes string formulas of the associated legendre polynomials

real_sph_harm(→ None)

Computes formula strings of the real part of the spherical harmonics

get_sph_harm_basis(L_maxdegree[, zero_m_only])

Get a function calculating the spherical harmonics basis from z and phi.

-
-
-fairchem.core.models.gemnet_oc.layers.basis_utils.Jn(r: float, n: int)#
-

numerical spherical bessel functions of order n

-
- -
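For reference, the same quantity can be computed with SciPy (a hedged equivalence: Jn is documented here only as the numerical spherical Bessel function of order n):

from scipy import special

def Jn_sketch(r: float, n: int) -> float:
    # spherical Bessel function of the first kind, order n, evaluated at r
    return special.spherical_jn(n, r)

print(Jn_sketch(1.0, 0))  # sin(1)/1 ≈ 0.8415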
-
-fairchem.core.models.gemnet_oc.layers.basis_utils.Jn_zeros(n: int, k: int)#
-

Compute the first k zeros of the spherical bessel functions -up to order n (excluded)

-
- -
-
-fairchem.core.models.gemnet_oc.layers.basis_utils.spherical_bessel_formulas(n: int)#
-

Computes the sympy formulas for the spherical bessel functions -up to order n (excluded)

-
- -
-
-fairchem.core.models.gemnet_oc.layers.basis_utils.bessel_basis(n: int, k: int)#
-

Compute the sympy formulas for the normalized and rescaled spherical bessel -functions up to order n (excluded) and maximum frequency k (excluded).

-
-
Returns:
-

bess_basis – Bessel basis formulas taking in a single argument x. -Has length n where each element has length k. -> In total n*k many.

-
-
Return type:
-

list

-
-
-
- -
-
-fairchem.core.models.gemnet_oc.layers.basis_utils.sph_harm_prefactor(l_degree: int, m_order: int)#
-

Computes the constant pre-factor for the spherical harmonic -of degree l and order m.

-
-
Parameters:
-
    -
  • l_degree (int) – Degree of the spherical harmonic. l >= 0

  • -
  • m_order (int) – Order of the spherical harmonic. -l <= m <= l

  • -
-
-
Returns:
-

factor

-
-
Return type:
-

float

-
-
-
- -
-
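The docstring above corresponds to the standard normalization constant of the real spherical harmonics; written out explicitly (assumed to be what the function evaluates):

import math

def sph_harm_prefactor_sketch(l_degree: int, m_order: int) -> float:
    # sqrt( (2l+1)/(4*pi) * (l-|m|)! / (l+|m|)! )
    return math.sqrt(
        (2 * l_degree + 1) / (4 * math.pi)
        * math.factorial(l_degree - abs(m_order))
        / math.factorial(l_degree + abs(m_order))
    )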
-fairchem.core.models.gemnet_oc.layers.basis_utils.associated_legendre_polynomials(L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True)#
-

Computes string formulas of the associated legendre polynomials -up to degree L (excluded).

-
-
Parameters:
-
    -
  • L_maxdegree (int) – Degree up to which to calculate the associated legendre polynomials -(degree L is excluded).

  • -
  • zero_m_only (bool) – If True only calculate the polynomials for the polynomials where m=0.

  • -
  • pos_m_only (bool) – If True only calculate the polynomials for the polynomials where m>=0. -Overwritten by zero_m_only.

  • -
-
-
Returns:
-

polynomials – Contains the sympy functions of the polynomials -(in total L many if zero_m_only is True else L^2 many).

-
-
Return type:
-

list

-
-
-
- -
-
-fairchem.core.models.gemnet_oc.layers.basis_utils.real_sph_harm(L_maxdegree: int, use_theta: bool, use_phi: bool = True, zero_m_only: bool = True) None#
-

Computes formula strings of the real part of the spherical harmonics up to degree L (excluded). Variables are either spherical coordinates phi and theta, or Cartesian coordinates x, y, z, on the UNIT SPHERE.

-
-
Parameters:
-
    -
  • L_maxdegree (int) – Degree up to which to calculate the spherical harmonics -(degree L is excluded).

  • -
  • use_theta (bool) –

      -
    • True: Expects the input of the formula strings to contain theta.

    • -
    • False: Expects the input of the formula strings to contain z.

    • -
    -

  • -
  • use_phi (bool) –

      -
    • True: Expects the input of the formula strings to contain phi.

    • -
    • False: Expects the input of the formula strings to contain x and y.

    • -
    -

    Does nothing if zero_m_only is True

    -

  • -
  • zero_m_only (bool) – If True only calculate the harmonics where m=0.

  • -
-
-
Returns:
-

Y_lm_real – Formula strings of the real part of the spherical harmonics up to degree L (where degree L is not excluded). In total L^2 many spherical harmonics exist up to degree L (excluded). However, if zero_m_only is True, the total count is reduced to L.

-
-
Return type:
-

list

-
-
-
- -
-
-fairchem.core.models.gemnet_oc.layers.basis_utils.get_sph_harm_basis(L_maxdegree: int, zero_m_only: bool = True)#
-

Get a function calculating the spherical harmonics basis from z and phi.

-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/layers/efficient/index.html b/autoapi/fairchem/core/models/gemnet_oc/layers/efficient/index.html
deleted file mode 100644
index 85674685d..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/layers/efficient/index.html
+++ /dev/null
@@ -1,764 +0,0 @@
-fairchem.core.models.gemnet_oc.layers.efficient — FAIR Chemistry Documentation
fairchem.core.models.gemnet_oc.layers.efficient#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

BasisEmbedding

Embed a basis (CBF, SBF), optionally using the efficient reformulation.

EfficientInteractionBilinear

Efficient reformulation of the bilinear layer and subsequent summation.

-
-
-class fairchem.core.models.gemnet_oc.layers.efficient.BasisEmbedding(num_radial: int, emb_size_interm: int, num_spherical: int | None = None)#
-

Bases: torch.nn.Module

-

Embed a basis (CBF, SBF), optionally using the efficient reformulation.

-
-
Parameters:
-
    -
  • num_radial (int) – Number of radial basis functions.

  • -
  • emb_size_interm (int) – Intermediate embedding size of triplets/quadruplets.

  • -
  • num_spherical (int) – Number of circular/spherical basis functions. -Only required if there is a circular/spherical basis.

  • -
-
-
-
-
-weight: torch.nn.Parameter#
-
- -
-
-reset_parameters() None#
-
- -
-
-forward(rad_basis, sph_basis=None, idx_rad_outer=None, idx_rad_inner=None, idx_sph_outer=None, idx_sph_inner=None, num_atoms=None)#
-
-
Parameters:
-
    -
  • rad_basis (torch.Tensor, shape=(num_edges, num_radial or num_orders * num_radial)) – Raw radial basis.

  • -
  • sph_basis (torch.Tensor, shape=(num_triplets or num_quadruplets, num_spherical)) – Raw spherical or circular basis.

  • -
  • idx_rad_outer (torch.Tensor, shape=(num_edges)) – Atom associated with each radial basis value. -Optional, used for efficient edge aggregation.

  • -
  • idx_rad_inner (torch.Tensor, shape=(num_edges)) – Enumerates radial basis values per atom. -Optional, used for efficient edge aggregation.

  • -
  • idx_sph_outer (torch.Tensor, shape=(num_triplets or num_quadruplets)) – Edge associated with each circular/spherical basis value. -Optional, used for efficient triplet/quadruplet aggregation.

  • -
  • idx_sph_inner (torch.Tensor, shape=(num_triplets or num_quadruplets)) – Enumerates circular/spherical basis values per edge. -Optional, used for efficient triplet/quadruplet aggregation.

  • -
  • num_atoms (int) – Total number of atoms. -Optional, used for efficient edge aggregation.

  • -
-
-
Returns:
-

    -
  • rad_W1 (torch.Tensor, shape=(num_edges, emb_size_interm, num_spherical))

  • -
  • sph (torch.Tensor, shape=(num_edges, Kmax, num_spherical)) – Kmax = maximum number of neighbors of the edges

  • -
-

-
-
-
- -
- -
-
-class fairchem.core.models.gemnet_oc.layers.efficient.EfficientInteractionBilinear(emb_size_in: int, emb_size_interm: int, emb_size_out: int)#
-

Bases: torch.nn.Module

-

Efficient reformulation of the bilinear layer and subsequent summation.

-
-
Parameters:
-
    -
  • emb_size_in (int) – Embedding size of input triplets/quadruplets.

  • -
  • emb_size_interm (int) – Intermediate embedding size of the basis transformation.

  • -
  • emb_size_out (int) – Embedding size of output triplets/quadruplets.

  • -
-
-
-
-
-forward(basis, m, idx_agg_outer, idx_agg_inner, idx_agg2_outer=None, idx_agg2_inner=None, agg2_out_size=None)#
-
-
Parameters:
-
    -
  • basis (Tuple (torch.Tensor, torch.Tensor),) –

    -
    shapes=((num_edges, emb_size_interm, num_spherical),

    (num_edges, num_spherical, Kmax))

    -
    -
    -

    First element: Radial basis multiplied with weight matrix -Second element: Circular/spherical basis

    -

  • -
  • m (torch.Tensor, shape=(num_edges, emb_size_in)) – Input edge embeddings

  • -
  • idx_agg_outer (torch.Tensor, shape=(num_triplets or num_quadruplets)) – Output edge aggregating this intermediate triplet/quadruplet edge.

  • -
  • idx_agg_inner (torch.Tensor, shape=(num_triplets or num_quadruplets)) – Enumerates intermediate edges per output edge.

  • -
  • idx_agg2_outer (torch.Tensor, shape=(num_edges)) – Output atom aggregating this edge.

  • -
  • idx_agg2_inner (torch.Tensor, shape=(num_edges)) – Enumerates edges per output atom.

  • -
  • agg2_out_size (int) – Number of output embeddings when aggregating twice. Typically -the number of atoms.

  • -
-
-
Returns:
-

m_ca – Aggregated edge/atom embeddings.

-
-
Return type:
-

torch.Tensor, shape=(num_edges, emb_size)

-
-
-
- -
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/layers/embedding_block/index.html b/autoapi/fairchem/core/models/gemnet_oc/layers/embedding_block/index.html
deleted file mode 100644
index bdcf63d7f..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/layers/embedding_block/index.html
+++ /dev/null
@@ -1,719 +0,0 @@
-fairchem.core.models.gemnet_oc.layers.embedding_block — FAIR Chemistry Documentation
fairchem.core.models.gemnet_oc.layers.embedding_block#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

AtomEmbedding

Initial atom embeddings based on the atom type

EdgeEmbedding

Edge embedding based on the concatenation of atom embeddings

-
-
-class fairchem.core.models.gemnet_oc.layers.embedding_block.AtomEmbedding(emb_size: int, num_elements: int)#
-

Bases: torch.nn.Module

-

Initial atom embeddings based on the atom type

-
-
Parameters:
-

emb_size (int) – Atom embeddings size

-
-
-
-
-forward(Z) torch.Tensor#
-
-
Returns:
-

h – Atom embeddings.

-
-
Return type:
-

torch.Tensor, shape=(nAtoms, emb_size)

-
-
-
- -
- -
-
-class fairchem.core.models.gemnet_oc.layers.embedding_block.EdgeEmbedding(atom_features: int, edge_features: int, out_features: int, activation: str | None = None)#
-

Bases: torch.nn.Module

-

Edge embedding based on the concatenation of atom embeddings -and a subsequent dense layer.

-
-
Parameters:
-
    -
  • atom_features (int) – Embedding size of the atom embedding.

  • -
  • edge_features (int) – Embedding size of the input edge embedding.

  • -
  • out_features (int) – Embedding size after the dense layer.

  • -
  • activation (str) – Activation function used in the dense layer.

  • -
-
-
-
-
-forward(h: torch.Tensor, m: torch.Tensor, edge_index) torch.Tensor#
-
-
Parameters:
-
    -
  • h (torch.Tensor, shape (num_atoms, atom_features)) – Atom embeddings.

  • -
  • m (torch.Tensor, shape (num_edges, edge_features)) – Radial basis in embedding block, -edge embedding in interaction block.

  • -
-
-
Returns:
-

m_st – Edge embeddings.

-
-
Return type:
-

torch.Tensor, shape=(nEdges, emb_size)

-
-
-
- -
- -
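A compact sketch of the operation described for EdgeEmbedding (the concatenation order of source/target atom embeddings and edge features, and the SiLU activation, are assumptions):

import torch

num_atoms, atom_features, edge_features, out_features = 4, 8, 6, 16
h = torch.randn(num_atoms, atom_features)          # atom embeddings
m = torch.randn(5, edge_features)                  # radial basis or edge embeddings
edge_index = torch.tensor([[0, 1, 2, 3, 0],
                           [1, 0, 3, 2, 2]])
dense = torch.nn.Linear(2 * atom_features + edge_features, out_features, bias=False)
m_st = torch.nn.functional.silu(
    dense(torch.cat([h[edge_index[0]], h[edge_index[1]], m], dim=-1))
)  # (num_edges, out_features) edge embeddings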
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/layers/force_scaler/index.html b/autoapi/fairchem/core/models/gemnet_oc/layers/force_scaler/index.html
deleted file mode 100644
index 4db01d4d8..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/layers/force_scaler/index.html
+++ /dev/null
@@ -1,686 +0,0 @@
-fairchem.core.models.gemnet_oc.layers.force_scaler — FAIR Chemistry Documentation
fairchem.core.models.gemnet_oc.layers.force_scaler#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

ForceScaler

Scales up the energy and then scales down the forces

-
-
-class fairchem.core.models.gemnet_oc.layers.force_scaler.ForceScaler(init_scale: float = 2.0**8, growth_factor: float = 2.0, backoff_factor: float = 0.5, growth_interval: int = 2000, max_force_iters: int = 50, enabled: bool = True)#
-

Scales up the energy and then scales down the forces -to prevent NaNs and infs in calculations using AMP. -Inspired by torch.cuda.amp.GradScaler.

-
-
-scale(energy)#
-
- -
-
-unscale(forces)#
-
- -
-
-calc_forces(energy, pos)#
-
- -
-
-calc_forces_and_update(energy, pos)#
-
- -
-
-update() None#
-
- -
- -
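The idea in a few lines (hypothetical scale value, and a toy energy in place of a model prediction): multiply the energy before autograd so half-precision gradients do not underflow, then divide the resulting forces by the same factor.

import torch

scale = 2.0 ** 8
pos = torch.randn(5, 3, requires_grad=True)
energy = (pos ** 2).sum()   # stand-in for a model's energy prediction
forces = -torch.autograd.grad(energy * scale, pos, create_graph=True)[0] / scale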
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/layers/index.html b/autoapi/fairchem/core/models/gemnet_oc/layers/index.html
deleted file mode 100644
index b1a5754ec..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/layers/index.html
+++ /dev/null
@@ -1,627 +0,0 @@
-fairchem.core.models.gemnet_oc.layers — FAIR Chemistry Documentation
fairchem.core.models.gemnet_oc.layers#

-
-

Submodules#

\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/layers/interaction_block/index.html b/autoapi/fairchem/core/models/gemnet_oc/layers/interaction_block/index.html
deleted file mode 100644
index 0cb9d9601..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/layers/interaction_block/index.html
+++ /dev/null
@@ -1,837 +0,0 @@
-fairchem.core.models.gemnet_oc.layers.interaction_block — FAIR Chemistry Documentation
fairchem.core.models.gemnet_oc.layers.interaction_block#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - -

InteractionBlock

Interaction block for GemNet-Q/dQ.

QuadrupletInteraction

Quadruplet-based message passing block.

TripletInteraction

Triplet-based message passing block.

PairInteraction

Pair-based message passing block.

-
-
-class fairchem.core.models.gemnet_oc.layers.interaction_block.InteractionBlock(emb_size_atom: int, emb_size_edge: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_quad_in: int, emb_size_quad_out: int, emb_size_a2a_in: int, emb_size_a2a_out: int, emb_size_rbf: int, emb_size_cbf: int, emb_size_sbf: int, num_before_skip: int, num_after_skip: int, num_concat: int, num_atom: int, num_atom_emb_layers: int = 0, quad_interaction: bool = False, atom_edge_interaction: bool = False, edge_atom_interaction: bool = False, atom_interaction: bool = False, activation=None)#
-

Bases: torch.nn.Module

-

Interaction block for GemNet-Q/dQ.

-
-
Parameters:
-
    -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_trip_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings -before the bilinear layer.

  • -
  • emb_size_trip_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings -after the bilinear layer.

  • -
  • emb_size_quad_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings -before the bilinear layer.

  • -
  • emb_size_quad_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings -after the bilinear layer.

  • -
  • emb_size_a2a_in (int) – Embedding size in the atom interaction before the bilinear layer.

  • -
  • emb_size_a2a_out (int) – Embedding size in the atom interaction after the bilinear layer.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • emb_size_sbf (int) – Embedding size of the spherical basis transformation (two angles).

  • -
  • num_before_skip (int) – Number of residual blocks before the first skip connection.

  • -
  • num_after_skip (int) – Number of residual blocks after the first skip connection.

  • -
  • num_concat (int) – Number of residual blocks after the concatenation.

  • -
  • num_atom (int) – Number of residual blocks in the atom embedding blocks.

  • -
  • num_atom_emb_layers (int) – Number of residual blocks for transforming atom embeddings.

  • -
  • quad_interaction (bool) – Whether to use quadruplet interactions.

  • -
  • atom_edge_interaction (bool) – Whether to use atom-to-edge interactions.

  • -
  • edge_atom_interaction (bool) – Whether to use edge-to-atom interactions.

  • -
  • atom_interaction (bool) – Whether to use atom-to-atom interactions.

  • -
  • activation (str) – Name of the activation function to use in the dense layers.

  • -
-
-
-
-
-forward(h, m, bases_qint, bases_e2e, bases_a2e, bases_e2a, basis_a2a_rad, basis_atom_update, edge_index_main, a2ee2a_graph, a2a_graph, id_swap, trip_idx_e2e, trip_idx_a2e, trip_idx_e2a, quad_idx)#
-
-
Returns:
-

    -
  • h (torch.Tensor, shape=(nEdges, emb_size_atom)) – Atom embeddings.

  • -
  • m (torch.Tensor, shape=(nEdges, emb_size_edge)) – Edge embeddings (c->a).

  • -
-

-
-
-
- -
- -
-
-class fairchem.core.models.gemnet_oc.layers.interaction_block.QuadrupletInteraction(emb_size_edge, emb_size_quad_in, emb_size_quad_out, emb_size_rbf, emb_size_cbf, emb_size_sbf, symmetric_mp=True, activation=None)#
-

Bases: torch.nn.Module

-

Quadruplet-based message passing block.

-
-
Parameters:
-
    -
  • emb_size_edge (int) – Embedding size of the edges.

  • -
  • emb_size_quad_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings -before the bilinear layer.

  • -
  • emb_size_quad_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings -after the bilinear layer.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • emb_size_sbf (int) – Embedding size of the spherical basis transformation (two angles).

  • -
  • symmetric_mp (bool) – Whether to use symmetric message passing and -update the edges in both directions.

  • -
  • activation (str) – Name of the activation function to use in the dense layers.

  • -
-
-
-
-
-forward(m, bases, idx, id_swap)#
-
-
Returns:
-

m – Edge embeddings (c->a).

-
-
Return type:
-

torch.Tensor, shape=(nEdges, emb_size_edge)

-
-
-
- -
- -
-
-class fairchem.core.models.gemnet_oc.layers.interaction_block.TripletInteraction(emb_size_in: int, emb_size_out: int, emb_size_trip_in: int, emb_size_trip_out: int, emb_size_rbf: int, emb_size_cbf: int, symmetric_mp: bool = True, swap_output: bool = True, activation=None)#
-

Bases: torch.nn.Module

-

Triplet-based message passing block.

-
-
Parameters:
-
    -
  • emb_size_in (int) – Embedding size of the input embeddings.

  • -
  • emb_size_out (int) – Embedding size of the output embeddings.

  • -
  • emb_size_trip_in (int) – (Down-projected) embedding size of the quadruplet edge embeddings -before the bilinear layer.

  • -
  • emb_size_trip_out (int) – (Down-projected) embedding size of the quadruplet edge embeddings -after the bilinear layer.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • emb_size_cbf (int) – Embedding size of the circular basis transformation (one angle).

  • -
  • symmetric_mp (bool) – Whether to use symmetric message passing and -update the edges in both directions.

  • -
  • swap_output (bool) – Whether to swap the output embedding directions. -Only relevant if symmetric_mp is False.

  • -
  • activation (str) – Name of the activation function to use in the dense layers.

  • -
-
-
-
-
-forward(m, bases, idx, id_swap, expand_idx=None, idx_agg2=None, idx_agg2_inner=None, agg2_out_size=None)#
-
-
Returns:
-

m – Edge embeddings.

-
-
Return type:
-

torch.Tensor, shape=(nEdges, emb_size_edge)

-
-
-
- -
- -
-
-class fairchem.core.models.gemnet_oc.layers.interaction_block.PairInteraction(emb_size_atom, emb_size_pair_in, emb_size_pair_out, emb_size_rbf, activation=None)#
-

Bases: torch.nn.Module

-

Pair-based message passing block.

-
-
Parameters:
-
    -
  • emb_size_atom (int) – Embedding size of the atoms.

  • -
  • emb_size_pair_in (int) – Embedding size of the atom pairs before the bilinear layer.

  • -
  • emb_size_pair_out (int) – Embedding size of the atom pairs after the bilinear layer.

  • -
  • emb_size_rbf (int) – Embedding size of the radial basis transformation.

  • -
  • activation (str) – Name of the activation function to use in the dense layers.

  • -
-
-
-
-
-forward(h, rad_basis, edge_index, target_neighbor_idx)#
-
-
Returns:
-

h – Atom embeddings.

-
-
Return type:
-

torch.Tensor, shape=(num_atoms, emb_size_atom)

-
-
-
- -
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/layers/radial_basis/index.html b/autoapi/fairchem/core/models/gemnet_oc/layers/radial_basis/index.html
deleted file mode 100644
index 00b0e6215..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/layers/radial_basis/index.html
+++ /dev/null
@@ -1,846 +0,0 @@
-fairchem.core.models.gemnet_oc.layers.radial_basis — FAIR Chemistry Documentation
fairchem.core.models.gemnet_oc.layers.radial_basis#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - - - - -

PolynomialEnvelope

Polynomial envelope function that ensures a smooth cutoff.

ExponentialEnvelope

Exponential envelope function that ensures a smooth cutoff,

GaussianBasis

Base class for all neural network modules.

SphericalBesselBasis

First-order spherical Bessel basis

BernsteinBasis

Bernstein polynomial basis,

RadialBasis

-
param num_radial:
-

Number of basis functions. Controls the maximum frequency.

-
-
-

-
-
-class fairchem.core.models.gemnet_oc.layers.radial_basis.PolynomialEnvelope(exponent: int)#
-

Bases: torch.nn.Module

-

Polynomial envelope function that ensures a smooth cutoff.

-
-
Parameters:
-

exponent (int) – Exponent of the envelope function.

-
-
-
-
-forward(d_scaled: torch.Tensor) torch.Tensor#
-
- -
- -
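One common form of such a polynomial envelope (DimeNet-style) in terms of the scaled distance d in [0, 1]; whether this implementation uses the exponent directly or shifts it by one is an assumption:

import torch

def polynomial_envelope_sketch(d_scaled: torch.Tensor, p: int = 5) -> torch.Tensor:
    a = -(p + 1) * (p + 2) / 2
    b = p * (p + 2)
    c = -p * (p + 1) / 2
    env = 1 + a * d_scaled**p + b * d_scaled**(p + 1) + c * d_scaled**(p + 2)
    # smooth cutoff: the value (and low-order derivatives) go to zero at d_scaled = 1
    return torch.where(d_scaled < 1, env, torch.zeros_like(d_scaled))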
-
-class fairchem.core.models.gemnet_oc.layers.radial_basis.ExponentialEnvelope#
-

Bases: torch.nn.Module

-

Exponential envelope function that ensures a smooth cutoff, -as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. -SpookyNet: Learning Force Fields with Electronic Degrees of Freedom -and Nonlocal Effects

-
-
-forward(d_scaled: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.gemnet_oc.layers.radial_basis.GaussianBasis(start: float = 0.0, stop: float = 5.0, num_gaussians: int = 50, trainable: bool = False)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(dist: torch.Tensor) torch.Tensor#
-
- -
- -
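What a Gaussian radial-basis expansion with the constructor arguments above typically looks like (the width convention, one grid spacing here, is an assumption):

import torch

start, stop, num_gaussians = 0.0, 5.0, 50
offsets = torch.linspace(start, stop, num_gaussians)
width = offsets[1] - offsets[0]
dist = torch.tensor([0.7, 1.3, 2.9])
rbf = torch.exp(-0.5 * ((dist[:, None] - offsets[None, :]) / width) ** 2)
# rbf: (num_distances, num_gaussians)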
-
-class fairchem.core.models.gemnet_oc.layers.radial_basis.SphericalBesselBasis(num_radial: int, cutoff: float)#
-

Bases: torch.nn.Module

-

First-order spherical Bessel basis

-
-
Parameters:
-
    -
  • num_radial (int) – Number of basis functions. Controls the maximum frequency.

  • -
  • cutoff (float) – Cutoff distance in Angstrom.

  • -
-
-
-
-
-forward(d_scaled: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.gemnet_oc.layers.radial_basis.BernsteinBasis(num_radial: int, pregamma_initial: float = 0.45264)#
-

Bases: torch.nn.Module

-

Bernstein polynomial basis, -as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021. -SpookyNet: Learning Force Fields with Electronic Degrees of Freedom -and Nonlocal Effects

-
-
Parameters:
-
    -
  • num_radial (int) – Number of basis functions. Controls the maximum frequency.

  • -
  • pregamma_initial (float) – Initial value of the exponential coefficient gamma. Default: gamma = 0.5 * a_0**-1 = 0.94486; the inverse softplus gives pregamma = log(exp(gamma) - 1) = 0.45264.

  • -
-
-
-
-
-forward(d_scaled: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.gemnet_oc.layers.radial_basis.RadialBasis(num_radial: int, cutoff: float, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None, scale_basis: bool = False)#
-

Bases: torch.nn.Module

-
-
Parameters:
-
    -
  • num_radial (int) – Number of basis functions. Controls the maximum frequency.

  • -
  • cutoff (float) – Cutoff distance in Angstrom.

  • -
  • rbf (dict = {"name": "gaussian"}) – Basis function and its hyperparameters.

  • -
  • envelope (dict = {"name": "polynomial", "exponent": 5}) – Envelope function and its hyperparameters.

  • -
  • scale_basis (bool) – Whether to scale the basis values for better numerical stability.

  • -
-
-
-
-
-forward(d: torch.Tensor) torch.Tensor#
-
- -
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/layers/spherical_basis/index.html b/autoapi/fairchem/core/models/gemnet_oc/layers/spherical_basis/index.html
deleted file mode 100644
index 9ab559c24..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/layers/spherical_basis/index.html
+++ /dev/null
@@ -1,700 +0,0 @@
-fairchem.core.models.gemnet_oc.layers.spherical_basis — FAIR Chemistry Documentation
fairchem.core.models.gemnet_oc.layers.spherical_basis#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

CircularBasisLayer

2D Fourier Bessel Basis

SphericalBasisLayer

3D Fourier Bessel Basis

-
-
-class fairchem.core.models.gemnet_oc.layers.spherical_basis.CircularBasisLayer(num_spherical: int, radial_basis: fairchem.core.models.gemnet_oc.layers.radial_basis.RadialBasis, cbf: dict, scale_basis: bool = False)#
-

Bases: torch.nn.Module

-

2D Fourier Bessel Basis

-
-
Parameters:
-
    -
  • num_spherical (int) – Number of basis functions. Controls the maximum frequency.

  • -
  • radial_basis (RadialBasis) – Radial basis function.

  • -
  • cbf (dict) – Name and hyperparameters of the circular basis function.

  • -
  • scale_basis (bool) – Whether to scale the basis values for better numerical stability.

  • -
-
-
-
-
-forward(D_ca, cosφ_cab)#
-
- -
- -
-
-class fairchem.core.models.gemnet_oc.layers.spherical_basis.SphericalBasisLayer(num_spherical: int, radial_basis: fairchem.core.models.gemnet_oc.layers.radial_basis.RadialBasis, sbf: dict, scale_basis: bool = False)#
-

Bases: torch.nn.Module

-

3D Fourier Bessel Basis

-
-
Parameters:
-
    -
  • num_spherical (int) – Number of basis functions. Controls the maximum frequency.

  • -
  • radial_basis (RadialBasis) – Radial basis functions.

  • -
  • sbf (dict) – Name and hyperparameters of the spherical basis function.

  • -
  • scale_basis (bool) – Whether to scale the basis values for better numerical stability.

  • -
-
-
-
-
-forward(D_ca, cosφ_cab, θ_cabd)#
-
- -
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/gemnet_oc/utils/index.html b/autoapi/fairchem/core/models/gemnet_oc/utils/index.html
deleted file mode 100644
index f536f1bed..000000000
--- a/autoapi/fairchem/core/models/gemnet_oc/utils/index.html
+++ /dev/null
@@ -1,870 +0,0 @@
-fairchem.core.models.gemnet_oc.utils — FAIR Chemistry Documentation
fairchem.core.models.gemnet_oc.utils#

-

Copyright (c) Meta, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

ragged_range(sizes)

Multiple concatenated ranges.

repeat_blocks(→ torch.Tensor)

Repeat blocks of indices.

masked_select_sparsetensor_flat(...)

calculate_interatomic_vectors(R, id_s, id_t, offsets_st)

Calculate the vectors connecting the given atom pairs,

inner_product_clamped(→ torch.Tensor)

Calculate the inner product between the given normalized vectors,

get_angle(→ torch.Tensor)

Calculate angles between atoms c -> a <- b.

vector_rejection(R_ab, P_n)

Project the vector R_ab onto a plane with normal vector P_n.

get_projected_angle(→ torch.Tensor)

Project the vector R_ab onto a plane with normal vector P_n,

mask_neighbors(neighbors, edge_mask)

get_neighbor_order(→ torch.Tensor)

Give a mask that filters out edges so that each atom has at most

get_inner_idx(idx, dim_size)

Assign an inner index to each element (neighbor) with the same index.

get_edge_id(edge_idx, cell_offsets, num_atoms)

-
-
-fairchem.core.models.gemnet_oc.utils.ragged_range(sizes)#
-

Multiple concatenated ranges.

-

Examples

-

sizes = [1 4 2 3] -Return: [0 0 1 2 3 0 1 0 1 2]

-
- -
-
-fairchem.core.models.gemnet_oc.utils.repeat_blocks(sizes, repeats, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) torch.Tensor#
-

Repeat blocks of indices. -Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements

-

continuous_indexing: Whether to keep increasing the index after each block
start_idx: Starting index
block_inc: Number to increment by after each block, either global or per block. Shape: len(sizes) - 1
repeat_inc: Number to increment by after each repetition, either global or per block

Examples

sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False
Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; repeat_inc = 4
Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; start_idx = 5
Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ; block_inc = 1
Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7]
sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 1 2 0 1 2 3 4 3 4 3 4]
sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True
Return: [0 1 0 1 5 6 5 6]

-
- -
-
-fairchem.core.models.gemnet_oc.utils.masked_select_sparsetensor_flat(src, mask) torch_sparse.SparseTensor#
-
- -
-
-fairchem.core.models.gemnet_oc.utils.calculate_interatomic_vectors(R, id_s, id_t, offsets_st)#
-

Calculate the vectors connecting the given atom pairs, -considering offsets from periodic boundary conditions (PBC).

-
-
Parameters:
-
    -
  • R (Tensor, shape = (nAtoms, 3)) – Atom positions.

  • -
  • id_s (Tensor, shape = (nEdges,)) – Indices of the source atom of the edges.

  • -
  • id_t (Tensor, shape = (nEdges,)) – Indices of the target atom of the edges.

  • -
  • offsets_st (Tensor, shape = (nEdges,)) – PBC offsets of the edges. -Subtract this from the correct direction.

  • -
-
-
Returns:
-

(D_st, V_st)

-
-
D_st: Tensor, shape = (nEdges,)

Distance from atom t to s.

-
-
V_st: Tensor, shape = (nEdges,)

Unit direction from atom t to s.

-
-
-

-
-
Return type:
-

tuple

-
-
-
- -
-
-fairchem.core.models.gemnet_oc.utils.inner_product_clamped(x, y) torch.Tensor#
-

Calculate the inner product between the given normalized vectors, -giving a result between -1 and 1.

-
- -
-
-fairchem.core.models.gemnet_oc.utils.get_angle(R_ac, R_ab) torch.Tensor#
-

Calculate angles between atoms c -> a <- b.

-
-
Parameters:
-
    -
  • R_ac (Tensor, shape = (N, 3)) – Vector from atom a to c.

  • -
  • R_ab (Tensor, shape = (N, 3)) – Vector from atom a to b.

  • -
-
-
Returns:
-

angle_cab – Angle between atoms c <- a -> b.

-
-
Return type:
-

Tensor, shape = (N,)

-
-
-
- -
-
-fairchem.core.models.gemnet_oc.utils.vector_rejection(R_ab, P_n)#
-

Project the vector R_ab onto a plane with normal vector P_n.

-
-
Parameters:
-
    -
  • R_ab (Tensor, shape = (N, 3)) – Vector from atom a to b.

  • -
  • P_n (Tensor, shape = (N, 3)) – Normal vector of a plane onto which to project R_ab.

  • -
-
-
Returns:
-

R_ab_proj – Projected vector (orthogonal to P_n).

-
-
Return type:
-

Tensor, shape = (N, 3)

-
-
-
- -
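The standard formula behind vector_rejection, sketched in PyTorch (assumes P_n is not necessarily normalized; the fairchem version may assume unit normals):

import torch

def vector_rejection_sketch(R_ab: torch.Tensor, P_n: torch.Tensor) -> torch.Tensor:
    # Remove the component of R_ab along P_n, leaving the part lying in the plane.
    a_dot_n = (R_ab * P_n).sum(dim=-1, keepdim=True)
    n_dot_n = (P_n * P_n).sum(dim=-1, keepdim=True)
    return R_ab - a_dot_n / n_dot_n * P_n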
-
-fairchem.core.models.gemnet_oc.utils.get_projected_angle(R_ab, P_n, eps: float = 0.0001) torch.Tensor#
-

Project the vector R_ab onto a plane with normal vector P_n, -then calculate the angle w.r.t. the (x [cross] P_n), -or (y [cross] P_n) if the former would be ill-defined/numerically unstable.

-
-
Parameters:
-
    -
  • R_ab (Tensor, shape = (N, 3)) – Vector from atom a to b.

  • -
  • P_n (Tensor, shape = (N, 3)) – Normal vector of a plane onto which to project R_ab.

  • -
  • eps (float) – Norm of projection below which to use the y-axis instead of x.

  • -
-
-
Returns:
-

angle_ab – Angle on plane w.r.t. x- or y-axis.

-
-
Return type:
-

Tensor, shape = (N)

-
-
-
- -
-
-fairchem.core.models.gemnet_oc.utils.mask_neighbors(neighbors, edge_mask)#
-
- -
-
-fairchem.core.models.gemnet_oc.utils.get_neighbor_order(num_atoms: int, index, atom_distance) torch.Tensor#
-

Give a mask that filters out edges so that each atom has at most -max_num_neighbors_threshold neighbors.

-
- -
-
-fairchem.core.models.gemnet_oc.utils.get_inner_idx(idx, dim_size)#
-

Assign an inner index to each element (neighbor) with the same index. For example, with idx=[0 0 0 1 1 1 1 2 2] this returns [0 1 2 0 1 2 3 0 1]. These indices allow reshaping the neighbor indices into a dense matrix. idx has to be sorted for this to work.

-
- -
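A compact way to reproduce the example above for a sorted idx (illustrative; not necessarily how fairchem implements it):

import torch

idx = torch.tensor([0, 0, 0, 1, 1, 1, 1, 2, 2])
counts = torch.bincount(idx)
starts = torch.cumsum(counts, 0) - counts            # first position of each group
inner_idx = torch.arange(idx.numel()) - starts.repeat_interleave(counts)
print(inner_idx.tolist())  # [0, 1, 2, 0, 1, 2, 3, 0, 1]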
-
-fairchem.core.models.gemnet_oc.utils.get_edge_id(edge_idx, cell_offsets, num_atoms: int)#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/index.html b/autoapi/fairchem/core/models/index.html
deleted file mode 100644
index 874df0e16..000000000
--- a/autoapi/fairchem/core/models/index.html
+++ /dev/null
@@ -1,796 +0,0 @@
-fairchem.core.models — FAIR Chemistry Documentation
fairchem.core.models#

-
-

Subpackages#

-
- -
-
-
-

Submodules#

- -
-
-

Package Contents#

-
-

Functions#

- - - - - - -

model_name_to_local_file(→ str)

Download a pretrained checkpoint if it does not exist already

-
-
-

Attributes#

- - - - - - -

available_pretrained_models

-
-
-fairchem.core.models.available_pretrained_models#
-
- -
-
-fairchem.core.models.model_name_to_local_file(model_name: str, local_cache: str | pathlib.Path) str#
-

Download a pretrained checkpoint if it does not exist already

-
-
Parameters:
-
    -
  • model_name (str) – the model name. See available_pretrained_checkpoints.

  • -
  • local_cache (str or Path) – path to local cache directory

  • -
-
-
Returns:
-

local path to checkpoint file

-
-
Return type:
-

str

-
-
-
- -
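Example of the documented usage (the cache path is arbitrary, and "MODEL_NAME" is a placeholder for one of the names listed in available_pretrained_models):

from fairchem.core.models import available_pretrained_models, model_name_to_local_file

print(available_pretrained_models)  # names accepted by model_name_to_local_file
# Replace "MODEL_NAME" with one of the printed names before running.
checkpoint_path = model_name_to_local_file("MODEL_NAME", local_cache="/tmp/fairchem_checkpoints")
print(checkpoint_path)              # local path to the downloaded checkpoint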
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/model_registry/index.html b/autoapi/fairchem/core/models/model_registry/index.html
deleted file mode 100644
index ea1aed827..000000000
--- a/autoapi/fairchem/core/models/model_registry/index.html
+++ /dev/null
@@ -1,688 +0,0 @@
-fairchem.core.models.model_registry — FAIR Chemistry Documentation
fairchem.core.models.model_registry#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - -

model_name_to_local_file(→ str)

Download a pretrained checkpoint if it does not exist already

-
-
-

Attributes#

- - - - - - - - - -

MODEL_REGISTRY

available_pretrained_models

-
-
-fairchem.core.models.model_registry.MODEL_REGISTRY#
-
- -
-
-fairchem.core.models.model_registry.available_pretrained_models#
-
- -
-
-fairchem.core.models.model_registry.model_name_to_local_file(model_name: str, local_cache: str | pathlib.Path) str#
-

Download a pretrained checkpoint if it does not exist already

-
-
Parameters:
-
    -
  • model_name (str) – the model name. See available_pretrained_checkpoints.

  • -
  • local_cache (str or Path) – path to local cache directory

  • -
-
-
Returns:
-

local path to checkpoint file

-
-
Return type:
-

str

-
-
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/painn/index.html b/autoapi/fairchem/core/models/painn/index.html
deleted file mode 100644
index 814451823..000000000
--- a/autoapi/fairchem/core/models/painn/index.html
+++ /dev/null
@@ -1,720 +0,0 @@
-fairchem.core.models.painn — FAIR Chemistry Documentation
fairchem.core.models.painn#

-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - -

PaiNN

PaiNN model based on the description in Schütt et al. (2021):

-
-
-class fairchem.core.models.painn.PaiNN(num_atoms: int, bond_feat_dim: int, num_targets: int, hidden_channels: int = 512, num_layers: int = 6, num_rbf: int = 128, cutoff: float = 12.0, max_neighbors: int = 50, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None, regress_forces: bool = True, direct_forces: bool = True, use_pbc: bool = True, otf_graph: bool = True, num_elements: int = 83, scale_file: str | None = None)#
-

Bases: fairchem.core.models.base.BaseModel

-

PaiNN model based on the description in Schütt et al. (2021): Equivariant message passing for the prediction of tensorial properties and molecular spectra, https://arxiv.org/abs/2102.03150.
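A hedged construction sketch based only on the signature above. The num_atoms and bond_feat_dim values are placeholders because their use is not documented here; the remaining values repeat the documented defaults.

from fairchem.core.models.painn import PaiNN

model = PaiNN(
    num_atoms=0,        # placeholder; usage not documented here
    bond_feat_dim=0,    # placeholder; usage not documented here
    num_targets=1,
    hidden_channels=512,
    num_layers=6,
    num_rbf=128,
    cutoff=12.0,
    regress_forces=True,
    direct_forces=True,
)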

-
-
-property num_params: int#
-
- -
-
-reset_parameters() None#
-
- -
-
-select_symmetric_edges(tensor, mask, reorder_idx, inverse_neg) torch.Tensor#
-
- -
-
-symmetrize_edges(edge_index, cell_offsets, neighbors, batch_idx, reorder_tensors, reorder_tensors_invneg)#
-

Symmetrize edges to ensure existence of counter-directional edges.

-

Some edges are only present in one direction in the data, since every atom has a maximum number of neighbors. If symmetric_edge_symmetrization is False, we only use i->j edges here, so we lose some j->i edges and add others by making it symmetric. If symmetric_edge_symmetrization is True, we always use both directions.

-
- -
-
-generate_graph_values(data)#
-
- -
-
-forward(data)#
-
- -
-
-__repr__() str#
-

Return repr(self).

\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/painn/painn/index.html b/autoapi/fairchem/core/models/painn/painn/index.html
deleted file mode 100644
index 424e14a8e..000000000
--- a/autoapi/fairchem/core/models/painn/painn/index.html
+++ /dev/null
@@ -1,1010 +0,0 @@

fairchem.core.models.painn.painn#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-

-

MIT License

-

Copyright (c) 2021 www.compscience.org

-

Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the “Software”), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions:

-

The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software.

-

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - -

PaiNN

PaiNN model based on the description in Schütt et al. (2021):

PaiNNMessage

Base class for creating message passing layers of the form

PaiNNUpdate

Base class for all neural network modules.

PaiNNOutput

Base class for all neural network modules.

GatedEquivariantBlock

Gated Equivariant Block as defined in Schütt et al. (2021):

-
-
-class fairchem.core.models.painn.painn.PaiNN(num_atoms: int, bond_feat_dim: int, num_targets: int, hidden_channels: int = 512, num_layers: int = 6, num_rbf: int = 128, cutoff: float = 12.0, max_neighbors: int = 50, rbf: dict[str, str] | None = None, envelope: dict[str, str | int] | None = None, regress_forces: bool = True, direct_forces: bool = True, use_pbc: bool = True, otf_graph: bool = True, num_elements: int = 83, scale_file: str | None = None)#
-

Bases: fairchem.core.models.base.BaseModel

-

PaiNN model based on the description in Schütt et al. (2021): Equivariant message passing for the prediction of tensorial properties and molecular spectra, https://arxiv.org/abs/2102.03150.

-
-
-property num_params: int#
-
- -
-
-reset_parameters() None#
-
- -
-
-select_symmetric_edges(tensor, mask, reorder_idx, inverse_neg) torch.Tensor#
-
- -
-
-symmetrize_edges(edge_index, cell_offsets, neighbors, batch_idx, reorder_tensors, reorder_tensors_invneg)#
-

Symmetrize edges to ensure existence of counter-directional edges.

-

Some edges are only present in one direction in the data, since every atom has a maximum number of neighbors. If symmetric_edge_symmetrization is False, we only use i->j edges here, so we lose some j->i edges and add others by making it symmetric. If symmetric_edge_symmetrization is True, we always use both directions.

-
- -
-
-generate_graph_values(data)#
-
- -
-
-forward(data)#
-
- -
-
-__repr__() str#
-

Return repr(self).

-
- -
- -
-
-class fairchem.core.models.painn.painn.PaiNNMessage(hidden_channels, num_rbf)#
-

Bases: torch_geometric.nn.MessagePassing

-

Base class for creating message passing layers of the form

-
-\[\mathbf{x}_i^{\prime} = \gamma_{\mathbf{\Theta}} \left( \mathbf{x}_i, -\bigoplus_{j \in \mathcal{N}(i)} \, \phi_{\mathbf{\Theta}} -\left(\mathbf{x}_i, \mathbf{x}_j,\mathbf{e}_{j,i}\right) \right),\]
-

where \(\bigoplus\) denotes a differentiable, permutation invariant function, e.g., sum, mean, min, max or mul, and \(\gamma_{\mathbf{\Theta}}\) and \(\phi_{\mathbf{\Theta}}\) denote differentiable functions such as MLPs. See the torch_geometric documentation for the accompanying tutorial.

-
-
Parameters:
-
    -
  • aggr (str or [str] or Aggregation, optional) – The aggregation scheme -to use, e.g., "add", "sum" "mean", -"min", "max" or "mul". -In addition, can be any -Aggregation module (or any string -that automatically resolves to it). -If given as a list, will make use of multiple aggregations in which -different outputs will get concatenated in the last dimension. -If set to None, the MessagePassing instantiation is -expected to implement its own aggregation logic via -aggregate(). (default: "add")

  • -
  • aggr_kwargs (Dict[str, Any], optional) – Arguments passed to the -respective aggregation function in case it gets automatically -resolved. (default: None)

  • -
  • flow (str, optional) – The flow direction of message passing -("source_to_target" or "target_to_source"). -(default: "source_to_target")

  • -
  • node_dim (int, optional) – The axis along which to propagate. -(default: -2)

  • -
  • decomposed_layers (int, optional) – The number of feature decomposition -layers, as introduced in the “Optimizing Memory Efficiency of -Graph Neural Networks on Edge Computing Platforms” paper. -Feature decomposition reduces the peak memory usage by slicing -the feature dimensions into separated feature decomposition layers -during GNN aggregation. -This method can accelerate GNN execution on CPU-based platforms -(e.g., 2-3x speedup on the -Reddit dataset) for common GNN -models such as GCN, -GraphSAGE, -GIN, etc. -However, this method is not applicable to all GNN operators -available, in particular for operators in which message computation -can not easily be decomposed, e.g. in attention-based GNNs. -The selection of the optimal value of decomposed_layers -depends both on the specific graph dataset and available hardware -resources. -A value of 2 is suitable in most cases. -Although the peak memory usage is directly associated with the -granularity of feature decomposition, the same is not necessarily -true for execution speedups. (default: 1)

  • -
-
-
-
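The formula above is easiest to read next to a toy layer. The sketch below is illustrative only and is not fairchem code; it uses torch_geometric's MessagePassing API directly to show where \(\phi_{\mathbf{\Theta}}\) (message) and \(\gamma_{\mathbf{\Theta}}\) (update) plug in.

# Hedged, self-contained toy example of the message/aggregate/update pattern.
import torch
from torch_geometric.nn import MessagePassing

class ToyConv(MessagePassing):
    def __init__(self):
        super().__init__(aggr="add")           # the bigoplus reduction

    def forward(self, x, edge_index):
        return self.propagate(edge_index, x=x)

    def message(self, x_i, x_j):               # plays the role of phi_Theta
        return x_j - x_i

    def update(self, aggr_out, x):             # plays the role of gamma_Theta
        return x + aggr_out

conv = ToyConv()
x = torch.randn(4, 8)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]])
out = conv(x, edge_index)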
-
-reset_parameters() None#
-

Resets all learnable parameters of the module.

-
- -
-
-forward(x, vec, edge_index, edge_rbf, edge_vector)#
-

Runs the forward pass of the module.

-
- -
-
-message(xh_j, vec_j, rbfh_ij, r_ij)#
-

Constructs messages from node \(j\) to node \(i\) in analogy to \(\phi_{\mathbf{\Theta}}\) for each edge in edge_index. This function can take any argument as input which was initially passed to propagate(). Furthermore, tensors passed to propagate() can be mapped to the respective nodes \(i\) and \(j\) by appending _i or _j to the variable name, e.g., x_i and x_j.

-
- -
-
-aggregate(features: tuple[torch.Tensor, torch.Tensor], index: torch.Tensor, dim_size: int) tuple[torch.Tensor, torch.Tensor]#
-

Aggregates messages from neighbors as -\(\bigoplus_{j \in \mathcal{N}(i)}\).

-

Takes in the output of message computation as first argument and any -argument which was initially passed to propagate().

-

By default, this function will delegate its call to the underlying -Aggregation module to reduce messages -as specified in __init__() by the aggr argument.

-
- -
-
-update(inputs: tuple[torch.Tensor, torch.Tensor]) tuple[torch.Tensor, torch.Tensor]#
-

Updates node embeddings in analogy to -\(\gamma_{\mathbf{\Theta}}\) for each node -\(i \in \mathcal{V}\). -Takes in the output of aggregation as first argument and any argument -which was initially passed to propagate().

-
- -
- -
-
-class fairchem.core.models.painn.painn.PaiNNUpdate(hidden_channels)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-reset_parameters() None#
-
- -
-
-forward(x, vec)#
-
- -
- -
-
-class fairchem.core.models.painn.painn.PaiNNOutput(hidden_channels)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-reset_parameters() None#
-
- -
-
-forward(x, vec)#
-
- -
- -
-
-class fairchem.core.models.painn.painn.GatedEquivariantBlock(hidden_channels, out_channels)#
-

Bases: torch.nn.Module

-

Gated Equivariant Block as defined in Schütt et al. (2021): -Equivariant message passing for the prediction of tensorial properties and molecular spectra

-
-
-reset_parameters() None#
-
- -
-
-forward(x, v)#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/painn/utils/index.html b/autoapi/fairchem/core/models/painn/utils/index.html
deleted file mode 100644
index 93225bf37..000000000
--- a/autoapi/fairchem/core/models/painn/utils/index.html
+++ /dev/null
@@ -1,684 +0,0 @@

fairchem.core.models.painn.utils#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

repeat_blocks(→ torch.Tensor)

Repeat blocks of indices.

get_edge_id(edge_idx, cell_offsets, num_atoms)

-
-
fairchem.core.models.painn.utils.repeat_blocks(sizes, repeats, continuous_indexing: bool = True, start_idx: int = 0, block_inc: int = 0, repeat_inc: int = 0) → torch.Tensor#

Repeat blocks of indices. Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements

continuous_indexing: Whether to keep increasing the index after each block
start_idx: Starting index
block_inc: Number to increment by after each block, either global or per block. Shape: len(sizes) - 1
repeat_inc: Number to increment by after each repetition, either global or per block

Examples

sizes = [1,3,2]; repeats = [3,2,3]; continuous_indexing = False
  Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1]
sizes = [1,3,2]; repeats = [3,2,3]; continuous_indexing = True
  Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5]
sizes = [1,3,2]; repeats = [3,2,3]; continuous_indexing = True; repeat_inc = 4
  Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13]
sizes = [1,3,2]; repeats = [3,2,3]; continuous_indexing = True; start_idx = 5
  Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10]
sizes = [1,3,2]; repeats = [3,2,3]; continuous_indexing = True; block_inc = 1
  Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7]
sizes = [0,3,2]; repeats = [3,2,3]; continuous_indexing = True
  Return: [0 1 2 0 1 2 3 4 3 4 3 4]
sizes = [2,3,2]; repeats = [2,0,2]; continuous_indexing = True
  Return: [0 1 0 1 5 6 5 6]
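A short sketch reproducing the first documented example. Passing sizes and repeats as integer torch tensors is an assumption; the expected output is taken from the docstring above.

import torch
from fairchem.core.models.painn.utils import repeat_blocks

idx = repeat_blocks(
    sizes=torch.tensor([1, 3, 2]),
    repeats=torch.tensor([3, 2, 3]),
    continuous_indexing=False,
)
# expected, per the docstring: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1]
print(idx)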
-
-fairchem.core.models.painn.utils.get_edge_id(edge_idx, cell_offsets, num_atoms: int)#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/schnet/index.html b/autoapi/fairchem/core/models/schnet/index.html
deleted file mode 100644
index cf76a3718..000000000
--- a/autoapi/fairchem/core/models/schnet/index.html
+++ /dev/null
@@ -1,720 +0,0 @@

fairchem.core.models.schnet#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

SchNetWrap

Wrapper around the continuous-filter convolutional neural network SchNet from the

-
-
-class fairchem.core.models.schnet.SchNetWrap(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, hidden_channels: int = 128, num_filters: int = 128, num_interactions: int = 6, num_gaussians: int = 50, cutoff: float = 10.0, readout: str = 'add')#
-

Bases: torch_geometric.nn.SchNet, fairchem.core.models.base.BaseModel

-

Wrapper around the continuous-filter convolutional neural network SchNet from the “SchNet: A Continuous-filter Convolutional Neural Network for Modeling Quantum Interactions” paper. Each layer uses interaction blocks of the form:

-
-\[\mathbf{x}^{\prime}_i = \sum_{j \in \mathcal{N}(i)} \mathbf{x}_j \odot -h_{\mathbf{\Theta}} ( \exp(-\gamma(\mathbf{e}_{j,i} - \mathbf{\mu}))),\]
-
-
Parameters:
-
    -
  • num_atoms (int) – Unused argument

  • -
  • bond_feat_dim (int) – Unused argument

  • -
  • num_targets (int) – Number of targets to predict.

  • -
  • use_pbc (bool, optional) – If set to True, account for periodic boundary conditions. -(default: True)

  • -
  • regress_forces (bool, optional) – If set to True, predict forces by differentiating -energy with respect to positions. -(default: True)

  • -
  • otf_graph (bool, optional) – If set to True, compute graph edges on the fly. -(default: False)

  • -
  • hidden_channels (int, optional) – Number of hidden channels. -(default: 128)

  • -
  • num_filters (int, optional) – Number of filters to use. -(default: 128)

  • -
  • num_interactions (int, optional) – Number of interaction blocks -(default: 6)

  • -
  • num_gaussians (int, optional) – The number of gaussians \(\mu\). -(default: 50)

  • -
  • cutoff (float, optional) – Cutoff distance for interatomic interactions. -(default: 10.0)

  • -
  • readout (string, optional) – Whether to apply "add" or -"mean" global aggregation. (default: "add")

  • -
-
-
-
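A hedged construction sketch using the documented defaults. The first two arguments are passed as zeros because the parameter list above documents them as unused.

from fairchem.core.models.schnet import SchNetWrap

model = SchNetWrap(
    num_atoms=0,         # documented as unused
    bond_feat_dim=0,     # documented as unused
    num_targets=1,
    hidden_channels=128,
    num_filters=128,
    num_interactions=6,
    num_gaussians=50,
    cutoff=10.0,
    readout="add",
)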
-
-property num_params: int#
-
- -
-
-_forward(data)#
-
- -
-
-forward(data)#
-
-
Parameters:
-
    -
  • z (torch.Tensor) – Atomic number of each atom with shape -[num_atoms].

  • -
  • pos (torch.Tensor) – Coordinates of each atom with shape -[num_atoms, 3].

  • -
  • batch (torch.Tensor, optional) – Batch indices assigning each atom -to a separate molecule with shape [num_atoms]. -(default: None)

  • -
-
-
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/scn/index.html b/autoapi/fairchem/core/models/scn/index.html
deleted file mode 100644
index 6a9a7bcb1..000000000
--- a/autoapi/fairchem/core/models/scn/index.html
+++ /dev/null
@@ -1,766 +0,0 @@

fairchem.core.models.scn#

-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - -

SphericalChannelNetwork

Spherical Channel Network

-
-
-class fairchem.core.models.scn.SphericalChannelNetwork(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_num_neighbors: int = 20, cutoff: float = 8.0, max_num_elements: int = 90, num_interactions: int = 8, lmax: int = 6, mmax: int = 1, num_resolutions: int = 2, sphere_channels: int = 128, sphere_channels_reduce: int = 128, hidden_channels: int = 256, num_taps: int = -1, use_grid: bool = True, num_bands: int = 1, num_sphere_samples: int = 128, num_basis_functions: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False, direct_forces: bool = True)#
-

Bases: fairchem.core.models.base.BaseModel

-

Spherical Channel Network. Paper: Spherical Channels for Modeling Atomic Interactions

-
-
Parameters:
-
    -
  • use_pbc (bool) – Use periodic boundary conditions

  • -
  • regress_forces (bool) – Compute forces

  • -
  • otf_graph (bool) – Compute graph On The Fly (OTF)

  • -
  • max_num_neighbors (int) – Maximum number of neighbors per atom

  • -
  •   cutoff (float) – Maximum distance between neighboring atoms in Angstroms

  • -
  • max_num_elements (int) – Maximum atomic number

  • -
  • num_interactions (int) – Number of layers in the GNN

  • -
  • lmax (int) – Maximum degree of the spherical harmonics (1 to 10)

  • -
  • mmax (int) – Maximum order of the spherical harmonics (0 or 1)

  • -
  •   num_resolutions (int) – Number of resolutions used to compute messages; further-away atoms have lower resolution (1 or 2)

  • -
  • sphere_channels (int) – Number of spherical channels

  • -
  • sphere_channels_reduce (int) – Number of spherical channels used during message passing (downsample or upsample)

  • -
  • hidden_channels (int) – Number of hidden units in message passing

  • -
  • num_taps (int) – Number of taps or rotations used during message passing (1 or otherwise set automatically based on mmax)

  • -
  • use_grid (bool) – Use non-linear pointwise convolution during aggregation

  • -
  • num_bands (int) – Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2)

  • -
  • num_sphere_samples (int) – Number of samples used to approximate the integration of the sphere in the output blocks

  • -
  • num_basis_functions (int) – Number of basis functions used for distance and atomic number blocks

  • -
  • distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu") – Basis function used for distances

  • -
  • basis_width_scalar (float) – Width of distance basis function

  • -
  • distance_resolution (float) – Distance between distance basis functions in Angstroms

  • -
  • show_timing_info (bool) – Show timing and memory info

  • -
-
-
-
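A hedged construction sketch with the documented defaults. The first three positional arguments are placeholders, since their use is not described in the parameter list above.

from fairchem.core.models.scn import SphericalChannelNetwork

model = SphericalChannelNetwork(
    num_atoms=0,          # placeholder; not described above
    bond_feat_dim=0,      # placeholder; not described above
    num_targets=1,
    lmax=6,
    mmax=1,
    num_interactions=8,
    sphere_channels=128,
    distance_function="gaussian",
)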
-
-property num_params: int#
-
- -
-
-energy_fc1: torch.nn.Linear#
-
- -
-
-energy_fc2: torch.nn.Linear#
-
- -
-
-energy_fc3: torch.nn.Linear#
-
- -
-
-force_fc1: torch.nn.Linear#
-
- -
-
-force_fc2: torch.nn.Linear#
-
- -
-
-force_fc3: torch.nn.Linear#
-
- -
-
-forward(data)#
-
- -
-
-_forward_helper(data)#
-
- -
-
-_init_edge_rot_mat(data, edge_index, edge_distance_vec)#
-
- -
-
-_rank_edge_distances(edge_distance, edge_index, max_num_neighbors: int) torch.Tensor#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/scn/sampling/index.html b/autoapi/fairchem/core/models/scn/sampling/index.html
deleted file mode 100644
index a26cc0b6d..000000000
--- a/autoapi/fairchem/core/models/scn/sampling/index.html
+++ /dev/null
@@ -1,654 +0,0 @@

fairchem.core.models.scn.sampling#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

CalcSpherePoints(→ torch.Tensor)

CalcSpherePointsRandom(→ torch.Tensor)

-
-
-fairchem.core.models.scn.sampling.CalcSpherePoints(num_points: int, device: str = 'cpu') torch.Tensor#
-
- -
-
-fairchem.core.models.scn.sampling.CalcSpherePointsRandom(num_points: int, device) torch.Tensor#
-
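A small usage sketch for the two sampling helpers above. The [num_points, 3] output shape is an assumption inferred from the function names; it is not stated in these docs.

from fairchem.core.models.scn.sampling import (
    CalcSpherePoints,
    CalcSpherePointsRandom,
)

pts = CalcSpherePoints(128, device="cpu")              # fixed point set (assumed)
rand_pts = CalcSpherePointsRandom(128, device="cpu")   # random point set
print(pts.shape, rand_pts.shape)  # assumed: torch.Size([128, 3]) each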
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/scn/scn/index.html b/autoapi/fairchem/core/models/scn/scn/index.html
deleted file mode 100644
index 7ba037cb8..000000000
--- a/autoapi/fairchem/core/models/scn/scn/index.html
+++ /dev/null
@@ -1,915 +0,0 @@

fairchem.core.models.scn.scn#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - -

SphericalChannelNetwork

Spherical Channel Network

EdgeBlock

Base class for all neural network modules.

MessageBlock

Base class for all neural network modules.

DistanceBlock

Base class for all neural network modules.

-
-
-class fairchem.core.models.scn.scn.SphericalChannelNetwork(num_atoms: int, bond_feat_dim: int, num_targets: int, use_pbc: bool = True, regress_forces: bool = True, otf_graph: bool = False, max_num_neighbors: int = 20, cutoff: float = 8.0, max_num_elements: int = 90, num_interactions: int = 8, lmax: int = 6, mmax: int = 1, num_resolutions: int = 2, sphere_channels: int = 128, sphere_channels_reduce: int = 128, hidden_channels: int = 256, num_taps: int = -1, use_grid: bool = True, num_bands: int = 1, num_sphere_samples: int = 128, num_basis_functions: int = 128, distance_function: str = 'gaussian', basis_width_scalar: float = 1.0, distance_resolution: float = 0.02, show_timing_info: bool = False, direct_forces: bool = True)#
-

Bases: fairchem.core.models.base.BaseModel

-

Spherical Channel Network. Paper: Spherical Channels for Modeling Atomic Interactions

-
-
Parameters:
-
    -
  • use_pbc (bool) – Use periodic boundary conditions

  • -
  • regress_forces (bool) – Compute forces

  • -
  • otf_graph (bool) – Compute graph On The Fly (OTF)

  • -
  • max_num_neighbors (int) – Maximum number of neighbors per atom

  • -
  •   cutoff (float) – Maximum distance between neighboring atoms in Angstroms

  • -
  • max_num_elements (int) – Maximum atomic number

  • -
  • num_interactions (int) – Number of layers in the GNN

  • -
  • lmax (int) – Maximum degree of the spherical harmonics (1 to 10)

  • -
  • mmax (int) – Maximum order of the spherical harmonics (0 or 1)

  • -
  •   num_resolutions (int) – Number of resolutions used to compute messages; further-away atoms have lower resolution (1 or 2)

  • -
  • sphere_channels (int) – Number of spherical channels

  • -
  • sphere_channels_reduce (int) – Number of spherical channels used during message passing (downsample or upsample)

  • -
  • hidden_channels (int) – Number of hidden units in message passing

  • -
  • num_taps (int) – Number of taps or rotations used during message passing (1 or otherwise set automatically based on mmax)

  • -
  • use_grid (bool) – Use non-linear pointwise convolution during aggregation

  • -
  • num_bands (int) – Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2)

  • -
  • num_sphere_samples (int) – Number of samples used to approximate the integration of the sphere in the output blocks

  • -
  • num_basis_functions (int) – Number of basis functions used for distance and atomic number blocks

  • -
  • distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu") – Basis function used for distances

  • -
  • basis_width_scalar (float) – Width of distance basis function

  • -
  • distance_resolution (float) – Distance between distance basis functions in Angstroms

  • -
  • show_timing_info (bool) – Show timing and memory info

  • -
-
-
-
-
-property num_params: int#
-
- -
-
-energy_fc1: torch.nn.Linear#
-
- -
-
-energy_fc2: torch.nn.Linear#
-
- -
-
-energy_fc3: torch.nn.Linear#
-
- -
-
-force_fc1: torch.nn.Linear#
-
- -
-
-force_fc2: torch.nn.Linear#
-
- -
-
-force_fc3: torch.nn.Linear#
-
- -
-
-forward(data)#
-
- -
-
-_forward_helper(data)#
-
- -
-
-_init_edge_rot_mat(data, edge_index, edge_distance_vec)#
-
- -
-
-_rank_edge_distances(edge_distance, edge_index, max_num_neighbors: int) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.scn.scn.EdgeBlock(num_resolutions: int, sphere_channels_reduce, hidden_channels_list, cutoff_list, sphharm_list, sphere_channels, distance_expansion, max_num_elements: int, num_basis_functions: int, num_gaussians: int, use_grid: bool, act)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x, atomic_numbers, edge_distance, edge_index, cutoff_index)#
-
- -
- -
-
-class fairchem.core.models.scn.scn.MessageBlock(sphere_channels_reduce, hidden_channels, num_basis_functions, sphharm, act)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x, x_edge, edge_index)#
-
- -
- -
-
-class fairchem.core.models.scn.scn.DistanceBlock(in_channels, num_basis_functions: int, distance_expansion, max_num_elements: int, act)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(edge_distance, source_element, target_element)#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/scn/smearing/index.html b/autoapi/fairchem/core/models/scn/smearing/index.html
deleted file mode 100644
index df80b6a8e..000000000
--- a/autoapi/fairchem/core/models/scn/smearing/index.html
+++ /dev/null
@@ -1,846 +0,0 @@

fairchem.core.models.scn.smearing#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - -

GaussianSmearing

Base class for all neural network modules.

SigmoidSmearing

Base class for all neural network modules.

LinearSigmoidSmearing

Base class for all neural network modules.

SiLUSmearing

Base class for all neural network modules.

-
-
-class fairchem.core.models.scn.smearing.GaussianSmearing(start: float = -5.0, stop: float = 5.0, num_gaussians: int = 50, basis_width_scalar: float = 1.0)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(dist) torch.Tensor#
-
- -
- -
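A hedged sketch of GaussianSmearing: expand scalar distances into a Gaussian basis. The output width (last dimension equal to num_gaussians) is an assumption based on the constructor arguments, not stated in these docs.

import torch
from fairchem.core.models.scn.smearing import GaussianSmearing

smearing = GaussianSmearing(start=-5.0, stop=5.0, num_gaussians=50)
dist = torch.rand(32) * 5.0        # scalar interatomic distances
expanded = smearing(dist)          # assumed shape: [32, 50]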
-
-class fairchem.core.models.scn.smearing.SigmoidSmearing(start=-5.0, stop=5.0, num_sigmoid=50, basis_width_scalar=1.0)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(dist) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.scn.smearing.LinearSigmoidSmearing(start: float = -5.0, stop: float = 5.0, num_sigmoid: int = 50, basis_width_scalar: float = 1.0)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(dist) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.scn.smearing.SiLUSmearing(start: float = -5.0, stop: float = 5.0, num_output: int = 50, basis_width_scalar: float = 1.0)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(dist)#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/scn/spherical_harmonics/index.html b/autoapi/fairchem/core/models/scn/spherical_harmonics/index.html
deleted file mode 100644
index b7cb8c670..000000000
--- a/autoapi/fairchem/core/models/scn/spherical_harmonics/index.html
+++ /dev/null
@@ -1,784 +0,0 @@

fairchem.core.models.scn.spherical_harmonics#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

SphericalHarmonicsHelper

Helper functions for spherical harmonics calculations and representations

-
-
-

Functions#

- - - - - - - - - -

wigner_D(l, alpha, beta, gamma)

_z_rot_mat(angle, l)

-
-
-

Attributes#

- - - - - - -

_Jd

-
-
-fairchem.core.models.scn.spherical_harmonics._Jd#
-
- -
-
-class fairchem.core.models.scn.spherical_harmonics.SphericalHarmonicsHelper(lmax: int, mmax: int, num_taps: int, num_bands: int)#
-

Helper functions for spherical harmonics calculations and representations

-
-
Parameters:
-
    -
  • lmax (int) – Maximum degree of the spherical harmonics

  • -
  • mmax (int) – Maximum order of the spherical harmonics

  • -
  • num_taps (int) – Number of taps or rotations (1 or otherwise set automatically based on mmax)

  • -
  • num_bands (int) – Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2)

  • -
-
-
-
-
-InitWignerDMatrix(edge_rot_mat) None#
-
- -
-
-InitYRotMapping()#
-
- -
-
-ToGrid(x, channels) torch.Tensor#
-
- -
-
-FromGrid(x_grid, channels) torch.Tensor#
-
- -
-
-CombineYRotations(x) torch.Tensor#
-
- -
-
-Rotate(x) torch.Tensor#
-
- -
-
-FlipGrid(grid, num_channels: int) torch.Tensor#
-
- -
-
-RotateInv(x) torch.Tensor#
-
- -
-
-RotateWigner(x, wigner) torch.Tensor#
-
- -
-
-RotationMatrix(rot_x: float, rot_y: float, rot_z: float) torch.Tensor#
-
- -
-
-RotationToWignerDMatrix(edge_rot_mat, start_lmax, end_lmax)#
-
- -
- -
-
-fairchem.core.models.scn.spherical_harmonics.wigner_D(l, alpha, beta, gamma)#
-
- -
-
-fairchem.core.models.scn.spherical_harmonics._z_rot_mat(angle, l)#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/utils/activations/index.html b/autoapi/fairchem/core/models/utils/activations/index.html
deleted file mode 100644
index 97a7a2ab0..000000000
--- a/autoapi/fairchem/core/models/utils/activations/index.html
+++ /dev/null
@@ -1,687 +0,0 @@

fairchem.core.models.utils.activations#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

Act

Base class for all neural network modules.

-
-
-class fairchem.core.models.utils.activations.Act(act: str, slope: float = 0.05)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(input: torch.Tensor) torch.Tensor#
-
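A hedged sketch of the Act wrapper above. The "ssp" (shifted softplus) string is assumed to be a valid activation name because it appears as a default act value elsewhere in these docs.

import torch
from fairchem.core.models.utils.activations import Act

act = Act("ssp", slope=0.05)   # activation name is an assumption
y = act(torch.randn(4))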
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/utils/basis/index.html b/autoapi/fairchem/core/models/utils/basis/index.html
deleted file mode 100644
index c4bfb9a67..000000000
--- a/autoapi/fairchem/core/models/utils/basis/index.html
+++ /dev/null
@@ -1,1026 +0,0 @@

fairchem.core.models.utils.basis#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - - - - - - - -

Sine

Base class for all neural network modules.

SIREN

Base class for all neural network modules.

SINESmearing

Base class for all neural network modules.

GaussianSmearing

Base class for all neural network modules.

FourierSmearing

Base class for all neural network modules.

Basis

Base class for all neural network modules.

SphericalSmearing

Base class for all neural network modules.

-
-
-class fairchem.core.models.utils.basis.Sine(w0: float = 30.0)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.utils.basis.SIREN(layers: list[int], num_in_features: int, out_features: int, w0: float = 30.0, initializer: str | None = 'siren', c: float = 6)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(X: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.utils.basis.SINESmearing(num_in_features: int, num_freqs: int = 40, use_cosine: bool = False)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.utils.basis.GaussianSmearing(num_in_features: int, start: int = 0, end: int = 1, num_freqs: int = 50)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.utils.basis.FourierSmearing(num_in_features: int, num_freqs: int = 40, use_cosine: bool = False)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(x: torch.Tensor) torch.Tensor#
-
- -
- -
-
-class fairchem.core.models.utils.basis.Basis(num_in_features: int, num_freqs: int = 50, basis_type: str = 'powersine', act: str = 'ssp', sph: SphericalSmearing | None = None)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-smearing: SINESmearing | FourierSmearing | GaussianSmearing | torch.nn.Sequential#
-
- -
-
-forward(x: torch.Tensor, edge_attr_sph: torch.Tensor | None = None)#
-
- -
- -
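A hedged sketch of the Basis module above, using its documented defaults. The input is assumed to be a per-edge feature tensor whose last dimension equals num_in_features; the output shape is not documented here.

import torch
from fairchem.core.models.utils.basis import Basis

basis = Basis(num_in_features=3, num_freqs=50, basis_type="powersine", act="ssp")
edge_attr = torch.rand(16, 3)    # assumed [num_edges, num_in_features]
out = basis(edge_attr)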
-
-class fairchem.core.models.utils.basis.SphericalSmearing(max_n: int = 10, option: str = 'all')#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-m: numpy.typing.NDArray[numpy.int_]#
-
- -
-
-n: numpy.typing.NDArray[numpy.int_]#
-
- -
-
-forward(xyz: torch.Tensor) torch.Tensor#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/models/utils/index.html b/autoapi/fairchem/core/models/utils/index.html
deleted file mode 100644
index 501e2c354..000000000
--- a/autoapi/fairchem/core/models/utils/index.html
+++ /dev/null
@@ -1,620 +0,0 @@

fairchem.core.models.utils (package index page)
\ No newline at end of file
diff --git a/autoapi/fairchem/core/modules/evaluator/index.html b/autoapi/fairchem/core/modules/evaluator/index.html
deleted file mode 100644
index c2960fcaf..000000000
--- a/autoapi/fairchem/core/modules/evaluator/index.html
+++ /dev/null
@@ -1,844 +0,0 @@

fairchem.core.modules.evaluator#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

Evaluator

-
-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

forcesx_mae(prediction, target[, key])

forcesx_mse(prediction, target[, key])

forcesy_mae(prediction, target[, key])

forcesy_mse(prediction, target[, key])

forcesz_mae(prediction, target[, key])

forcesz_mse(prediction, target[, key])

energy_forces_within_threshold(→ dict[str, float | int])

energy_within_threshold(→ dict[str, float | int])

average_distance_within_threshold(→ dict[str, float | int])

min_diff(pred_pos, dft_pos, cell, pbc)

cosine_similarity(prediction, target[, key])

mae(→ dict[str, float | int])

mse(→ dict[str, float | int])

magnitude_error(→ dict[str, float | int])

-
-
-

Attributes#

- - - - - - -

NONE

-
-
-fairchem.core.modules.evaluator.NONE#
-
- -
-
-class fairchem.core.modules.evaluator.Evaluator(task: str | None = None, eval_metrics: dict | None = None)#
-
-
-task_metrics: ClassVar[dict[str, str]]#
-
- -
-
-task_primary_metric: ClassVar[dict[str, str | None]]#
-
- -
-
-eval(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], prev_metrics=None)#
-
- -
-
-update(key, stat, metrics)#
-
- -
- -
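A hedged sketch of the Evaluator class above. The task name and the dictionary keys ("energy", "forces", "natoms") are assumptions modelled on the metric names listed in this module; they are not guaranteed by these docs.

import torch
from fairchem.core.modules.evaluator import Evaluator

evaluator = Evaluator(task="s2ef")   # assumed task name
prediction = {
    "energy": torch.randn(4),
    "forces": torch.randn(12, 3),
    "natoms": torch.tensor([3, 3, 3, 3]),
}
target = {
    "energy": torch.randn(4),
    "forces": torch.randn(12, 3),
    "natoms": torch.tensor([3, 3, 3, 3]),
}
metrics = evaluator.eval(prediction, target)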
-
-fairchem.core.modules.evaluator.forcesx_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE)#
-
- -
-
-fairchem.core.modules.evaluator.forcesx_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE)#
-
- -
-
-fairchem.core.modules.evaluator.forcesy_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None)#
-
- -
-
-fairchem.core.modules.evaluator.forcesy_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None)#
-
- -
-
-fairchem.core.modules.evaluator.forcesz_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None)#
-
- -
-
-fairchem.core.modules.evaluator.forcesz_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None)#
-
- -
-
-fairchem.core.modules.evaluator.energy_forces_within_threshold(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) dict[str, float | int]#
-
- -
-
-fairchem.core.modules.evaluator.energy_within_threshold(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) dict[str, float | int]#
-
- -
-
-fairchem.core.modules.evaluator.average_distance_within_threshold(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) dict[str, float | int]#
-
- -
-
-fairchem.core.modules.evaluator.min_diff(pred_pos: torch.Tensor, dft_pos: torch.Tensor, cell: torch.Tensor, pbc: torch.Tensor)#
-
- -
-
-fairchem.core.modules.evaluator.cosine_similarity(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE)#
-
- -
-
-fairchem.core.modules.evaluator.mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) dict[str, float | int]#
-
- -
-
-fairchem.core.modules.evaluator.mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) dict[str, float | int]#
-
- -
-
-fairchem.core.modules.evaluator.magnitude_error(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE, p: int = 2) dict[str, float | int]#
-
- -
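A minimal usage sketch of the evaluator API listed above. The standalone metric helpers and the Evaluator.eval signature come from this page; the task name "s2ef", the dictionary keys, and the shape of the returned metric dicts are assumptions based on the fine-tuning log at the top of this document.

import torch
from fairchem.core.modules.evaluator import Evaluator, mae

# Toy prediction/target dicts for a two-structure batch (10 atoms total).
prediction = {
    "energy": torch.tensor([-1.02, -3.55]),
    "forces": torch.randn(10, 3),
    "natoms": torch.tensor([4, 6]),   # included because several metrics normalize per structure (assumption)
}
target = {
    "energy": torch.tensor([-1.00, -3.60]),
    "forces": torch.randn(10, 3),
    "natoms": torch.tensor([4, 6]),
}

# Standalone metric helpers take the full dicts plus the key to compare.
print(mae(prediction, target, key="energy"))

# Evaluator bundles the metrics configured for a task; prev_metrics lets you
# accumulate running totals across batches.
evaluator = Evaluator(task="s2ef")
metrics = evaluator.eval(prediction, target)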
\ No newline at end of file
diff --git a/autoapi/fairchem/core/modules/exponential_moving_average/index.html b/autoapi/fairchem/core/modules/exponential_moving_average/index.html
deleted file mode 100644
index d24c10103..000000000
--- a/autoapi/fairchem/core/modules/exponential_moving_average/index.html
+++ /dev/null
@@ -1,757 +0,0 @@

fairchem.core.modules.exponential_moving_average#

-

Copied (and improved) from: fadel/pytorch_ema (MIT license)

-
-

Module Contents#

-
-

Classes#

- - - - - - -

ExponentialMovingAverage

Maintains (exponential) moving average of a set of parameters.

-
-
-class fairchem.core.modules.exponential_moving_average.ExponentialMovingAverage(parameters: collections.abc.Iterable[torch.nn.Parameter], decay: float, use_num_updates: bool = False)#
-

Maintains (exponential) moving average of a set of parameters.

-
-
Parameters:
-
    -
  • parameters – Iterable of torch.nn.Parameter (typically from -model.parameters()).

  • -
  • decay – The exponential decay.

  • -
  • use_num_updates – Whether to use number of updates when computing -averages.

  • -
-
-
-
-
-_get_parameters(parameters: collections.abc.Iterable[torch.nn.Parameter] | None) collections.abc.Iterable[torch.nn.Parameter]#
-
- -
-
-update(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) None#
-

Update currently maintained parameters.

-

Call this every time the parameters are updated, such as the result of -the optimizer.step() call.

-
-
Parameters:
-

parameters – Iterable of torch.nn.Parameter; usually the same set of -parameters used to initialize this object. If None, the -parameters with which this ExponentialMovingAverage was -initialized will be used.

-
-
-
- -
-
-copy_to(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) None#
-

Copy current parameters into given collection of parameters.

-
-
Parameters:
-

parameters – Iterable of torch.nn.Parameter; the parameters to be -updated with the stored moving averages. If None, the -parameters with which this ExponentialMovingAverage was -initialized will be used.

-
-
-
- -
-
-store(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) None#
-

Save the current parameters for restoring later.

-
-
Parameters:
-

parameters – Iterable of torch.nn.Parameter; the parameters to be temporarily stored. If None, the parameters with which this ExponentialMovingAverage was initialized will be used.

-
-
-
- -
-
-restore(parameters: collections.abc.Iterable[torch.nn.Parameter] | None = None) None#
-

Restore the parameters stored with the store method. -Useful to validate the model with EMA parameters without affecting the -original optimization process. Store the parameters before the -copy_to method. After validation (or model saving), use this to -restore the former parameters.

-
-
Parameters:
-

parameters – Iterable of torch.nn.Parameter; the parameters to be -updated with the stored parameters. If None, the -parameters with which this ExponentialMovingAverage was -initialized will be used.

-
-
-
- -
-
-state_dict() dict#
-

Returns the state of the ExponentialMovingAverage as a dict.

-
- -
-
-load_state_dict(state_dict: dict) None#
-

Loads the ExponentialMovingAverage state.

-
-
Parameters:
-

state_dict (dict) – EMA state. Should be an object returned -from a call to state_dict().

-
-
-
- -
- -
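The page above documents the full EMA life cycle; the sketch below simply strings the calls together in the order described (update after each optimizer.step(), store/copy_to before validation, restore afterwards). The toy model, optimizer, and loop are illustrative only.

import torch
from fairchem.core.modules.exponential_moving_average import ExponentialMovingAverage

model = torch.nn.Linear(8, 1)                     # stand-in for a real model
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)
ema = ExponentialMovingAverage(model.parameters(), decay=0.999)

for _ in range(10):                               # training-loop sketch
    optimizer.zero_grad()
    loss = model(torch.randn(4, 8)).pow(2).mean()
    loss.backward()
    optimizer.step()
    ema.update()                                  # call after every optimizer.step()

# Validate with the averaged weights, then put the raw weights back.
ema.store()
ema.copy_to()
# ... run validation here ...
ema.restore()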
\ No newline at end of file
diff --git a/autoapi/fairchem/core/modules/index.html b/autoapi/fairchem/core/modules/index.html
deleted file mode 100644
index 6737df61c..000000000
--- a/autoapi/fairchem/core/modules/index.html
+++ /dev/null
@@ -1,643 +0,0 @@

fairchem.core.modules#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Subpackages#

- -
-
-

Submodules#

- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/modules/loss/index.html b/autoapi/fairchem/core/modules/loss/index.html
deleted file mode 100644
index 4500fd28d..000000000
--- a/autoapi/fairchem/core/modules/loss/index.html
+++ /dev/null
@@ -1,790 +0,0 @@

fairchem.core.modules.loss#

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - -

L2MAELoss

Base class for all neural network modules.

AtomwiseL2Loss

Base class for all neural network modules.

DDPLoss

Base class for all neural network modules.

-
-
-class fairchem.core.modules.loss.L2MAELoss(reduction: str = 'mean')#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(input: torch.Tensor, target: torch.Tensor)#
-
- -
- -
-
-class fairchem.core.modules.loss.AtomwiseL2Loss(reduction: str = 'mean')#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(input: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor)#
-
- -
- -
-
-class fairchem.core.modules.loss.DDPLoss(loss_fn, loss_name: str = 'mae', reduction: str = 'mean')#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-forward(input: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor | None = None, batch_size: int | None = None)#
-
- -
- -
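A hedged sketch of how the loss classes above fit together: L2MAELoss is called directly on force tensors, while DDPLoss wraps another loss so the reduction stays consistent under distributed training. How DDPLoss expects its wrapped loss to be configured is an assumption here, not something this page states.

import torch
from fairchem.core.modules.loss import DDPLoss, L2MAELoss

pred_forces = torch.randn(10, 3)
true_forces = torch.randn(10, 3)

# Mean per-atom L2 error between predicted and reference forces.
force_loss = L2MAELoss(reduction="mean")(pred_forces, true_forces)

# DDPLoss(loss_fn, loss_name, reduction) wraps a loss for distributed runs;
# natoms/batch_size are optional per the forward() signature above.
ddp_loss_fn = DDPLoss(L2MAELoss(), loss_name="l2mae", reduction="mean")
loss = ddp_loss_fn(pred_forces, true_forces)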
\ No newline at end of file
diff --git a/autoapi/fairchem/core/modules/normalizer/index.html b/autoapi/fairchem/core/modules/normalizer/index.html
deleted file mode 100644
index a9e0cc0c5..000000000
--- a/autoapi/fairchem/core/modules/normalizer/index.html
+++ /dev/null
@@ -1,684 +0,0 @@

fairchem.core.modules.normalizer#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

Normalizer

Normalize a Tensor and restore it later.

-
-
-class fairchem.core.modules.normalizer.Normalizer(tensor: torch.Tensor | None = None, mean=None, std=None, device=None)#
-

Normalize a Tensor and restore it later.

-
-
-to(device) None#
-
- -
-
-norm(tensor: torch.Tensor) torch.Tensor#
-
- -
-
-denorm(normed_tensor: torch.Tensor) torch.Tensor#
-
- -
-
-state_dict()#
-
- -
-
-load_state_dict(state_dict) None#
-
- -
- -
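A short sketch of the Normalizer round trip documented above; the mean/std formula in the comments is the usual convention and an assumption, since this page only states that the tensor is normalized and restored.

import torch
from fairchem.core.modules.normalizer import Normalizer

energies = torch.tensor([-1.2, -3.4, -2.8, -0.9])

# Fit mean/std from a reference tensor, or pass mean=/std= explicitly.
normalizer = Normalizer(tensor=energies)

normed = normalizer.norm(energies)       # presumably (x - mean) / std
restored = normalizer.denorm(normed)     # back to the original scale

state = normalizer.state_dict()          # serialize alongside a checkpoint
normalizer.load_state_dict(state)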
\ No newline at end of file
diff --git a/autoapi/fairchem/core/modules/scaling/compat/index.html b/autoapi/fairchem/core/modules/scaling/compat/index.html
deleted file mode 100644
index 72f9480e6..000000000
--- a/autoapi/fairchem/core/modules/scaling/compat/index.html
+++ /dev/null
@@ -1,674 +0,0 @@

fairchem.core.modules.scaling.compat#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

_load_scale_dict(scale_file)

Loads scale factors from either:

load_scales_compat(→ None)

-
-
-

Attributes#

- - - - - - -

ScaleDict

-
-
-fairchem.core.modules.scaling.compat.ScaleDict#
-
- -
-
-fairchem.core.modules.scaling.compat._load_scale_dict(scale_file: str | ScaleDict | None)#
-

Loads scale factors from either:
- a JSON file mapping scale factor names to scale values
- a python dictionary pickled object (loaded using torch.load) mapping scale factor names to scale values
- a dictionary mapping scale factor names to scale values

-
- -
-
-fairchem.core.modules.scaling.compat.load_scales_compat(module: torch.nn.Module, scale_file: str | ScaleDict | None) None#
-
- -
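A sketch of applying a scale dictionary to a toy module that owns a single ScaleFactor. The module layout, the key name "tiny.scale_rbf", and the assumption that dictionary keys are matched against ScaleFactor names are all illustrative, not taken from this page.

import torch
from fairchem.core.modules.scaling import ScaleFactor
from fairchem.core.modules.scaling.compat import load_scales_compat

class TinyBlock(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.scale_rbf = ScaleFactor(name="tiny.scale_rbf")

model = TinyBlock()

# scale_file may be a JSON path, a torch-pickled dict, or a plain dict
# (see _load_scale_dict above); a dict is the simplest to show inline.
load_scales_compat(model, {"tiny.scale_rbf": 1.83})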
\ No newline at end of file
diff --git a/autoapi/fairchem/core/modules/scaling/fit/index.html b/autoapi/fairchem/core/modules/scaling/fit/index.html
deleted file mode 100644
index 4cf3a4a9a..000000000
--- a/autoapi/fairchem/core/modules/scaling/fit/index.html
+++ /dev/null
@@ -1,661 +0,0 @@

fairchem.core.modules.scaling.fit#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - -

_prefilled_input(→ str)

_train_batch(→ None)

main(→ None)

-
-
-fairchem.core.modules.scaling.fit._prefilled_input(prompt: str, prefill: str = '') str#
-
- -
-
-fairchem.core.modules.scaling.fit._train_batch(trainer: fairchem.core.trainers.base_trainer.BaseTrainer, batch) None#
-
- -
-
-fairchem.core.modules.scaling.fit.main(*, num_batches: int = 16) None#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/modules/scaling/index.html b/autoapi/fairchem/core/modules/scaling/index.html
deleted file mode 100644
index ad4b51d4c..000000000
--- a/autoapi/fairchem/core/modules/scaling/index.html
+++ /dev/null
@@ -1,781 +0,0 @@

fairchem.core.modules.scaling#

-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - -

ScaleFactor

Base class for all neural network modules.

-
-
-class fairchem.core.modules.scaling.ScaleFactor(name: str | None = None, enforce_consistency: bool = True)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-property fitted: bool#
-
- -
-
-scale_factor: torch.Tensor#
-
- -
-
-name: str | None#
-
- -
-
-index_fn: IndexFn | None#
-
- -
-
-stats: _Stats | None#
-
- -
-
-_enforce_consistency(state_dict, prefix, _local_metadata, _strict, _missing_keys, _unexpected_keys, _error_msgs) None#
-
- -
-
-reset_() None#
-
- -
-
-set_(scale: float | torch.Tensor) None#
-
- -
-
-initialize_(*, index_fn: IndexFn | None = None) None#
-
- -
-
-fit_context_()#
-
- -
-
-fit_()#
-
- -
-
-_observe(x: torch.Tensor, ref: torch.Tensor | None = None) None#
-
- -
-
-forward(x: torch.Tensor, *, ref: torch.Tensor | None = None) torch.Tensor#
-
- -
- -
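The listing above gives the ScaleFactor fitting hooks without a walkthrough, so here is a hedged sketch of the intended flow: observe activations inside fit_context_(), call fit_(), then use the module normally. Treating fit_context_() as a context manager and the exact observe-then-fit protocol are assumptions based on these method names and on fit.py elsewhere in the package.

import torch
from fairchem.core.modules.scaling import ScaleFactor

scale = ScaleFactor(name="edge_embedding")

x = torch.randn(128, 16) * 3.0    # activations whose variance we want to tame
ref = torch.randn(128, 16)        # reference activations with the target variance

with scale.fit_context_():
    scale(x, ref=ref)             # forward() records variance statistics while unfitted
    scale.fit_()                  # derives scale_factor from the recorded stats

print(scale.fitted, scale.scale_factor)
y = scale(x, ref=ref)             # now rescales x by the fitted factor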
\ No newline at end of file
diff --git a/autoapi/fairchem/core/modules/scaling/scale_factor/index.html b/autoapi/fairchem/core/modules/scaling/scale_factor/index.html
deleted file mode 100644
index b19521bca..000000000
--- a/autoapi/fairchem/core/modules/scaling/scale_factor/index.html
+++ /dev/null
@@ -1,857 +0,0 @@

fairchem.core.modules.scaling.scale_factor#

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - -

_Stats

dict() -> new empty dictionary

ScaleFactor

Base class for all neural network modules.

-
-
-

Functions#

- - - - - - -

_check_consistency(→ None)

-
-
-

Attributes#

- - - - - - -

IndexFn

-
-
-class fairchem.core.modules.scaling.scale_factor._Stats#
-

Bases: TypedDict

-

dict() -> new empty dictionary -dict(mapping) -> new dictionary initialized from a mapping object’s

-
-

(key, value) pairs

-
-
-
dict(iterable) -> new dictionary initialized as if via:

d = {} -for k, v in iterable:

-
-

d[k] = v

-
-
-
dict(**kwargs) -> new dictionary initialized with the name=value pairs

in the keyword argument list. For example: dict(one=1, two=2)

-
-
-
-
-variance_in: float#
-
- -
-
-variance_out: float#
-
- -
-
-n_samples: int#
-
- -
- -
-
-fairchem.core.modules.scaling.scale_factor.IndexFn#
-
- -
-
-fairchem.core.modules.scaling.scale_factor._check_consistency(old: torch.Tensor, new: torch.Tensor, key: str) None#
-
- -
-
-class fairchem.core.modules.scaling.scale_factor.ScaleFactor(name: str | None = None, enforce_consistency: bool = True)#
-

Bases: torch.nn.Module

-

Base class for all neural network modules.

-

Your models should also subclass this class.

-

Modules can also contain other Modules, allowing to nest them in -a tree structure. You can assign the submodules as regular attributes:

-
import torch.nn as nn
-import torch.nn.functional as F
-
-class Model(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 20, 5)
-        self.conv2 = nn.Conv2d(20, 20, 5)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        return F.relu(self.conv2(x))
-
-
-

Submodules assigned in this way will be registered, and will have their -parameters converted too when you call to(), etc.

-
-

Note

-

As per the example above, an __init__() call to the parent class -must be made before assignment on the child.

-
-
-
Variables:
-

training (bool) – Boolean represents whether this module is in training or -evaluation mode.

-
-
-
-
-property fitted: bool#
-
- -
-
-scale_factor: torch.Tensor#
-
- -
-
-name: str | None#
-
- -
-
-index_fn: IndexFn | None#
-
- -
-
-stats: _Stats | None#
-
- -
-
-_enforce_consistency(state_dict, prefix, _local_metadata, _strict, _missing_keys, _unexpected_keys, _error_msgs) None#
-
- -
-
-reset_() None#
-
- -
-
-set_(scale: float | torch.Tensor) None#
-
- -
-
-initialize_(*, index_fn: IndexFn | None = None) None#
-
- -
-
-fit_context_()#
-
- -
-
-fit_()#
-
- -
-
-_observe(x: torch.Tensor, ref: torch.Tensor | None = None) None#
-
- -
-
-forward(x: torch.Tensor, *, ref: torch.Tensor | None = None) torch.Tensor#
-
- -
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/modules/scaling/util/index.html b/autoapi/fairchem/core/modules/scaling/util/index.html
deleted file mode 100644
index abdf412cb..000000000
--- a/autoapi/fairchem/core/modules/scaling/util/index.html
+++ /dev/null
@@ -1,641 +0,0 @@

fairchem.core.modules.scaling.util#

-
-

Module Contents#

-
-

Functions#

- - - - - - -

ensure_fitted(→ None)

-
-
-fairchem.core.modules.scaling.util.ensure_fitted(module: torch.nn.Module, warn: bool = False) None#
-
- -
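A one-call sketch of the helper above: it walks a module and complains about ScaleFactor submodules that were never fitted. Whether it warns or raises is controlled by warn (behaviour inferred from the signature, so treat it as an assumption).

import torch
from fairchem.core.modules.scaling.util import ensure_fitted

model = torch.nn.Linear(4, 4)    # any nn.Module; only ScaleFactor children are checked
ensure_fitted(model, warn=True)  # no ScaleFactor children here, so this is a no-op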
\ No newline at end of file
diff --git a/autoapi/fairchem/core/modules/scheduler/index.html b/autoapi/fairchem/core/modules/scheduler/index.html
deleted file mode 100644
index 7f06c6788..000000000
--- a/autoapi/fairchem/core/modules/scheduler/index.html
+++ /dev/null
@@ -1,680 +0,0 @@

fairchem.core.modules.scheduler#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

LRScheduler

Learning rate scheduler class for torch.optim learning rate schedulers

-
-
-class fairchem.core.modules.scheduler.LRScheduler(optimizer, config)#
-

Learning rate scheduler class for torch.optim learning rate schedulers

-

Notes

-

If no learning rate scheduler is specified in the config, the default scheduler is warmup_lr_lambda (fairchem.core.common.utils) rather than no scheduler at all; this is for backward-compatibility reasons. To run without an LR scheduler, specify scheduler: “Null” in the optim section of the config.

-
-
Parameters:
-
    -
  • optimizer (obj) – torch optim object

  • -
  • config (dict) – Optim dict from the input config

  • -
-
-
-
-
-step(metrics=None, epoch=None) None#
-
- -
-
-filter_kwargs(config)#
-
- -
-
-get_lr()#
-
- -
- -
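A sketch of driving LRScheduler from an optim-style config, mirroring the ReduceLROnPlateau block in the fine-tuning config at the top of this log. That unmatched keys are filtered out by filter_kwargs, and that plateau schedulers require step(metrics=...), are assumptions consistent with the notes above.

import torch
from fairchem.core.modules.scheduler import LRScheduler

model = torch.nn.Linear(8, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)

optim_config = {
    "scheduler": "ReduceLROnPlateau",
    "mode": "min",
    "factor": 0.8,
    "patience": 3,
}
scheduler = LRScheduler(optimizer, optim_config)

val_loss = 6.86                     # e.g. the validation loss logged every eval_every steps
scheduler.step(metrics=val_loss)    # plateau-style schedulers need the metric
print(scheduler.get_lr())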
\ No newline at end of file
diff --git a/autoapi/fairchem/core/modules/transforms/index.html b/autoapi/fairchem/core/modules/transforms/index.html
deleted file mode 100644
index 1fb89e5da..000000000
--- a/autoapi/fairchem/core/modules/transforms/index.html
+++ /dev/null
@@ -1,671 +0,0 @@

fairchem.core.modules.transforms#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

DataTransforms

-
-
-

Functions#

- - - - - - -

decompose_tensor(→ torch_geometric.data.Data)

-
-
-class fairchem.core.modules.transforms.DataTransforms(config)#
-
-
-__call__(data_object)#
-
- -
- -
-
-fairchem.core.modules.transforms.decompose_tensor(data_object, config) torch_geometric.data.Data#
-
- -
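A small sketch of the transforms entry point above. DataTransforms takes the transforms block of a dataset config and is applied per sample; that an empty config leaves the sample untouched is an assumption for this sketch.

import torch
from torch_geometric.data import Data
from fairchem.core.modules.transforms import DataTransforms

sample = Data(pos=torch.randn(4, 3), atomic_numbers=torch.tensor([1, 1, 8, 6]))

transforms = DataTransforms(config={})   # no transforms configured
sample = transforms(sample)              # __call__(data_object) returns the (possibly modified) sample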
\ No newline at end of file
diff --git a/autoapi/fairchem/core/preprocessing/atoms_to_graphs/index.html b/autoapi/fairchem/core/preprocessing/atoms_to_graphs/index.html
deleted file mode 100644
index 18b73ee13..000000000
--- a/autoapi/fairchem/core/preprocessing/atoms_to_graphs/index.html
+++ /dev/null
@@ -1,925 +0,0 @@

fairchem.core.preprocessing.atoms_to_graphs#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#

- - - - - - -

AtomsToGraphs

A class to help convert periodic atomic structures to graphs.

-
-
-

Attributes#

- - - - - - - - - -

AseAtomsAdaptor

shell

-
-
-fairchem.core.preprocessing.atoms_to_graphs.AseAtomsAdaptor#
-
- -
-
-fairchem.core.preprocessing.atoms_to_graphs.shell#
-
- -
-
-class fairchem.core.preprocessing.atoms_to_graphs.AtomsToGraphs(max_neigh: int = 200, radius: int = 6, r_energy: bool = False, r_forces: bool = False, r_distances: bool = False, r_edges: bool = True, r_fixed: bool = True, r_pbc: bool = False, r_stress: bool = False, r_data_keys: collections.abc.Sequence[str] | None = None)#
-

A class to help convert periodic atomic structures to graphs.

-

The AtomsToGraphs class takes in periodic atomic structures in form of ASE atoms objects and converts -them into graph representations for use in PyTorch. The primary purpose of this class is to determine the -nearest neighbors within some radius around each individual atom, taking into account PBC, and set the -pair index and distance between atom pairs appropriately. Lastly, atomic properties and the graph information -are put into a PyTorch geometric data object for use with PyTorch.

-
-
Parameters:
-
    -
  • max_neigh (int) – Maximum number of neighbors to consider.

  • -
  • radius (int or float) – Cutoff radius in Angstroms to search for neighbors.

  • -
  • r_energy (bool) – Return the energy with other properties. Default is False, so the energy will not be returned.

  • -
  • r_forces (bool) – Return the forces with other properties. Default is False, so the forces will not be returned.

  • -
  • r_stress (bool) – Return the stress with other properties. Default is False, so the stress will not be returned.

  • -
  • r_distances (bool) – Return the distances with other properties.

  • -
  • False (Default is)

  • -
  • returned. (so the periodic boundary conditions will not be)

  • -
  • r_edges (bool) – Return interatomic edges with other properties. Default is True, so edges will be returned.

  • -
  • r_fixed (bool) – Return a binary vector with flags for fixed (1) vs free (0) atoms.

  • -
  • True (Default is)

  • -
  • returned.

  • -
  • r_pbc (bool) – Return the periodic boundary conditions with other properties.

  • -
  • False

  • -
  • returned.

  • -
  • r_data_keys (sequence of str, optional) – Return values corresponding to given keys in atoms.info data with other

  • -
  • None (properties. Default is)

  • -
  • properties. (so no data will be returned as)

  • -
-
-
-
-
-max_neigh#
-

Maximum number of neighbors to consider.

-
-
Type:
-

int

-
-
-
- -
-
-radius#
-

Cutoff radius in Angstroms to search for neighbors.

-
-
Type:
-

int or float

-
-
-
- -
-
-r_energy#
-

Return the energy with other properties. Default is False, so the energy will not be returned.

-
-
Type:
-

bool

-
-
-
- -
-
-r_forces#
-

Return the forces with other properties. Default is False, so the forces will not be returned.

-
-
Type:
-

bool

-
-
-
- -
-
-r_stress#
-

Return the stress with other properties. Default is False, so the stress will not be returned.

-
-
Type:
-

bool

-
-
-
- -
-
-r_distances#
-

Return the distances with other properties.

-
-
Type:
-

bool

-
-
-
- -
-
-Default is False, so the distances will not be returned.
-
- -
-
-r_edges#
-

Return interatomic edges with other properties. Default is True, so edges will be returned.

-
-
Type:
-

bool

-
-
-
- -
-
-r_fixed#
-

Return a binary vector with flags for fixed (1) vs free (0) atoms.

-
-
Type:
-

bool

-
-
-
- -
-
-Default is True, so the fixed indices will be returned.
-
- -
-
-r_pbc#
-

Return the periodic boundary conditions with other properties.

-
-
Type:
-

bool

-
-
-
- -
-
-Default is False, so the periodic boundary conditions will not be returned.
-
- -
-
-r_data_keys#
-

Return values corresponding to given keys in atoms.info data with other

-
-
Type:
-

sequence of str, optional

-
-
-
- -
-
-properties. Default is None, so no data will be returned as properties.
-
- -
-
-_get_neighbors_pymatgen(atoms: ase.Atoms)#
-

Performs nearest neighbor search and returns edge index, distances, and cell offsets

-
- -
-
-_reshape_features(c_index, n_index, n_distance, offsets)#
-

Stack center and neighbor index and reshapes distances, -takes in np.arrays and returns torch tensors

-
- -
-
-convert(atoms: ase.Atoms, sid=None)#
-

Convert a single atomic structure to a graph.

-
-
Parameters:
-
    -
  • atoms (ase.atoms.Atoms) – An ASE atoms object.

  • -
  • sid (uniquely identifying object) – An identifier that can be used to track the structure in downstream

  • -
  • integers. (tasks. Common sids used in OCP datasets include unique strings or)

  • -
-
-
Returns:
-

A torch geometric data object with positions, atomic_numbers, tags, and optionally energy, forces, distances, edges, and periodic boundary conditions. Optional properties can be included by setting r_property=True when constructing the class.

-
-
Return type:
-

data (torch_geometric.data.Data)

-
-
-
- -
-
-convert_all(atoms_collection, processed_file_path: str | None = None, collate_and_save=False, disable_tqdm=False)#
-

Convert all atoms objects in a list or in an ase.db to graphs.

-
-
Parameters:
-
    -
  • atoms_collection (list of ase.atoms.Atoms or ase.db.sqlite.SQLite3Database)

  • -
  • database. (Either a list of ASE atoms objects or an ASE)

  • -
  • processed_file_path (str)

  • -
  • None. (A string of the path to where the processed file will be written. Default is)

  • -
  • collate_and_save (bool) – A boolean to collate and save or not. Default is False, so will not write a file.

  • -
-
-
Returns:
-

A list of torch geometric data objects containing molecular graph info and properties.

-
-
Return type:
-

data_list (list of torch_geometric.data.Data)

-
-
-
- -
- -
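A usage sketch of the converter documented above, on a toy Cu(111) slab with one adsorbate. The flags shown are the documented constructor arguments; the slab itself and the sid string are placeholders.

from ase.build import add_adsorbate, fcc111
from fairchem.core.preprocessing.atoms_to_graphs import AtomsToGraphs

slab = fcc111("Cu", size=(2, 2, 3), vacuum=8.0)
add_adsorbate(slab, "O", height=1.2, position="fcc")

a2g = AtomsToGraphs(
    max_neigh=50,
    radius=6,
    r_energy=False,   # this toy structure carries no reference energy/forces
    r_forces=False,
    r_edges=True,
    r_fixed=True,
)

data = a2g.convert(slab, sid="toy-slab-0")   # single torch_geometric.data.Data object
data_list = a2g.convert_all([slab])          # list in, list of Data objects out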
\ No newline at end of file
diff --git a/autoapi/fairchem/core/preprocessing/index.html b/autoapi/fairchem/core/preprocessing/index.html
deleted file mode 100644
index 09fd6180c..000000000
--- a/autoapi/fairchem/core/preprocessing/index.html
+++ /dev/null
@@ -1,906 +0,0 @@

fairchem.core.preprocessing#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - -

AtomsToGraphs

A class to help convert periodic atomic structures to graphs.

-
-
-class fairchem.core.preprocessing.AtomsToGraphs(max_neigh: int = 200, radius: int = 6, r_energy: bool = False, r_forces: bool = False, r_distances: bool = False, r_edges: bool = True, r_fixed: bool = True, r_pbc: bool = False, r_stress: bool = False, r_data_keys: collections.abc.Sequence[str] | None = None)#
-

A class to help convert periodic atomic structures to graphs.

-

The AtomsToGraphs class takes in periodic atomic structures in form of ASE atoms objects and converts -them into graph representations for use in PyTorch. The primary purpose of this class is to determine the -nearest neighbors within some radius around each individual atom, taking into account PBC, and set the -pair index and distance between atom pairs appropriately. Lastly, atomic properties and the graph information -are put into a PyTorch geometric data object for use with PyTorch.

-
-
Parameters:
-
    -
  • max_neigh (int) – Maximum number of neighbors to consider.

  • -
  • radius (int or float) – Cutoff radius in Angstroms to search for neighbors.

  • -
  • r_energy (bool) – Return the energy with other properties. Default is False, so the energy will not be returned.

  • -
  • r_forces (bool) – Return the forces with other properties. Default is False, so the forces will not be returned.

  • -
  • r_stress (bool) – Return the stress with other properties. Default is False, so the stress will not be returned.

  • -
  • r_distances (bool) – Return the distances with other properties.

  • -
  • False (Default is)

  • -
  • returned. (so the periodic boundary conditions will not be)

  • -
  • r_edges (bool) – Return interatomic edges with other properties. Default is True, so edges will be returned.

  • -
  • r_fixed (bool) – Return a binary vector with flags for fixed (1) vs free (0) atoms.

  • -
  • True (Default is)

  • -
  • returned.

  • -
  • r_pbc (bool) – Return the periodic boundary conditions with other properties.

  • -
  • False

  • -
  • returned.

  • -
  • r_data_keys (sequence of str, optional) – Return values corresponding to given keys in atoms.info data with other

  • -
  • None (properties. Default is)

  • -
  • properties. (so no data will be returned as)

  • -
-
-
-
-
-max_neigh#
-

Maximum number of neighbors to consider.

-
-
Type:
-

int

-
-
-
- -
-
-radius#
-

Cutoff radius in Angstroms to search for neighbors.

-
-
Type:
-

int or float

-
-
-
- -
-
-r_energy#
-

Return the energy with other properties. Default is False, so the energy will not be returned.

-
-
Type:
-

bool

-
-
-
- -
-
-r_forces#
-

Return the forces with other properties. Default is False, so the forces will not be returned.

-
-
Type:
-

bool

-
-
-
- -
-
-r_stress#
-

Return the stress with other properties. Default is False, so the stress will not be returned.

-
-
Type:
-

bool

-
-
-
- -
-
-r_distances#
-

Return the distances with other properties.

-
-
Type:
-

bool

-
-
-
- -
-
-Default is False, so the distances will not be returned.
-
- -
-
-r_edges#
-

Return interatomic edges with other properties. Default is True, so edges will be returned.

-
-
Type:
-

bool

-
-
-
- -
-
-r_fixed#
-

Return a binary vector with flags for fixed (1) vs free (0) atoms.

-
-
Type:
-

bool

-
-
-
- -
-
-Default is True, so the fixed indices will be returned.
-
- -
-
-r_pbc#
-

Return the periodic boundary conditions with other properties.

-
-
Type:
-

bool

-
-
-
- -
-
-Default is False, so the periodic boundary conditions will not be returned.
-
- -
-
-r_data_keys#
-

Return values corresponding to given keys in atoms.info data with other

-
-
Type:
-

sequence of str, optional

-
-
-
- -
-
-properties. Default is None, so no data will be returned as properties.
-
- -
-
-_get_neighbors_pymatgen(atoms: ase.Atoms)#
-

Performs nearest neighbor search and returns edge index, distances, and cell offsets

-
- -
-
-_reshape_features(c_index, n_index, n_distance, offsets)#
-

Stack center and neighbor index and reshapes distances, -takes in np.arrays and returns torch tensors

-
- -
-
-convert(atoms: ase.Atoms, sid=None)#
-

Convert a single atomic structure to a graph.

-
-
Parameters:
-
    -
  • atoms (ase.atoms.Atoms) – An ASE atoms object.

  • -
  • sid (uniquely identifying object) – An identifier that can be used to track the structure in downstream

  • -
  • integers. (tasks. Common sids used in OCP datasets include unique strings or)

  • -
-
-
Returns:
-

A torch geometric data object with positions, atomic_numbers, tags, and optionally energy, forces, distances, edges, and periodic boundary conditions. Optional properties can be included by setting r_property=True when constructing the class.

-
-
Return type:
-

data (torch_geometric.data.Data)

-
-
-
- -
-
-convert_all(atoms_collection, processed_file_path: str | None = None, collate_and_save=False, disable_tqdm=False)#
-

Convert all atoms objects in a list or in an ase.db to graphs.

-
-
Parameters:
-
    -
  • atoms_collection (list of ase.atoms.Atoms or ase.db.sqlite.SQLite3Database)

  • -
  • database. (Either a list of ASE atoms objects or an ASE)

  • -
  • processed_file_path (str)

  • -
  • None. (A string of the path to where the processed file will be written. Default is)

  • -
  • collate_and_save (bool) – A boolean to collate and save or not. Default is False, so will not write a file.

  • -
-
-
Returns:
-

A list of torch geometric data objects containing molecular graph info and properties.

-
-
Return type:
-

data_list (list of torch_geometric.data.Data)

-
-
-
- -
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/scripts/download_data/index.html b/autoapi/fairchem/core/scripts/download_data/index.html
deleted file mode 100644
index d7a53498a..000000000
--- a/autoapi/fairchem/core/scripts/download_data/index.html
+++ /dev/null
@@ -1,730 +0,0 @@

fairchem.core.scripts.download_data#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - -

get_data(→ None)

uncompress_data(→ str)

preprocess_data(→ None)

verify_count(→ None)

cleanup(→ None)

-
-
-

Attributes#

- - - - - - - - - - - - - - - -

DOWNLOAD_LINKS_s2ef

DOWNLOAD_LINKS_is2re

S2EF_COUNTS

parser

-
- -
- -
- -
- -
-
-fairchem.core.scripts.download_data.S2EF_COUNTS#
-
- -
-
-fairchem.core.scripts.download_data.get_data(datadir: str, task: str, split: str | None, del_intmd_files: bool) None#
-
- -
-
-fairchem.core.scripts.download_data.uncompress_data(compressed_dir: str) str#
-
- -
-
-fairchem.core.scripts.download_data.preprocess_data(uncompressed_dir: str, output_path: str) None#
-
- -
-
-fairchem.core.scripts.download_data.verify_count(output_path: str, task: str, split: str) None#
-
- -
-
-fairchem.core.scripts.download_data.cleanup(filename: str, dirname: str) None#
-
- -
-
-fairchem.core.scripts.download_data.parser#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/scripts/gif_maker_parallelized/index.html b/autoapi/fairchem/core/scripts/gif_maker_parallelized/index.html
deleted file mode 100644
index dc04d05d4..000000000
--- a/autoapi/fairchem/core/scripts/gif_maker_parallelized/index.html
+++ /dev/null
@@ -1,687 +0,0 @@

fairchem.core.scripts.gif_maker_parallelized#

-

Script to generate gifs from traj

-

Note: This is just a quick way to generate gifs and visualizations from a trajectory; there are many parameters and settings in the code that can be varied to make the visualizations better. We have chosen these settings because they seem to work fine for most of our systems.

-

Requirements:

-

povray -ffmpeg -ase==3.21

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - -

pov_from_atoms(→ None)

parallelize_generation(→ None)

get_parser(→ argparse.ArgumentParser)

-
-
-

Attributes#

- - - - - - -

parser

-
-
-fairchem.core.scripts.gif_maker_parallelized.pov_from_atoms(mp_args) None#
-
- -
-
-fairchem.core.scripts.gif_maker_parallelized.parallelize_generation(traj_path, out_path: str, n_procs) None#
-
- -
-
-fairchem.core.scripts.gif_maker_parallelized.get_parser() argparse.ArgumentParser#
-
- -
-
-fairchem.core.scripts.gif_maker_parallelized.parser: argparse.ArgumentParser#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/scripts/hpo/index.html b/autoapi/fairchem/core/scripts/hpo/index.html
deleted file mode 100644
index eb6b3f0dc..000000000
--- a/autoapi/fairchem/core/scripts/hpo/index.html
+++ /dev/null
@@ -1,623 +0,0 @@

fairchem.core.scripts.hpo#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Submodules#

- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/scripts/hpo/run_tune/index.html b/autoapi/fairchem/core/scripts/hpo/run_tune/index.html
deleted file mode 100644
index 1bf5d4e39..000000000
--- a/autoapi/fairchem/core/scripts/hpo/run_tune/index.html
+++ /dev/null
@@ -1,651 +0,0 @@

fairchem.core.scripts.hpo.run_tune#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

ocp_trainable(→ None)

main(→ None)

-
-
-fairchem.core.scripts.hpo.run_tune.ocp_trainable(config, checkpoint_dir=None) None#
-
- -
-
-fairchem.core.scripts.hpo.run_tune.main() None#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/scripts/hpo/run_tune_pbt/index.html b/autoapi/fairchem/core/scripts/hpo/run_tune_pbt/index.html
deleted file mode 100644
index da4c93b36..000000000
--- a/autoapi/fairchem/core/scripts/hpo/run_tune_pbt/index.html
+++ /dev/null
@@ -1,651 +0,0 @@

fairchem.core.scripts.hpo.run_tune_pbt#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

ocp_trainable(→ None)

main(→ None)

-
-
-fairchem.core.scripts.hpo.run_tune_pbt.ocp_trainable(config, checkpoint_dir=None) None#
-
- -
-
-fairchem.core.scripts.hpo.run_tune_pbt.main() None#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/scripts/index.html b/autoapi/fairchem/core/scripts/index.html
deleted file mode 100644
index 99aa36449..000000000
--- a/autoapi/fairchem/core/scripts/index.html
+++ /dev/null
@@ -1,643 +0,0 @@

fairchem.core.scripts#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Subpackages#

- -
-
-

Submodules#

- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/scripts/make_challenge_submission_file/index.html b/autoapi/fairchem/core/scripts/make_challenge_submission_file/index.html
deleted file mode 100644
index 69fe922d9..000000000
--- a/autoapi/fairchem/core/scripts/make_challenge_submission_file/index.html
+++ /dev/null
@@ -1,685 +0,0 @@

fairchem.core.scripts.make_challenge_submission_file#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-

ONLY for use in the NeurIPS 2021 Open Catalyst Challenge. For all other submissions -please use make_submission_file.py.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - -

write_is2re_relaxations(→ None)

write_predictions(→ None)

main(→ None)

-
-
-

Attributes#

- - - - - - -

parser

-
-
-fairchem.core.scripts.make_challenge_submission_file.write_is2re_relaxations(path: str, filename: str, hybrid) None#
-
- -
-
-fairchem.core.scripts.make_challenge_submission_file.write_predictions(path: str, filename: str) None#
-
- -
-
-fairchem.core.scripts.make_challenge_submission_file.main(args: argparse.Namespace) None#
-
- -
-
-fairchem.core.scripts.make_challenge_submission_file.parser#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/scripts/make_lmdb_sizes/index.html b/autoapi/fairchem/core/scripts/make_lmdb_sizes/index.html
deleted file mode 100644
index 8804f154a..000000000
--- a/autoapi/fairchem/core/scripts/make_lmdb_sizes/index.html
+++ /dev/null
@@ -1,672 +0,0 @@

fairchem.core.scripts.make_lmdb_sizes#

-

This script provides the functionality to generate metadata.npz files necessary -for load_balancing the DataLoader.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

get_data(index)

main(→ None)

-
-
-

Attributes#

- - - - - - -

parser

-
-
-fairchem.core.scripts.make_lmdb_sizes.get_data(index)#
-
- -
-
-fairchem.core.scripts.make_lmdb_sizes.main(args) None#
-
- -
-
-fairchem.core.scripts.make_lmdb_sizes.parser#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/scripts/make_submission_file/index.html b/autoapi/fairchem/core/scripts/make_submission_file/index.html
deleted file mode 100644
index 405ee0398..000000000
--- a/autoapi/fairchem/core/scripts/make_submission_file/index.html
+++ /dev/null
@@ -1,693 +0,0 @@

fairchem.core.scripts.make_submission_file#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - -

write_is2re_relaxations(→ None)

write_predictions(→ None)

main(→ None)

-
-
-

Attributes#

- - - - - - - - - -

SPLITS

parser

-
-
-fairchem.core.scripts.make_submission_file.SPLITS#
-
- -
-
-fairchem.core.scripts.make_submission_file.write_is2re_relaxations(args) None#
-
- -
-
-fairchem.core.scripts.make_submission_file.write_predictions(args) None#
-
- -
-
-fairchem.core.scripts.make_submission_file.main(args: argparse.Namespace) None#
-
- -
-
-fairchem.core.scripts.make_submission_file.parser#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/scripts/preprocess_ef/index.html b/autoapi/fairchem/core/scripts/preprocess_ef/index.html
deleted file mode 100644
index 0202d3544..000000000
--- a/autoapi/fairchem/core/scripts/preprocess_ef/index.html
+++ /dev/null
@@ -1,682 +0,0 @@

fairchem.core.scripts.preprocess_ef#

-

Creates LMDB files with extracted graph features from provided *.extxyz files -for the S2EF task.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - -

write_images_to_lmdb(mp_arg)

main(→ None)

get_parser(→ argparse.ArgumentParser)

-
-
-

Attributes#

- - - - - - -

parser

-
-
-fairchem.core.scripts.preprocess_ef.write_images_to_lmdb(mp_arg)#
-
- -
-
-fairchem.core.scripts.preprocess_ef.main(args: argparse.Namespace) None#
-
- -
-
-fairchem.core.scripts.preprocess_ef.get_parser() argparse.ArgumentParser#
-
- -
-
-fairchem.core.scripts.preprocess_ef.parser: argparse.ArgumentParser#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/scripts/preprocess_relaxed/index.html b/autoapi/fairchem/core/scripts/preprocess_relaxed/index.html
deleted file mode 100644
index 7ad34beaf..000000000
--- a/autoapi/fairchem/core/scripts/preprocess_relaxed/index.html
+++ /dev/null
@@ -1,672 +0,0 @@

fairchem.core.scripts.preprocess_relaxed#

-

Creates LMDB files with extracted graph features from provided *.extxyz files -for the S2EF task.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

write_images_to_lmdb(→ None)

main(→ None)

-
-
-

Attributes#

- - - - - - -

parser

-
-
-fairchem.core.scripts.preprocess_relaxed.write_images_to_lmdb(mp_arg) None#
-
- -
-
-fairchem.core.scripts.preprocess_relaxed.main(args, split) None#
-
- -
-
-fairchem.core.scripts.preprocess_relaxed.parser#
-
- -
\ No newline at end of file
diff --git a/autoapi/fairchem/core/scripts/uncompress/index.html b/autoapi/fairchem/core/scripts/uncompress/index.html
deleted file mode 100644
index d1aa981e0..000000000
--- a/autoapi/fairchem/core/scripts/uncompress/index.html
+++ /dev/null
@@ -1,692 +0,0 @@

fairchem.core.scripts.uncompress#

-

Uncompresses downloaded S2EF datasets to be used by the LMDB preprocessing -script - preprocess_ef.py

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - -

read_lzma(→ None)

decompress_list_of_files(→ None)

get_parser(→ argparse.ArgumentParser)

main(→ None)

-
-
-

Attributes#

- - - - - - -

parser

-
-
-fairchem.core.scripts.uncompress.read_lzma(inpfile: str, outfile: str) None#
-
- -
-
-fairchem.core.scripts.uncompress.decompress_list_of_files(ip_op_pair: tuple[str, str]) None#
-
- -
-
-fairchem.core.scripts.uncompress.get_parser() argparse.ArgumentParser#
-
- -
-
-fairchem.core.scripts.uncompress.main(args: argparse.Namespace) None#
-
- -
-
-fairchem.core.scripts.uncompress.parser: argparse.ArgumentParser#
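Given the read_lzma(inpfile, outfile) signature listed above, the decompression step is essentially a streamed lzma-to-plain-file copy. The sketch below shows that idea with Python's standard lzma and shutil modules; treat it as an illustration of the approach, not the script's exact implementation.

import lzma
import shutil

def read_lzma(inpfile: str, outfile: str) -> None:
    # Stream-decompress one .xz/.lzma archive into a plain text file.
    with lzma.open(inpfile, "rb") as src, open(outfile, "wb") as dst:
        shutil.copyfileobj(src, dst)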
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tasks/index.html b/autoapi/fairchem/core/tasks/index.html
deleted file mode 100644
index 6593414cd..000000000
--- a/autoapi/fairchem/core/tasks/index.html
+++ /dev/null
@@ -1,736 +0,0 @@

fairchem.core.tasks#

-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#


PredictTask

RelaxationTask

TrainTask

ValidateTask

-
-
-class fairchem.core.tasks.PredictTask(config)#
-

Bases: BaseTask

-
-
-run() None#
-
- -
- -
-
-class fairchem.core.tasks.RelaxationTask(config)#
-

Bases: BaseTask

-
-
-run() None#
-
- -
- -
-
-class fairchem.core.tasks.TrainTask(config)#
-

Bases: BaseTask

-
-
-_process_error(e: RuntimeError) None#
-
- -
-
-run() None#
-
- -
- -
-
-class fairchem.core.tasks.ValidateTask(config)#
-

Bases: BaseTask

-
-
-run() None#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tasks/task/index.html b/autoapi/fairchem/core/tasks/task/index.html
deleted file mode 100644
index 67387ec19..000000000
--- a/autoapi/fairchem/core/tasks/task/index.html
+++ /dev/null
@@ -1,757 +0,0 @@

fairchem.core.tasks.task#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


BaseTask

TrainTask

PredictTask

ValidateTask

RelaxationTask

-
-
-class fairchem.core.tasks.task.BaseTask(config)#
-
-
-setup(trainer) None#
-
- -
-
-abstract run()#
-
- -
- -
-
-class fairchem.core.tasks.task.TrainTask(config)#
-

Bases: BaseTask

-
-
-_process_error(e: RuntimeError) None#
-
- -
-
-run() None#
-
- -
- -
-
-class fairchem.core.tasks.task.PredictTask(config)#
-

Bases: BaseTask

-
-
-run() None#
-
- -
- -
-
-class fairchem.core.tasks.task.ValidateTask(config)#
-

Bases: BaseTask

-
-
-run() None#
-
- -
- -
-
-class fairchem.core.tasks.task.RelaxationTask(config)#
-

Bases: BaseTask

-
-
-run() None#
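Since BaseTask only asks for setup(trainer) and run(), a custom task can stay very small. The class below is a hypothetical sketch, not code from this module; it assumes setup() stores the trainer on self.trainer, as the setup(trainer) signature suggests.

import time

from fairchem.core.tasks.task import BaseTask

class TimedValidateTask(BaseTask):
    # Hypothetical task: run validation once and report how long it took.
    def run(self) -> None:
        start = time.time()
        self.trainer.validate(split="val")  # validate() is documented on BaseTrainer
        print(f"validation finished in {time.time() - start:.1f} s")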
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/common/test_ase_calculator/index.html b/autoapi/fairchem/core/tests/common/test_ase_calculator/index.html
deleted file mode 100644
index 0455c99bf..000000000
--- a/autoapi/fairchem/core/tests/common/test_ase_calculator/index.html
+++ /dev/null
@@ -1,686 +0,0 @@

fairchem.core.tests.common.test_ase_calculator#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Functions#


atoms(→ ase.Atoms)

checkpoint_path(request, tmp_path)

test_calculator_setup(checkpoint_path)

test_relaxation_final_energy(→ None)

test_random_seed_final_energy(atoms, tmp_path)

-
-
-fairchem.core.tests.common.test_ase_calculator.atoms() ase.Atoms#
-
- -
-
-fairchem.core.tests.common.test_ase_calculator.checkpoint_path(request, tmp_path)#
-
- -
-
-fairchem.core.tests.common.test_ase_calculator.test_calculator_setup(checkpoint_path)#
-
- -
-
-fairchem.core.tests.common.test_ase_calculator.test_relaxation_final_energy(atoms, tmp_path, snapshot) None#
-
- -
-
-fairchem.core.tests.common.test_ase_calculator.test_random_seed_final_energy(atoms, tmp_path)#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/common/test_data_parallel_batch_sampler/index.html b/autoapi/fairchem/core/tests/common/test_data_parallel_batch_sampler/index.html
deleted file mode 100644
index a319dd2c1..000000000
--- a/autoapi/fairchem/core/tests/common/test_data_parallel_batch_sampler/index.html
+++ /dev/null
@@ -1,842 +0,0 @@

fairchem.core.tests.common.test_data_parallel_batch_sampler#

-
-

Module Contents#

-
-

Functions#


_temp_file(name)

valid_path_dataset()

invalid_path_dataset()

invalid_dataset()

test_lowercase(→ None)

test_invalid_mode(→ None)

test_invalid_dataset(→ None)

test_invalid_path_dataset(→ None)

test_valid_dataset(→ None)

test_disabled(→ None)

test_single_node(→ None)

test_stateful_distributed_sampler_noshuffle(→ None)

test_stateful_distributed_sampler_vs_distributed_sampler(→ None)

test_stateful_distributed_sampler(→ None)

test_stateful_distributed_sampler_numreplicas(→ None)

test_stateful_distributed_sampler_numreplicas_drop_last(→ None)

-
-
-

Attributes#


DATA

SIZE_ATOMS

SIZE_NEIGHBORS

T_co

-
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.DATA = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.SIZE_ATOMS = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.SIZE_NEIGHBORS = [4, 4, 4, 4, 4, 4, 4, 4, 4, 4]#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.T_co#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler._temp_file(name: str)#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.valid_path_dataset()#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.invalid_path_dataset()#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.invalid_dataset()#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.test_lowercase(invalid_dataset) None#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.test_invalid_mode(invalid_dataset) None#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.test_invalid_dataset(invalid_dataset) None#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.test_invalid_path_dataset(invalid_path_dataset) None#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.test_valid_dataset(valid_path_dataset) None#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.test_disabled(valid_path_dataset) None#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.test_single_node(valid_path_dataset) None#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_noshuffle(valid_path_dataset) None#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_vs_distributed_sampler(valid_path_dataset) None#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler(valid_path_dataset) None#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_numreplicas(valid_path_dataset) None#
-
- -
-
-fairchem.core.tests.common.test_data_parallel_batch_sampler.test_stateful_distributed_sampler_numreplicas_drop_last(valid_path_dataset) None#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/common/test_yaml_loader/index.html b/autoapi/fairchem/core/tests/common/test_yaml_loader/index.html
deleted file mode 100644
index efbd84d8d..000000000
--- a/autoapi/fairchem/core/tests/common/test_yaml_loader/index.html
+++ /dev/null
@@ -1,673 +0,0 @@

fairchem.core.tests.common.test_yaml_loader#

-
-

Module Contents#

-
-

Functions#


invalid_yaml_config()

valid_yaml_config()

test_invalid_config(invalid_yaml_config)

test_valid_config(valid_yaml_config)

-
-
-fairchem.core.tests.common.test_yaml_loader.invalid_yaml_config()#
-
- -
-
-fairchem.core.tests.common.test_yaml_loader.valid_yaml_config()#
-
- -
-
-fairchem.core.tests.common.test_yaml_loader.test_invalid_config(invalid_yaml_config)#
-
- -
-
-fairchem.core.tests.common.test_yaml_loader.test_valid_config(valid_yaml_config)#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/conftest/index.html b/autoapi/fairchem/core/tests/conftest/index.html
deleted file mode 100644
index 02876f5f3..000000000
--- a/autoapi/fairchem/core/tests/conftest/index.html
+++ /dev/null
@@ -1,777 +0,0 @@

fairchem.core.tests.conftest#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


Approx

Wrapper object for approximately compared numpy arrays.

_ApproxNumpyFormatter

ApproxExtension

By default, syrupy uses the __repr__ of the expected (snapshot) and actual values

-
-
-

Functions#


_try_parse_approx(→ Approx | None)

Parse the string representation of an Approx object.

snapshot(snapshot)

-
-
-

Attributes#


DEFAULT_RTOL

DEFAULT_ATOL

-
-
-fairchem.core.tests.conftest.DEFAULT_RTOL = 0.001#
-
- -
-
-fairchem.core.tests.conftest.DEFAULT_ATOL = 0.001#
-
- -
-
-class fairchem.core.tests.conftest.Approx(data: numpy.ndarray | list, *, rtol: float | None = None, atol: float | None = None)#
-

Wrapper object for approximately compared numpy arrays.

-
-
-__repr__() str#
-

Return repr(self).

-
- -
- -
-
-class fairchem.core.tests.conftest._ApproxNumpyFormatter(data)#
-
-
-__repr__() str#
-

Return repr(self).

-
- -
- -
-
-fairchem.core.tests.conftest._try_parse_approx(data: syrupy.types.SerializableData) Approx | None#
-

Parse the string representation of an Approx object. We can just use eval here, since we know the string is safe.

-
- -
-
-class fairchem.core.tests.conftest.ApproxExtension#
-

Bases: syrupy.extensions.amber.AmberSnapshotExtension

-

By default, syrupy uses the __repr__ of the expected (snapshot) and actual values to serialize them into strings. Then, it compares the strings to see if they match.

However, this behavior is not ideal for comparing floats/ndarrays. For example, if we have a snapshot with a float value of 0.1, and the actual value is 0.10000000000000001, then the strings will not match, even though the values are effectively equal.

To work around this, we override the serialize method to serialize the expected value into a special representation. Then, we override the matches function (which originally does a simple string comparison) to parse the expected and actual values into numpy arrays. Finally, we compare the arrays using np.allclose.

-
-
-matches(*, serialized_data: syrupy.types.SerializableData, snapshot_data: syrupy.types.SerializableData) bool#
-
- -
-
-serialize(data, **kwargs)#
-
- -
- -
-
-fairchem.core.tests.conftest.snapshot(snapshot)#
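To make the serialize/matches override described above concrete, here is a minimal hypothetical sketch of the same idea: serialize arrays through a stable string form, then compare parsed values with np.allclose instead of comparing raw strings. It follows the signatures listed above but is not the module's actual code.

import numpy as np
from syrupy.extensions.amber import AmberSnapshotExtension

class ApproxNumpyExtension(AmberSnapshotExtension):
    def serialize(self, data, **kwargs):
        # Turn ndarrays into a parseable string before handing off to the amber serializer.
        if isinstance(data, np.ndarray):
            data = np.array2string(data, precision=8, separator=", ")
        return super().serialize(data, **kwargs)

    def matches(self, *, serialized_data, snapshot_data) -> bool:
        try:
            # Approximate numeric comparison; eval is acceptable here because the
            # strings come from our own test snapshots (same reasoning as above).
            expected = np.asarray(eval(str(snapshot_data)))
            actual = np.asarray(eval(str(serialized_data)))
            return bool(np.allclose(expected, actual, rtol=1e-3, atol=1e-3))
        except Exception:
            # Fall back to syrupy's exact string comparison for everything else.
            return super().matches(serialized_data=serialized_data, snapshot_data=snapshot_data)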
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/datasets/test_ase_datasets/index.html b/autoapi/fairchem/core/tests/datasets/test_ase_datasets/index.html
deleted file mode 100644
index 5c6294052..000000000
--- a/autoapi/fairchem/core/tests/datasets/test_ase_datasets/index.html
+++ /dev/null
@@ -1,732 +0,0 @@

fairchem.core.tests.datasets.test_ase_datasets#

-
-

Module Contents#

-
-

Functions#


ase_dataset(request, tmp_path_factory)

test_ase_dataset(ase_dataset)

test_ase_read_dataset(→ None)

test_ase_metadata_guesser(→ None)

test_db_add_delete(→ None)

test_ase_multiread_dataset(→ None)

test_empty_dataset(tmp_path)

-
-
-

Attributes#


structures

calc

-
-
-fairchem.core.tests.datasets.test_ase_datasets.structures#
-
- -
-
-fairchem.core.tests.datasets.test_ase_datasets.calc#
-
- -
-
-fairchem.core.tests.datasets.test_ase_datasets.ase_dataset(request, tmp_path_factory)#
-
- -
-
-fairchem.core.tests.datasets.test_ase_datasets.test_ase_dataset(ase_dataset)#
-
- -
-
-fairchem.core.tests.datasets.test_ase_datasets.test_ase_read_dataset(tmp_path) None#
-
- -
-
-fairchem.core.tests.datasets.test_ase_datasets.test_ase_metadata_guesser(ase_dataset) None#
-
- -
-
-fairchem.core.tests.datasets.test_ase_datasets.test_db_add_delete(tmp_path) None#
-
- -
-
-fairchem.core.tests.datasets.test_ase_datasets.test_ase_multiread_dataset(tmp_path) None#
-
- -
-
-fairchem.core.tests.datasets.test_ase_datasets.test_empty_dataset(tmp_path)#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/datasets/test_ase_lmdb/index.html b/autoapi/fairchem/core/tests/datasets/test_ase_lmdb/index.html
deleted file mode 100644
index c6a407396..000000000
--- a/autoapi/fairchem/core/tests/datasets/test_ase_lmdb/index.html
+++ /dev/null
@@ -1,772 +0,0 @@

fairchem.core.tests.datasets.test_ase_lmdb#

-
-

Module Contents#

-
-

Functions#


generate_random_structure()

ase_lmbd_path(tmp_path_factory)

test_aselmdb_write(→ None)

test_aselmdb_count(→ None)

test_aselmdb_delete(→ None)

test_aselmdb_randomreads(→ None)

test_aselmdb_constraintread(→ None)

test_update_keyvalue_pair(→ None)

test_update_atoms(→ None)

test_metadata(→ None)

-
-
-

Attributes#


N_WRITES

N_READS

test_structures

-
-
-fairchem.core.tests.datasets.test_ase_lmdb.N_WRITES = 100#
-
- -
-
-fairchem.core.tests.datasets.test_ase_lmdb.N_READS = 200#
-
- -
-
-fairchem.core.tests.datasets.test_ase_lmdb.test_structures#
-
- -
-
-fairchem.core.tests.datasets.test_ase_lmdb.generate_random_structure()#
-
- -
-
-fairchem.core.tests.datasets.test_ase_lmdb.ase_lmbd_path(tmp_path_factory)#
-
- -
-
-fairchem.core.tests.datasets.test_ase_lmdb.test_aselmdb_write(ase_lmbd_path) None#
-
- -
-
-fairchem.core.tests.datasets.test_ase_lmdb.test_aselmdb_count(ase_lmbd_path) None#
-
- -
-
-fairchem.core.tests.datasets.test_ase_lmdb.test_aselmdb_delete(ase_lmbd_path) None#
-
- -
-
-fairchem.core.tests.datasets.test_ase_lmdb.test_aselmdb_randomreads(ase_lmbd_path) None#
-
- -
-
-fairchem.core.tests.datasets.test_ase_lmdb.test_aselmdb_constraintread(ase_lmbd_path) None#
-
- -
-
-fairchem.core.tests.datasets.test_ase_lmdb.test_update_keyvalue_pair(ase_lmbd_path) None#
-
- -
-
-fairchem.core.tests.datasets.test_ase_lmdb.test_update_atoms(ase_lmbd_path) None#
-
- -
-
-fairchem.core.tests.datasets.test_ase_lmdb.test_metadata(ase_lmbd_path) None#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/datasets/test_utils/index.html b/autoapi/fairchem/core/tests/datasets/test_utils/index.html
deleted file mode 100644
index 252defa2a..000000000
--- a/autoapi/fairchem/core/tests/datasets/test_utils/index.html
+++ /dev/null
@@ -1,653 +0,0 @@

fairchem.core.tests.datasets.test_utils#

-
-

Module Contents#

-
-

Functions#


pyg_data()

test_rename_data_object_keys(pyg_data)

-
-
-fairchem.core.tests.datasets.test_utils.pyg_data()#
-
- -
-
-fairchem.core.tests.datasets.test_utils.test_rename_data_object_keys(pyg_data)#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/evaluator/test_evaluator/index.html b/autoapi/fairchem/core/tests/evaluator/test_evaluator/index.html
deleted file mode 100644
index 51527499a..000000000
--- a/autoapi/fairchem/core/tests/evaluator/test_evaluator/index.html
+++ /dev/null
@@ -1,766 +0,0 @@

fairchem.core.tests.evaluator.test_evaluator#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


TestMetrics

TestS2EFEval

TestIS2RSEval

TestIS2REEval

-
-
-

Functions#


load_evaluator_s2ef(→ None)

load_evaluator_is2rs(→ None)

load_evaluator_is2re(→ None)

-
-
-fairchem.core.tests.evaluator.test_evaluator.load_evaluator_s2ef(request) None#
-
- -
-
-fairchem.core.tests.evaluator.test_evaluator.load_evaluator_is2rs(request) None#
-
- -
-
-fairchem.core.tests.evaluator.test_evaluator.load_evaluator_is2re(request) None#
-
- -
-
-class fairchem.core.tests.evaluator.test_evaluator.TestMetrics#
-
-
-test_cosine_similarity() None#
-
- -
-
-test_magnitude_error() None#
-
- -
- -
-
-class fairchem.core.tests.evaluator.test_evaluator.TestS2EFEval#
-
-
-test_metrics_exist() None#
-
- -
- -
-
-class fairchem.core.tests.evaluator.test_evaluator.TestIS2RSEval#
-
-
-test_metrics_exist() None#
-
- -
- -
-
-class fairchem.core.tests.evaluator.test_evaluator.TestIS2REEval#
-
-
-test_metrics_exist() None#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/index.html b/autoapi/fairchem/core/tests/index.html
deleted file mode 100644
index 4253009c4..000000000
--- a/autoapi/fairchem/core/tests/index.html
+++ /dev/null
@@ -1,637 +0,0 @@

fairchem.core.tests#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Subpackages#

- -
-
-

Submodules#

\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/models/test_dimenetpp/index.html b/autoapi/fairchem/core/tests/models/test_dimenetpp/index.html
deleted file mode 100644
index a853be32a..000000000
--- a/autoapi/fairchem/core/tests/models/test_dimenetpp/index.html
+++ /dev/null
@@ -1,693 +0,0 @@

fairchem.core.tests.models.test_dimenetpp#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


TestDimeNet

-
-
-

Functions#


load_data(→ None)

load_model(→ None)

-
-
-fairchem.core.tests.models.test_dimenetpp.load_data(request) None#
-
- -
-
-fairchem.core.tests.models.test_dimenetpp.load_model(request) None#
-
- -
-
-class fairchem.core.tests.models.test_dimenetpp.TestDimeNet#
-
-
-test_rotation_invariance() None#
-
- -
-
-test_energy_force_shape(snapshot) None#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/models/test_equiformer_v2/index.html b/autoapi/fairchem/core/tests/models/test_equiformer_v2/index.html
deleted file mode 100644
index 5c3bb7f62..000000000
--- a/autoapi/fairchem/core/tests/models/test_equiformer_v2/index.html
+++ /dev/null
@@ -1,707 +0,0 @@

fairchem.core.tests.models.test_equiformer_v2#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


TestEquiformerV2

TestMPrimaryLPrimary

-
-
-

Functions#


load_data(request)

load_model(request)

-
-
-fairchem.core.tests.models.test_equiformer_v2.load_data(request)#
-
- -
-
-fairchem.core.tests.models.test_equiformer_v2.load_model(request)#
-
- -
-
-class fairchem.core.tests.models.test_equiformer_v2.TestEquiformerV2#
-
-
-test_energy_force_shape(snapshot)#
-
- -
- -
-
-class fairchem.core.tests.models.test_equiformer_v2.TestMPrimaryLPrimary#
-
-
-test_mprimary_lprimary_mappings()#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/models/test_escn/index.html b/autoapi/fairchem/core/tests/models/test_escn/index.html
deleted file mode 100644
index de680b36b..000000000
--- a/autoapi/fairchem/core/tests/models/test_escn/index.html
+++ /dev/null
@@ -1,654 +0,0 @@

fairchem.core.tests.models.test_escn#

-
-

Module Contents#

-
-

Classes#


TestMPrimaryLPrimary

-
-
-class fairchem.core.tests.models.test_escn.TestMPrimaryLPrimary#
-
-
-test_mprimary_lprimary_mappings()#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/models/test_gemnet/index.html b/autoapi/fairchem/core/tests/models/test_gemnet/index.html
deleted file mode 100644
index de29cfe11..000000000
--- a/autoapi/fairchem/core/tests/models/test_gemnet/index.html
+++ /dev/null
@@ -1,693 +0,0 @@

fairchem.core.tests.models.test_gemnet#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


TestGemNetT

-
-
-

Functions#


load_data(→ None)

load_model(→ None)

-
-
-fairchem.core.tests.models.test_gemnet.load_data(request) None#
-
- -
-
-fairchem.core.tests.models.test_gemnet.load_model(request) None#
-
- -
-
-class fairchem.core.tests.models.test_gemnet.TestGemNetT#
-
-
-test_rotation_invariance() None#
-
- -
-
-test_energy_force_shape(snapshot) None#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/models/test_gemnet_oc/index.html b/autoapi/fairchem/core/tests/models/test_gemnet_oc/index.html
deleted file mode 100644
index 4cb5a105d..000000000
--- a/autoapi/fairchem/core/tests/models/test_gemnet_oc/index.html
+++ /dev/null
@@ -1,693 +0,0 @@

fairchem.core.tests.models.test_gemnet_oc#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


TestGemNetOC

-
-
-

Functions#


load_data(→ None)

load_model(→ None)

-
-
-fairchem.core.tests.models.test_gemnet_oc.load_data(request) None#
-
- -
-
-fairchem.core.tests.models.test_gemnet_oc.load_model(request) None#
-
- -
-
-class fairchem.core.tests.models.test_gemnet_oc.TestGemNetOC#
-
-
-test_rotation_invariance() None#
-
- -
-
-test_energy_force_shape(snapshot) None#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/models/test_gemnet_oc_scaling_mismatch/index.html b/autoapi/fairchem/core/tests/models/test_gemnet_oc_scaling_mismatch/index.html
deleted file mode 100644
index 49e05ea46..000000000
--- a/autoapi/fairchem/core/tests/models/test_gemnet_oc_scaling_mismatch/index.html
+++ /dev/null
@@ -1,678 +0,0 @@

fairchem.core.tests.models.test_gemnet_oc_scaling_mismatch#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


TestGemNetOC

-
-
-class fairchem.core.tests.models.test_gemnet_oc_scaling_mismatch.TestGemNetOC#
-
-
-test_no_scaling_mismatch() None#
-
- -
-
-test_scaling_mismatch() None#
-
- -
-
-test_no_file_exists() None#
-
- -
-
-test_not_fitted() None#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/models/test_schnet/index.html b/autoapi/fairchem/core/tests/models/test_schnet/index.html
deleted file mode 100644
index 576e063fc..000000000
--- a/autoapi/fairchem/core/tests/models/test_schnet/index.html
+++ /dev/null
@@ -1,693 +0,0 @@

fairchem.core.tests.models.test_schnet#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


TestSchNet

-
-
-

Functions#


load_data(→ None)

load_model(→ None)

-
-
-fairchem.core.tests.models.test_schnet.load_data(request) None#
-
- -
-
-fairchem.core.tests.models.test_schnet.load_model(request) None#
-
- -
-
-class fairchem.core.tests.models.test_schnet.TestSchNet#
-
-
-test_rotation_invariance() None#
-
- -
-
-test_energy_force_shape(snapshot) None#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/preprocessing/index.html b/autoapi/fairchem/core/tests/preprocessing/index.html
deleted file mode 100644
index 1a340a663..000000000
--- a/autoapi/fairchem/core/tests/preprocessing/index.html
+++ /dev/null
@@ -1,624 +0,0 @@

fairchem.core.tests.preprocessing#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Submodules#

\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/preprocessing/test_atoms_to_graphs/index.html b/autoapi/fairchem/core/tests/preprocessing/test_atoms_to_graphs/index.html
deleted file mode 100644
index 4cfc60d06..000000000
--- a/autoapi/fairchem/core/tests/preprocessing/test_atoms_to_graphs/index.html
+++ /dev/null
@@ -1,688 +0,0 @@

fairchem.core.tests.preprocessing.test_atoms_to_graphs#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


TestAtomsToGraphs

-
-
-

Functions#


atoms_to_graphs_internals(→ None)

-
-
-fairchem.core.tests.preprocessing.test_atoms_to_graphs.atoms_to_graphs_internals(request) None#
-
- -
-
-class fairchem.core.tests.preprocessing.test_atoms_to_graphs.TestAtomsToGraphs#
-
-
-test_gen_neighbors_pymatgen() None#
-
- -
-
-test_convert() None#
-
- -
-
-test_convert_all() None#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/preprocessing/test_pbc/index.html b/autoapi/fairchem/core/tests/preprocessing/test_pbc/index.html
deleted file mode 100644
index 6926ac36c..000000000
--- a/autoapi/fairchem/core/tests/preprocessing/test_pbc/index.html
+++ /dev/null
@@ -1,674 +0,0 @@

fairchem.core.tests.preprocessing.test_pbc#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


TestPBC

-
-
-

Functions#


load_data(→ None)

-
-
-fairchem.core.tests.preprocessing.test_pbc.load_data(request) None#
-
- -
-
-class fairchem.core.tests.preprocessing.test_pbc.TestPBC#
-
-
-test_pbc_distances() None#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/tests/preprocessing/test_radius_graph_pbc/index.html b/autoapi/fairchem/core/tests/preprocessing/test_radius_graph_pbc/index.html
deleted file mode 100644
index 97d96a658..000000000
--- a/autoapi/fairchem/core/tests/preprocessing/test_radius_graph_pbc/index.html
+++ /dev/null
@@ -1,698 +0,0 @@

fairchem.core.tests.preprocessing.test_radius_graph_pbc#

-

Copyright (c) Facebook, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


TestRadiusGraphPBC

-
-
-

Functions#


load_data(→ None)

check_features_match(→ bool)

-
-
-fairchem.core.tests.preprocessing.test_radius_graph_pbc.load_data(request) None#
-
- -
-
-fairchem.core.tests.preprocessing.test_radius_graph_pbc.check_features_match(edge_index_1, cell_offsets_1, edge_index_2, cell_offsets_2) bool#
-
- -
-
-class fairchem.core.tests.preprocessing.test_radius_graph_pbc.TestRadiusGraphPBC#
-
-
-test_radius_graph_pbc() None#
-
- -
-
-test_bulk() None#
-
- -
-
-test_molecule() None#
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/trainers/base_trainer/index.html b/autoapi/fairchem/core/trainers/base_trainer/index.html
deleted file mode 100644
index 5a39d6f4b..000000000
--- a/autoapi/fairchem/core/trainers/base_trainer/index.html
+++ /dev/null
@@ -1,799 +0,0 @@

fairchem.core.trainers.base_trainer#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


BaseTrainer

Helper class that provides a standard way to create an ABC using

-
-
-class fairchem.core.trainers.base_trainer.BaseTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier: str, timestamp_id: str | None = None, run_dir: str | None = None, is_debug: bool = False, print_every: int = 100, seed: int | None = None, logger: str = 'wandb', local_rank: int = 0, amp: bool = False, cpu: bool = False, name: str = 'ocp', slurm=None, noddp: bool = False)#
-

Bases: abc.ABC

-

Helper class that provides a standard way to create an ABC using inheritance.

-
-
-property _unwrapped_model#
-
- -
-
-abstract train(disable_eval_tqdm: bool = False) None#
-

Run model training iterations.

-
- -
-
-static _get_timestamp(device: torch.device, suffix: str | None) str#
-
- -
-
-load() None#
-
- -
-
-set_seed(seed) None#
-
- -
-
-load_seed_from_config() None#
-
- -
-
-load_logger() None#
-
- -
-
-get_sampler(dataset, batch_size: int, shuffle: bool) fairchem.core.common.data_parallel.BalancedBatchSampler#
-
- -
-
-get_dataloader(dataset, sampler) torch.utils.data.DataLoader#
-
- -
-
-load_datasets() None#
-
- -
-
-load_task()#
-
- -
-
-load_model() None#
-
- -
-
-load_checkpoint(checkpoint_path: str, checkpoint: dict | None = None) None#
-
- -
-
-load_loss() None#
-
- -
-
-load_optimizer() None#
-
- -
-
-load_extras() None#
-
- -
-
-save(metrics=None, checkpoint_file: str = 'checkpoint.pt', training_state: bool = True) str | None#
-
- -
-
-update_best(primary_metric, val_metrics, disable_eval_tqdm: bool = True) None#
-
- -
-
-validate(split: str = 'val', disable_tqdm: bool = False)#
-
- -
-
-_backward(loss) None#
-
- -
-
-save_results(predictions: dict[str, numpy.typing.NDArray], results_file: str | None, keys: collections.abc.Sequence[str] | None = None) None#
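Because train() is the only abstract method, a subclass can lean on the loading and checkpointing helpers listed above. The skeleton below is a hedged sketch; the attribute name self.train_loader and the idea that the model call returns a scalar loss are assumptions, not guarantees from this class.

from fairchem.core.trainers.base_trainer import BaseTrainer

class MinimalTrainer(BaseTrainer):
    def train(self, disable_eval_tqdm: bool = False) -> None:
        # One illustrative pass over the training data, then validate and checkpoint.
        for batch in self.train_loader:      # assumed attribute set up by load_datasets()
            loss = self.model(batch)         # assumed: the wrapped model returns a scalar loss
            self._backward(loss)             # documented helper above
        self.validate(split="val", disable_tqdm=disable_eval_tqdm)
        self.save(checkpoint_file="checkpoint.pt", training_state=True)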
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/trainers/index.html b/autoapi/fairchem/core/trainers/index.html
deleted file mode 100644
index b8f146a4d..000000000
--- a/autoapi/fairchem/core/trainers/index.html
+++ /dev/null
@@ -1,904 +0,0 @@

fairchem.core.trainers#

-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#


BaseTrainer

Helper class that provides a standard way to create an ABC using

OCPTrainer

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to

-
-
-class fairchem.core.trainers.BaseTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier: str, timestamp_id: str | None = None, run_dir: str | None = None, is_debug: bool = False, print_every: int = 100, seed: int | None = None, logger: str = 'wandb', local_rank: int = 0, amp: bool = False, cpu: bool = False, name: str = 'ocp', slurm=None, noddp: bool = False)#
-

Bases: abc.ABC

-

Helper class that provides a standard way to create an ABC using inheritance.

-
-
-property _unwrapped_model#
-
- -
-
-abstract train(disable_eval_tqdm: bool = False) None#
-

Run model training iterations.

-
- -
-
-static _get_timestamp(device: torch.device, suffix: str | None) str#
-
- -
-
-load() None#
-
- -
-
-set_seed(seed) None#
-
- -
-
-load_seed_from_config() None#
-
- -
-
-load_logger() None#
-
- -
-
-get_sampler(dataset, batch_size: int, shuffle: bool) fairchem.core.common.data_parallel.BalancedBatchSampler#
-
- -
-
-get_dataloader(dataset, sampler) torch.utils.data.DataLoader#
-
- -
-
-load_datasets() None#
-
- -
-
-load_task()#
-
- -
-
-load_model() None#
-
- -
-
-load_checkpoint(checkpoint_path: str, checkpoint: dict | None = None) None#
-
- -
-
-load_loss() None#
-
- -
-
-load_optimizer() None#
-
- -
-
-load_extras() None#
-
- -
-
-save(metrics=None, checkpoint_file: str = 'checkpoint.pt', training_state: bool = True) str | None#
-
- -
-
-update_best(primary_metric, val_metrics, disable_eval_tqdm: bool = True) None#
-
- -
-
-validate(split: str = 'val', disable_tqdm: bool = False)#
-
- -
-
-_backward(loss) None#
-
- -
-
-save_results(predictions: dict[str, numpy.typing.NDArray], results_file: str | None, keys: collections.abc.Sequence[str] | None = None) None#
-
- -
- -
-
-class fairchem.core.trainers.OCPTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp')#
-

Bases: fairchem.core.trainers.base_trainer.BaseTrainer

-

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to Relaxed State (IS2RS) tasks.

-
-

Note

-

Examples of configurations for task, model, dataset and optimizer can be found in configs/ocp_s2ef and configs/ocp_is2rs.

-
-
-
Parameters:
-
    -
  • task (dict) – Task configuration.

  • -
  • model (dict) – Model configuration.

  • -
  • outputs (dict) – Output property configuration.

  • -
  • dataset (dict) – Dataset configuration. The dataset needs to be a SinglePointLMDB dataset.

  • -
  • optimizer (dict) – Optimizer configuration.

  • -
  • loss_fns (dict) – Loss function configuration.

  • -
  • eval_metrics (dict) – Evaluation metrics configuration.

  • -
  • identifier (str) – Experiment identifier that is appended to log directory.

  • -
  • run_dir (str, optional) – Path to the run directory where logs are to be saved. (default: None)

  • is_debug (bool, optional) – Run in debug mode. (default: False)

  • print_every (int, optional) – Frequency of printing logs. (default: 100)

  • seed (int, optional) – Random number seed. (default: None)

  • logger (str, optional) – Type of logger to be used. (default: wandb)

  • local_rank (int, optional) – Local rank of the process, only applicable for distributed training. (default: 0)

  • amp (bool, optional) – Run using automatic mixed precision. (default: False)

  • slurm (dict) – Slurm configuration. Currently just for keeping track. (default: {})

  • -
  • noddp (bool, optional) – Run model without DDP.

  • -
-
-
-
-
-train(disable_eval_tqdm: bool = False) None#
-

Run model training iterations.

-
- -
-
-_forward(batch)#
-
- -
-
-_compute_loss(out, batch)#
-
- -
-
-_compute_metrics(out, batch, evaluator, metrics=None)#
-
- -
-
-predict(data_loader, per_image: bool = True, results_file: str | None = None, disable_tqdm: bool = False)#
-
- -
-
-run_relaxations(split='val')#
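As a usage illustration (not an excerpt from the fairchem documentation), an OCPTrainer is built from plain dictionaries matching the parameters above and then driven with train() and validate(). Every dictionary value below is a placeholder; real configs need a full model definition and valid dataset paths.

from fairchem.core.trainers import OCPTrainer

trainer = OCPTrainer(
    task={"dataset": "ase_db"},                                   # placeholder
    model={"name": "gemnet_oc"},                                  # placeholder, incomplete
    outputs={"energy": {"level": "system"}},                      # placeholder
    dataset={"train": {"src": "train.db", "format": "ase_db"}},   # placeholder paths
    optimizer={"batch_size": 4, "optimizer": "AdamW", "lr_initial": 5e-4, "max_epochs": 1},
    loss_fns=[{"energy": {"fn": "mae", "coefficient": 1}}],
    eval_metrics={"metrics": {"energy": ["mae"]}},
    identifier="example-run",
    logger="tensorboard",
)

trainer.train()                          # documented: run model training iterations
metrics = trainer.validate(split="val")  # documented on BaseTrainer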
-
\ No newline at end of file
diff --git a/autoapi/fairchem/core/trainers/ocp_trainer/index.html b/autoapi/fairchem/core/trainers/ocp_trainer/index.html
deleted file mode 100644
index 110280eec..000000000
--- a/autoapi/fairchem/core/trainers/ocp_trainer/index.html
+++ /dev/null
@@ -1,731 +0,0 @@

fairchem.core.trainers.ocp_trainer#

-

Copyright (c) Meta, Inc. and its affiliates.

-

This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.

-
-

Module Contents#

-
-

Classes#


OCPTrainer

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to

-
-
-class fairchem.core.trainers.ocp_trainer.OCPTrainer(task, model, outputs, dataset, optimizer, loss_fns, eval_metrics, identifier, timestamp_id=None, run_dir=None, is_debug=False, print_every=100, seed=None, logger='wandb', local_rank=0, amp=False, cpu=False, slurm=None, noddp=False, name='ocp')#
-

Bases: fairchem.core.trainers.base_trainer.BaseTrainer

-

Trainer class for the Structure to Energy & Force (S2EF) and Initial State to Relaxed State (IS2RS) tasks.

-
-

Note

-

Examples of configurations for task, model, dataset and optimizer can be found in configs/ocp_s2ef and configs/ocp_is2rs.

-
-
-
Parameters:
-
    -
  • task (dict) – Task configuration.

  • -
  • model (dict) – Model configuration.

  • -
  • outputs (dict) – Output property configuration.

  • -
  • dataset (dict) – Dataset configuration. The dataset needs to be a SinglePointLMDB dataset.

  • -
  • optimizer (dict) – Optimizer configuration.

  • -
  • loss_fns (dict) – Loss function configuration.

  • -
  • eval_metrics (dict) – Evaluation metrics configuration.

  • -
  • identifier (str) – Experiment identifier that is appended to log directory.

  • -
  • run_dir (str, optional) – Path to the run directory where logs are to be saved. (default: None)

  • is_debug (bool, optional) – Run in debug mode. (default: False)

  • print_every (int, optional) – Frequency of printing logs. (default: 100)

  • seed (int, optional) – Random number seed. (default: None)

  • logger (str, optional) – Type of logger to be used. (default: wandb)

  • local_rank (int, optional) – Local rank of the process, only applicable for distributed training. (default: 0)

  • amp (bool, optional) – Run using automatic mixed precision. (default: False)

  • slurm (dict) – Slurm configuration. Currently just for keeping track. (default: {})

  • -
  • noddp (bool, optional) – Run model without DDP.

  • -
-
-
-
-
-train(disable_eval_tqdm: bool = False) None#
-

Run model training iterations.

-
- -
-
-_forward(batch)#
-
- -
-
-_compute_loss(out, batch)#
-
- -
-
-_compute_metrics(out, batch, evaluator, metrics=None)#
-
- -
-
-predict(data_loader, per_image: bool = True, results_file: str | None = None, disable_tqdm: bool = False)#
-
- -
-
-run_relaxations(split='val')#
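After training, the documented predict() and run_relaxations() methods drive inference. A short hedged sketch, assuming trainer is an already-constructed OCPTrainer and val_loader an existing dataloader:

# predict() is documented above; treating its return value as per-structure arrays is an assumption.
predictions = trainer.predict(val_loader, per_image=True, results_file=None, disable_tqdm=True)
trainer.run_relaxations(split="val")  # documented relaxation entry point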
-
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/core/adsorbate/index.html b/autoapi/fairchem/data/oc/core/adsorbate/index.html
deleted file mode 100644
index 6ebf3fba0..000000000
--- a/autoapi/fairchem/data/oc/core/adsorbate/index.html
+++ /dev/null
@@ -1,725 +0,0 @@

fairchem.data.oc.core.adsorbate#

-
-

Module Contents#

-
-

Classes#


Adsorbate

Initializes an adsorbate object in one of 4 ways:

-
-
-

Functions#


randomly_rotate_adsorbate(adsorbate_atoms[, mode, ...])

-
-
-class fairchem.data.oc.core.adsorbate.Adsorbate(adsorbate_atoms: ase.Atoms = None, adsorbate_id_from_db: int = None, adsorbate_smiles_from_db: str = None, adsorbate_db_path: str = ADSORBATES_PKL_PATH, adsorbate_db: Dict[int, Tuple[Any, Ellipsis]] = None, adsorbate_binding_indices: list = None)#
-

Initializes an adsorbate object in one of 4 ways: - Directly pass in an ase.Atoms object.

-
-

For this, you should also provide the index of the binding atom.

-
-
    -
  • Pass in index of adsorbate to select from adsorbate database.

  • -
  • Pass in the SMILES string of the adsorbate to select from the database.

  • -
  • Randomly sample an adsorbate from the adsorbate database.

  • -
-
-
Parameters:
-
    -
  • adsorbate_atoms (ase.Atoms) – Adsorbate structure.

  • -
  • adsorbate_id_from_db (int) – Index of adsorbate to select.

  • -
  • adsorbate_smiles_from_db (str) – A SMILES string of the desired adsorbate.

  • -
  • adsorbate_db_path (str) – Path to adsorbate database.

  • -
  • adsorbate_binding_indices (list) – The index/indices of the adsorbate atoms which are expected to bind.

  • -
-
-
-
-
-__len__()#
-
- -
-
-__str__()#
-

Return str(self).

-
- -
-
-__repr__()#
-

Return repr(self).

-
- -
-
-_get_adsorbate_from_random(adsorbate_db)#
-
- -
-
-_load_adsorbate(adsorbate: Tuple[Any, Ellipsis]) None#
-

Saves the fields from an adsorbate stored in a database. Fields added after the first revision are conditionally added for backwards compatibility with older database files.

-
- -
- -
-
-fairchem.data.oc.core.adsorbate.randomly_rotate_adsorbate(adsorbate_atoms: ase.Atoms, mode: str = 'random', binding_idx: int = None)#
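A short usage sketch of the constructor options documented above; the SMILES string and binding index are arbitrary examples, not recommendations.

from ase.build import molecule
from fairchem.data.oc.core import Adsorbate

# Select an adsorbate from the bundled database by SMILES string.
oh_ads = Adsorbate(adsorbate_smiles_from_db="*OH")

# Or wrap an ase.Atoms object directly and state which atom is expected to bind.
co_ads = Adsorbate(adsorbate_atoms=molecule("CO"), adsorbate_binding_indices=[0])

print(oh_ads, co_ads)  # __str__/__repr__ are defined above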
-
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/core/adsorbate_slab_config/index.html b/autoapi/fairchem/data/oc/core/adsorbate_slab_config/index.html
deleted file mode 100644
index 3324cdafe..000000000
--- a/autoapi/fairchem/data/oc/core/adsorbate_slab_config/index.html
+++ /dev/null
@@ -1,919 +0,0 @@

fairchem.data.oc.core.adsorbate_slab_config#

-
-

Module Contents#

-
-

Classes#


AdsorbateSlabConfig

Initializes a list of adsorbate-catalyst systems for a given Adsorbate and Slab.

-
-
-

Functions#


get_random_sites_on_triangle(vertices[, num_sites])

Sample num_sites random sites uniformly on a given 3D triangle.

custom_tile_atoms(atoms)

Tile the atoms so that the center tile has the indices and positions of the

get_interstitial_distances(adsorbate_slab_config)

Check to see if there is any atomic overlap between surface atoms

there_is_overlap(adsorbate_slab_config)

Check to see if there is any atomic overlap between surface atoms

-
-
-class fairchem.data.oc.core.adsorbate_slab_config.AdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbate: fairchem.data.oc.core.Adsorbate, num_sites: int = 100, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1, mode: str = 'random')#
-

Initializes a list of adsorbate-catalyst systems for a given Adsorbate and Slab.

-
-
Parameters:
-
    -
  • slab (Slab) – Slab object.

  • -
  • adsorbate (Adsorbate) – Adsorbate object.

  • -
  • num_sites (int) – Number of sites to sample.

  • -
  • num_augmentations_per_site (int) – Number of augmentations of the adsorbate per site. Total number of generated structures will be num_sites * num_augmentations_per_site.

  • interstitial_gap (float) – Minimum distance in Angstroms between adsorbate and slab atoms.

  • mode (str) – “random”, “heuristic”, or “random_site_heuristic_placement”. This affects surface site sampling and adsorbate placement on each site.

    In “random”, we do a Delaunay triangulation of the surface atoms, then sample sites uniformly at random within each triangle. When placing the adsorbate, we randomly rotate it along xyz, and place it such that the center of mass is at the site.

    In “heuristic”, we use Pymatgen’s AdsorbateSiteFinder to find the most energetically favorable sites, i.e., ontop, bridge, or hollow sites. When placing the adsorbate, we randomly rotate it along z with only slight rotation along x and y, and place it such that the binding atom is at the site.

    In “random_site_heuristic_placement”, we do a Delaunay triangulation of the surface atoms, then sample sites uniformly at random within each triangle. When placing the adsorbate, we randomly rotate it along z with only slight rotation along x and y, and place it such that the binding atom is at the site.

    In all cases, the adsorbate is placed at the closest position of no overlap with the slab plus interstitial_gap along the surface normal.

    -

  • -
-
-
-
-
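A minimal usage sketch (assuming the fairchem.data.oc package and its bundled bulk/adsorbate databases are available; the src id and SMILES string below are illustrative placeholders):

from fairchem.data.oc.core import Adsorbate, AdsorbateSlabConfig, Bulk, Slab

# Build a slab from a bulk and place an adsorbate on sampled sites.
bulk = Bulk(bulk_src_id_from_db="mp-30")                       # placeholder src id
slab = Slab.from_bulk_get_random_slab(bulk=bulk, max_miller=2)
adsorbate = Adsorbate(adsorbate_smiles_from_db="*OH")          # placeholder SMILES
configs = AdsorbateSlabConfig(
    slab,
    adsorbate,
    num_sites=10,
    num_augmentations_per_site=1,
    interstitial_gap=0.1,
    mode="random_site_heuristic_placement",
)
# The generated adsorbate-slab ase.Atoms objects are expected to be collected on the instance.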
-get_binding_sites(num_sites: int)#
-

Returns up to num_sites sites given the surface atoms’ positions.

-
- -
-
-place_adsorbate_on_site(adsorbate: fairchem.data.oc.core.Adsorbate, site: numpy.ndarray, interstitial_gap: float = 0.1)#
-

Place the adsorbate at the given binding site.

-
- -
-
-place_adsorbate_on_sites(sites: list, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1)#
-

Place the adsorbate at the given binding sites.

-
- -
-
-_get_scaled_normal(adsorbate_c: ase.Atoms, slab_c: ase.Atoms, site: numpy.ndarray, unit_normal: numpy.ndarray, interstitial_gap: float = 0.1)#
-

Get the scaled normal that gives a proximate configuration without atomic -overlap by:

-
-
    -
  1. Projecting the adsorbate and surface atoms onto the surface plane.

  2. Identifying all adsorbate atom - surface atom combinations for which an intersection would occur when translating along the normal. This is where the distance between the projected points is less than r_surface_atom + r_adsorbate_atom.

  3. Explicitly solving for the scaled normal at which the distance between surface atom and adsorbate atom = r_surface_atom + r_adsorbate_atom + interstitial_gap. This exploits the superposition of vectors and the distance formula, so it requires root finding.
-
-

Assumes that the adsorbate’s binding atom or center-of-mass (depending -on mode) is already placed at the site.

-
-
Parameters:
-
    -
  • adsorbate_c (ase.Atoms) – A copy of the adsorbate with coordinates at the site

  • -
  • slab_c (ase.Atoms) – A copy of the slab

  • -
  • site (np.ndarray) – the coordinate of the site

  • -
  • adsorbate_atoms (ase.Atoms) – the translated adsorbate

  • -
  • unit_normal (np.ndarray) – the unit vector normal to the surface

  • -
  • interstitial_gap (float) – the desired distance between the covalent radii of the -closest surface and adsorbate atom

  • -
-
-
Returns:
-

the magnitude of the normal vector for placement

-
-
Return type:
-

(float)

-
-
-
- -
-
-_find_combos_to_check(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray, interstitial_gap: float)#
-

Find the pairs of surface and adsorbate atoms that would have an intersection event -while traversing the normal vector. For each pair, return pertinent information for -finding the point of intersection. -:param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site -:type adsorbate_c2: ase.Atoms -:param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered

-
-

about the site

-
-
-
Parameters:
-
    -
  • unit_normal (np.ndarray) – the unit vector normal to the surface

  • -
  • interstitial_gap (float) – the desired distance between the covalent radii of the -closest surface and adsorbate atom

  • -
-
-
Returns:
-

-
each entry in the list corresponds to one pair to check. With the
-
following information:

[(adsorbate_idx, slab_idx), r_adsorbate_atom + r_slab_atom, slab_atom_position]

-
-
-
-
-

-
-
Return type:
-

(list[lists])

-
-
-
- -
-
-_get_projected_points(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray)#
-

Find the x and y coordinates of each atom projected onto the surface plane. -:param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site -:type adsorbate_c2: ase.Atoms -:param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered

-
-

about the site

-
-
-
Parameters:
-

unit_normal (np.ndarray) – the unit vector normal to the surface

-
-
Returns:
-

{“ads”: [[x1, y1], [x2, y2], …], “slab”: [[x1, y1], [x2, y2], …],}

-
-
Return type:
-

(dict)

-
-
-
- -
-
-get_metadata_dict(ind)#
-

Returns a dict containing the atoms object and metadata for -one specified config, used for writing to files.

-
- -
- -
-
-fairchem.data.oc.core.adsorbate_slab_config.get_random_sites_on_triangle(vertices: numpy.ndarray, num_sites: int = 10)#
-

Sample num_sites random sites uniformly on a given 3D triangle. -Following Sec. 4.2 from https://www.cs.princeton.edu/~funk/tog02.pdf.

-
- -
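For reference, a sketch of the uniform triangle sampling described in Sec. 4.2 of the cited reference (this illustrates the published formula, not necessarily the exact implementation here):

import numpy as np

def sample_sites_on_triangle(vertices: np.ndarray, num_sites: int = 10) -> np.ndarray:
    """vertices: (3, 3) array whose rows are the triangle corners A, B, C."""
    a, b, c = vertices
    r1, r2 = np.random.rand(2, num_sites, 1)
    # P = (1 - sqrt(r1)) A + sqrt(r1) (1 - r2) B + sqrt(r1) r2 C is uniform on the triangle.
    return (1 - np.sqrt(r1)) * a + np.sqrt(r1) * (1 - r2) * b + np.sqrt(r1) * r2 * c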
-
-fairchem.data.oc.core.adsorbate_slab_config.custom_tile_atoms(atoms: ase.Atoms)#
-

Tile the atoms so that the center tile has the indices and positions of the -untiled structure.

-
-
Parameters:
-

atoms (ase.Atoms) – the atoms object to be tiled

-
-
Returns:
-

-
the tiled atoms which has been repeated 3 times in

the x and y directions but maintains the original indices on the central -unit cell.

-
-
-

-
-
Return type:
-

(ase.Atoms)

-
-
-
- -
-
-fairchem.data.oc.core.adsorbate_slab_config.get_interstitial_distances(adsorbate_slab_config: ase.Atoms)#
-

Check to see if there is any atomic overlap between surface atoms -and adsorbate atoms.

-
-
Parameters:
-

adsorbate_slab_config (ase.Atoms) – a slab atoms object with an -adsorbate placed

-
-
Returns:
-

True if there is atomic overlap, otherwise False

-
-
Return type:
-

(bool)

-
-
-
- -
-
-fairchem.data.oc.core.adsorbate_slab_config.there_is_overlap(adsorbate_slab_config: ase.Atoms)#
-

Check to see if there is any atomic overlap between surface atoms -and adsorbate atoms.

-
-
Parameters:
-

adsorbate_slab_config (ase.Atoms) – a slab atoms object with an -adsorbate placed

-
-
Returns:
-

True if there is atomic overlap, otherwise False

-
-
Return type:
-

(bool)

-
-
-
- -
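A sketch of how such an overlap check can be expressed with ASE covalent radii and the tag convention used elsewhere in this package (adsorbate atoms tagged 2 or above); this illustrates the idea rather than the exact implementation:

import ase
from ase.data import covalent_radii

def has_overlap(adsorbate_slab_config: ase.Atoms, tolerance: float = 0.0) -> bool:
    tags = adsorbate_slab_config.get_tags()
    ads_idx = [i for i, t in enumerate(tags) if t >= 2]   # adsorbate atoms
    slab_idx = [i for i, t in enumerate(tags) if t < 2]   # bulk/surface atoms
    for i in ads_idx:
        for j in slab_idx:
            d = adsorbate_slab_config.get_distance(i, j, mic=True)
            r_sum = (covalent_radii[adsorbate_slab_config.numbers[i]]
                     + covalent_radii[adsorbate_slab_config.numbers[j]])
            if d < r_sum + tolerance:
                return True
    return False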
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/core/bulk/index.html b/autoapi/fairchem/data/oc/core/bulk/index.html deleted file mode 100644 index c63136b92..000000000 --- a/autoapi/fairchem/data/oc/core/bulk/index.html +++ /dev/null @@ -1,721 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.core.bulk — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.core.bulk#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

Bulk

Initializes a bulk object in one of 4 ways:

-
-
-class fairchem.data.oc.core.bulk.Bulk(bulk_atoms: ase.Atoms = None, bulk_id_from_db: int = None, bulk_src_id_from_db: str = None, bulk_db_path: str = BULK_PKL_PATH, bulk_db: List[Dict[str, Any]] = None)#
-

Initializes a bulk object in one of 4 ways: -- Directly pass in an ase.Atoms object. -- Pass in index of bulk to select from bulk database. -- Pass in the src_id of the bulk to select from the bulk database. -- Randomly sample a bulk from bulk database if no other option is passed.

-
-
Parameters:
-
    -
  • bulk_atoms (ase.Atoms) – Bulk structure.

  • -
  • bulk_id_from_db (int) – Index of bulk in database pkl to select.

  • -
  • bulk_src_id_from_db (str) – Src id of bulk to select (e.g. “mp-30”).

  • -
  • bulk_db_path (str) – Path to bulk database.

  • -
  • bulk_db (List[Dict[str, Any]]) – Already-loaded database.

  • -
-
-
-
-
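A short sketch of the four construction routes described above (the index and src id are placeholders):

import ase.build
from fairchem.data.oc.core import Bulk

bulk_from_atoms = Bulk(bulk_atoms=ase.build.bulk("Cu", "fcc", a=3.6))  # explicit ase.Atoms
bulk_by_index = Bulk(bulk_id_from_db=0)              # index into the bulk database pkl
bulk_by_src_id = Bulk(bulk_src_id_from_db="mp-30")   # src id, e.g. a Materials Project id
bulk_random = Bulk()                                 # randomly sampled from the database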
-_get_bulk_from_random(bulk_db)#
-
- -
-
-set_source_dataset_id(src_id: str)#
-
- -
-
-set_bulk_id_from_db(bulk_id_from_db: int)#
-
- -
-
-get_slabs(max_miller=2, precomputed_slabs_dir=None)#
-

Returns a list of possible slabs for this bulk instance.

-
- -
-
-__len__()#
-
- -
-
-__str__()#
-

Return str(self).

-
- -
-
-__repr__()#
-

Return repr(self).

-
- -
-
-__eq__(other) bool#
-

Return self==value.

-
- -
- -
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/core/index.html b/autoapi/fairchem/data/oc/core/index.html deleted file mode 100644 index 3128fa7d4..000000000 --- a/autoapi/fairchem/data/oc/core/index.html +++ /dev/null @@ -1,1187 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.core — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.core#

-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - -

Bulk

Initializes a bulk object in one of 4 ways:

Slab

Initializes a slab object, i.e. a particular slab tiled along xyz, in

Adsorbate

Initializes an adsorbate object in one of 4 ways:

AdsorbateSlabConfig

Initializes a list of adsorbate-catalyst systems for a given Adsorbate and Slab.

MultipleAdsorbateSlabConfig

Class to represent a slab with multiple adsorbates on it. This class only

-
-
-class fairchem.data.oc.core.Bulk(bulk_atoms: ase.Atoms = None, bulk_id_from_db: int = None, bulk_src_id_from_db: str = None, bulk_db_path: str = BULK_PKL_PATH, bulk_db: List[Dict[str, Any]] = None)#
-

Initializes a bulk object in one of 4 ways: -- Directly pass in an ase.Atoms object. -- Pass in index of bulk to select from bulk database. -- Pass in the src_id of the bulk to select from the bulk database. -- Randomly sample a bulk from bulk database if no other option is passed.

-
-
Parameters:
-
    -
  • bulk_atoms (ase.Atoms) – Bulk structure.

  • -
  • bulk_id_from_db (int) – Index of bulk in database pkl to select.

  • -
  • bulk_src_id_from_db (str) – Src id of bulk to select (e.g. “mp-30”).

  • -
  • bulk_db_path (str) – Path to bulk database.

  • -
  • bulk_db (List[Dict[str, Any]]) – Already-loaded database.

  • -
-
-
-
-
-_get_bulk_from_random(bulk_db)#
-
- -
-
-set_source_dataset_id(src_id: str)#
-
- -
-
-set_bulk_id_from_db(bulk_id_from_db: int)#
-
- -
-
-get_slabs(max_miller=2, precomputed_slabs_dir=None)#
-

Returns a list of possible slabs for this bulk instance.

-
- -
-
-__len__()#
-
- -
-
-__str__()#
-

Return str(self).

-
- -
-
-__repr__()#
-

Return repr(self).

-
- -
-
-__eq__(other) bool#
-

Return self==value.

-
- -
- -
-
-class fairchem.data.oc.core.Slab(bulk=None, slab_atoms: ase.Atoms = None, millers: tuple = None, shift: float = None, top: bool = None, oriented_bulk: pymatgen.core.structure.Structure = None, min_ab: float = 0.8)#
-

Initializes a slab object, i.e. a particular slab tiled along xyz, in -one of 2 ways: -- Pass in a Bulk object and a slab 5-tuple containing -(atoms, miller, shift, top, oriented bulk). -- Pass in a Bulk object and randomly sample a slab.

-
-
Parameters:
-
    -
  • bulk (Bulk) – Corresponding Bulk object.

  • -
  • slab_atoms (ase.Atoms) – Slab atoms, tiled and tagged

  • -
  • millers (tuple) – Miller indices of slab.

  • -
  • shift (float) – Shift of slab.

  • -
  • top (bool) – Whether slab is top or bottom.

  • -
  • min_ab (float) – Minimum distance the tiled slab structure must span along the a and b lattice vectors.

  • -
-
-
-
-
-classmethod from_bulk_get_random_slab(bulk=None, max_miller=2, min_ab=8.0, save_path=None)#
-
- -
-
-classmethod from_bulk_get_specific_millers(specific_millers, bulk=None, min_ab=8.0, save_path=None)#
-
- -
-
-classmethod from_bulk_get_all_slabs(bulk=None, max_miller=2, min_ab=8.0, save_path=None)#
-
- -
-
-classmethod from_precomputed_slabs_pkl(bulk=None, precomputed_slabs_pkl=None, max_miller=2, min_ab=8.0)#
-
- -
-
-classmethod from_atoms(atoms: ase.Atoms = None, bulk=None, **kwargs)#
-
- -
-
-has_surface_tagged()#
-
- -
-
-get_metadata_dict()#
-
- -
-
-__len__()#
-
- -
-
-__str__()#
-

Return str(self).

-
- -
-
-__repr__()#
-

Return repr(self).

-
- -
-
-__eq__(other)#
-

Return self==value.

-
- -
- -
-
-class fairchem.data.oc.core.Adsorbate(adsorbate_atoms: ase.Atoms = None, adsorbate_id_from_db: int = None, adsorbate_smiles_from_db: str = None, adsorbate_db_path: str = ADSORBATES_PKL_PATH, adsorbate_db: Dict[int, Tuple[Any, Ellipsis]] = None, adsorbate_binding_indices: list = None)#
-

Initializes an adsorbate object in one of 4 ways: -- Directly pass in an ase.Atoms object.

-
-

For this, you should also provide the index of the binding atom.

-
-
    -
  • Pass in index of adsorbate to select from adsorbate database.

  • -
  • Pass in the SMILES string of the adsorbate to select from the database.

  • -
  • Randomly sample an adsorbate from the adsorbate database.

  • -
-
-
Parameters:
-
    -
  • adsorbate_atoms (ase.Atoms) – Adsorbate structure.

  • -
  • adsorbate_id_from_db (int) – Index of adsorbate to select.

  • -
  • adsorbate_smiles_from_db (str) – A SMILES string of the desired adsorbate.

  • -
  • adsorbate_db_path (str) – Path to adsorbate database.

  • -
  • adsorbate_binding_indices (list) – The index/indices of the adsorbate atoms which are expected to bind.

  • -
-
-
-
-
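A brief sketch of the construction routes listed above (the SMILES string and index are placeholders):

from fairchem.data.oc.core import Adsorbate

ads_by_smiles = Adsorbate(adsorbate_smiles_from_db="*OH")  # look up by SMILES
ads_by_index = Adsorbate(adsorbate_id_from_db=0)           # look up by database index
ads_random = Adsorbate()                                   # randomly sampled from the database
# Or pass your own ase.Atoms plus the expected binding atom index/indices:
# Adsorbate(adsorbate_atoms=my_atoms, adsorbate_binding_indices=[0])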
-__len__()#
-
- -
-
-__str__()#
-

Return str(self).

-
- -
-
-__repr__()#
-

Return repr(self).

-
- -
-
-_get_adsorbate_from_random(adsorbate_db)#
-
- -
-
-_load_adsorbate(adsorbate: Tuple[Any, Ellipsis]) None#
-

Saves the fields from an adsorbate stored in a database. Fields added -after the first revision are conditionally added for backwards -compatibility with older database files.

-
- -
- -
-
-class fairchem.data.oc.core.AdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbate: fairchem.data.oc.core.Adsorbate, num_sites: int = 100, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1, mode: str = 'random')#
-

Initializes a list of adsorbate-catalyst systems for a given Adsorbate and Slab.

-
-
Parameters:
-
    -
  • slab (Slab) – Slab object.

  • -
  • adsorbate (Adsorbate) – Adsorbate object.

  • -
  • num_sites (int) – Number of sites to sample.

  • -
  • num_augmentations_per_site (int) – Number of augmentations of the adsorbate per site. Total number of -generated structures will be num_sites * num_augmentations_per_site.

  • -
  • interstitial_gap (float) – Minimum distance in Angstroms between adsorbate and slab atoms.

  • -
  • mode (str) –

    “random”, “heuristic”, or “random_site_heuristic_placement”. -This affects surface site sampling and adsorbate placement on each site.

    -

    In “random”, we do a Delaunay triangulation of the surface atoms, then -sample sites uniformly at random within each triangle. When placing the -adsorbate, we randomly rotate it along xyz, and place it such that the -center of mass is at the site.

    -

    In “heuristic”, we use Pymatgen’s AdsorbateSiteFinder to find the most -energetically favorable sites, i.e., ontop, bridge, or hollow sites. -When placing the adsorbate, we randomly rotate it along z with only -slight rotation along x and y, and place it such that the binding atom -is at the site.

    -

    In “random_site_heuristic_placement”, we do a Delaunay triangulation of -the surface atoms, then sample sites uniformly at random within each -triangle. When placing the adsorbate, we randomly rotate it along z with -only slight rotation along x and y, and place it such that the binding -atom is at the site.

    -

    In all cases, the adsorbate is placed at the closest position of no -overlap with the slab plus interstitial_gap along the surface normal.

    -

  • -
-
-
-
-
-get_binding_sites(num_sites: int)#
-

Returns up to num_sites sites given the surface atoms’ positions.

-
- -
-
-place_adsorbate_on_site(adsorbate: fairchem.data.oc.core.Adsorbate, site: numpy.ndarray, interstitial_gap: float = 0.1)#
-

Place the adsorbate at the given binding site.

-
- -
-
-place_adsorbate_on_sites(sites: list, num_augmentations_per_site: int = 1, interstitial_gap: float = 0.1)#
-

Place the adsorbate at the given binding sites.

-
- -
-
-_get_scaled_normal(adsorbate_c: ase.Atoms, slab_c: ase.Atoms, site: numpy.ndarray, unit_normal: numpy.ndarray, interstitial_gap: float = 0.1)#
-

Get the scaled normal that gives a proximate configuration without atomic -overlap by:

-
-
    -
  1. Projecting the adsorbate and surface atoms onto the surface plane.

  2. Identifying all adsorbate atom - surface atom combinations for which an intersection would occur when translating along the normal. This is where the distance between the projected points is less than r_surface_atom + r_adsorbate_atom.

  3. Explicitly solving for the scaled normal at which the distance between surface atom and adsorbate atom = r_surface_atom + r_adsorbate_atom + interstitial_gap. This exploits the superposition of vectors and the distance formula, so it requires root finding.
-
-

Assumes that the adsorbate’s binding atom or center-of-mass (depending -on mode) is already placed at the site.

-
-
Parameters:
-
    -
  • adsorbate_c (ase.Atoms) – A copy of the adsorbate with coordinates at the site

  • -
  • slab_c (ase.Atoms) – A copy of the slab

  • -
  • site (np.ndarray) – the coordinate of the site

  • -
  • adsorbate_atoms (ase.Atoms) – the translated adsorbate

  • -
  • unit_normal (np.ndarray) – the unit vector normal to the surface

  • -
  • interstitial_gap (float) – the desired distance between the covalent radii of the -closest surface and adsorbate atom

  • -
-
-
Returns:
-

the magnitude of the normal vector for placement

-
-
Return type:
-

(float)

-
-
-
- -
-
-_find_combos_to_check(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray, interstitial_gap: float)#
-

Find the pairs of surface and adsorbate atoms that would have an intersection event -while traversing the normal vector. For each pair, return pertinent information for -finding the point of intersection. -:param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site -:type adsorbate_c2: ase.Atoms -:param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered

-
-

about the site

-
-
-
Parameters:
-
    -
  • unit_normal (np.ndarray) – the unit vector normal to the surface

  • -
  • interstitial_gap (float) – the desired distance between the covalent radii of the -closest surface and adsorbate atom

  • -
-
-
Returns:
-

-
each entry in the list corresponds to one pair to check. With the
-
following information:

[(adsorbate_idx, slab_idx), r_adsorbate_atom + r_slab_atom, slab_atom_position]

-
-
-
-
-

-
-
Return type:
-

(list[lists])

-
-
-
- -
-
-_get_projected_points(adsorbate_c2: ase.Atoms, slab_c2: ase.Atoms, unit_normal: numpy.ndarray)#
-

Find the x and y coordinates of each atom projected onto the surface plane. -:param adsorbate_c2: A copy of the adsorbate with coordinates at the centered site -:type adsorbate_c2: ase.Atoms -:param slab_c2: A copy of the slab with atoms wrapped s.t. things are centered

-
-

about the site

-
-
-
Parameters:
-

unit_normal (np.ndarray) – the unit vector normal to the surface

-
-
Returns:
-

{“ads”: [[x1, y1], [x2, y2], …], “slab”: [[x1, y1], [x2, y2], …],}

-
-
Return type:
-

(dict)

-
-
-
- -
-
-get_metadata_dict(ind)#
-

Returns a dict containing the atoms object and metadata for -one specified config, used for writing to files.

-
- -
- -
-
-class fairchem.data.oc.core.MultipleAdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbates: List[fairchem.data.oc.core.Adsorbate], num_sites: int = 100, num_configurations: int = 1, interstitial_gap: float = 0.1, mode: str = 'random_site_heuristic_placement')#
-

Bases: fairchem.data.oc.core.AdsorbateSlabConfig

-

Class to represent a slab with multiple adsorbates on it. This class only -returns a fixed combination of adsorbates placed on the surface. Unlike -AdsorbateSlabConfig which enumerates all possible adsorbate placements, this -problem gets combinatorially large.

-
-
Parameters:
-
    -
  • slab (Slab) – Slab object.

  • -
  • adsorbates (List[Adsorbate]) – List of adsorbate objects to place on the slab.

  • -
  • num_sites (int) – Number of sites to sample.

  • -
  • num_configurations (int) – Number of configurations to generate per slab+adsorbate(s) combination. -This corresponds to selecting different site combinations to place -the adsorbates on.

  • -
  • interstitial_gap (float) – Minimum distance, in Angstroms, between adsorbate and slab atoms as -well as the inter-adsorbate distance.

  • -
  • mode (str) –

    “random”, “heuristic”, or “random_site_heuristic_placement”. -This affects surface site sampling and adsorbate placement on each site.

    -

    In “random”, we do a Delaunay triangulation of the surface atoms, then -sample sites uniformly at random within each triangle. When placing the -adsorbate, we randomly rotate it along xyz, and place it such that the -center of mass is at the site.

    -

    In “heuristic”, we use Pymatgen’s AdsorbateSiteFinder to find the most -energetically favorable sites, i.e., ontop, bridge, or hollow sites. -When placing the adsorbate, we randomly rotate it along z with only -slight rotation along x and y, and place it such that the binding atom -is at the site.

    -

    In “random_site_heuristic_placement”, we do a Delaunay triangulation of -the surface atoms, then sample sites uniformly at random within each -triangle. When placing the adsorbate, we randomly rotate it along z with -only slight rotation along x and y, and place it such that the binding -atom is at the site.

    -

    In all cases, the adsorbate is placed at the closest position of no -overlap with the slab plus interstitial_gap along the surface normal.

    -

  • -
-
-
-
-
-place_adsorbates_on_sites(sites: list, num_configurations: int = 1, interstitial_gap: float = 0.1)#
-

Place the adsorbate at the given binding sites.

-

This method generates a fixed number of configurations where sites are -selected to ensure that adsorbate binding indices are at least a fair -distance away from each other (covalent radii + interstitial gap). -While this helps prevent adsorbate overlap, it does not guarantee it -since non-binding adsorbate atoms can overlap if the right combination -of angles is sampled.

-
- -
-
-get_metadata_dict(ind)#
-

Returns a dict containing the atoms object and metadata for -one specified config, used for writing to files.

-
- -
- -
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/core/multi_adsorbate_slab_config/index.html b/autoapi/fairchem/data/oc/core/multi_adsorbate_slab_config/index.html deleted file mode 100644 index 508f10563..000000000 --- a/autoapi/fairchem/data/oc/core/multi_adsorbate_slab_config/index.html +++ /dev/null @@ -1,729 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.core.multi_adsorbate_slab_config — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.core.multi_adsorbate_slab_config#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

MultipleAdsorbateSlabConfig

Class to represent a slab with multiple adsorbates on it. This class only

-
-
-

Functions#

- - - - - - -

update_distance_map(prev_distance_map, site_idx, ...)

Given a new site and the adsorbate we plan on placing there,

-
-
-class fairchem.data.oc.core.multi_adsorbate_slab_config.MultipleAdsorbateSlabConfig(slab: fairchem.data.oc.core.Slab, adsorbates: List[fairchem.data.oc.core.Adsorbate], num_sites: int = 100, num_configurations: int = 1, interstitial_gap: float = 0.1, mode: str = 'random_site_heuristic_placement')#
-

Bases: fairchem.data.oc.core.AdsorbateSlabConfig

-

Class to represent a slab with multiple adsorbates on it. This class only -returns a fixed combination of adsorbates placed on the surface. Unlike -AdsorbateSlabConfig which enumerates all possible adsorbate placements, this -problem gets combinatorially large.

-
-
Parameters:
-
    -
  • slab (Slab) – Slab object.

  • -
  • adsorbates (List[Adsorbate]) – List of adsorbate objects to place on the slab.

  • -
  • num_sites (int) – Number of sites to sample.

  • -
  • num_configurations (int) – Number of configurations to generate per slab+adsorbate(s) combination. -This corresponds to selecting different site combinations to place -the adsorbates on.

  • -
  • interstitial_gap (float) – Minimum distance, in Angstroms, between adsorbate and slab atoms as -well as the inter-adsorbate distance.

  • -
  • mode (str) –

    “random”, “heuristic”, or “random_site_heuristic_placement”. -This affects surface site sampling and adsorbate placement on each site.

    -

    In “random”, we do a Delaunay triangulation of the surface atoms, then -sample sites uniformly at random within each triangle. When placing the -adsorbate, we randomly rotate it along xyz, and place it such that the -center of mass is at the site.

    -

    In “heuristic”, we use Pymatgen’s AdsorbateSiteFinder to find the most -energetically favorable sites, i.e., ontop, bridge, or hollow sites. -When placing the adsorbate, we randomly rotate it along z with only -slight rotation along x and y, and place it such that the binding atom -is at the site.

    -

    In “random_site_heuristic_placement”, we do a Delaunay triangulation of -the surface atoms, then sample sites uniformly at random within each -triangle. When placing the adsorbate, we randomly rotate it along z with -only slight rotation along x and y, and place it such that the binding -atom is at the site.

    -

    In all cases, the adsorbate is placed at the closest position of no -overlap with the slab plus interstitial_gap along the surface normal.

    -

  • -
-
-
-
-
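A minimal sketch of placing two adsorbates on one slab (the SMILES strings and bulk id are placeholders):

from fairchem.data.oc.core import Adsorbate, Bulk, MultipleAdsorbateSlabConfig, Slab

bulk = Bulk(bulk_src_id_from_db="mp-30")
slab = Slab.from_bulk_get_random_slab(bulk=bulk)
adsorbates = [
    Adsorbate(adsorbate_smiles_from_db="*O"),
    Adsorbate(adsorbate_smiles_from_db="*CO"),
]
multi_configs = MultipleAdsorbateSlabConfig(
    slab,
    adsorbates,
    num_configurations=2,
    mode="random_site_heuristic_placement",
)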
-place_adsorbates_on_sites(sites: list, num_configurations: int = 1, interstitial_gap: float = 0.1)#
-

Place the adsorbate at the given binding sites.

-

This method generates a fixed number of configurations where sites are -selected to ensure that adsorbate binding indices are at least a fair -distance away from each other (covalent radii + interstitial gap). -While this helps prevent adsorbate overlap, it does not guarantee it -since non-binding adsorbate atoms can overlap if the right combination -of angles is sampled.

-
- -
-
-get_metadata_dict(ind)#
-

Returns a dict containing the atoms object and metadata for -one specified config, used for writing to files.

-
- -
- -
-
-fairchem.data.oc.core.multi_adsorbate_slab_config.update_distance_map(prev_distance_map, site_idx, adsorbate, pseudo_atoms)#
-

Given a new site and the adsorbate we plan on placing there, -update the distance mapping to reflect the new distances from sites to nearest adsorbates. -We incorporate the covalent radii of the placed adsorbate binding atom in our distance -calculation to prevent atom overlap.

-
- -
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/core/slab/index.html b/autoapi/fairchem/data/oc/core/slab/index.html deleted file mode 100644 index df2c55d47..000000000 --- a/autoapi/fairchem/data/oc/core/slab/index.html +++ /dev/null @@ -1,1081 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.core.slab — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.core.slab#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

Slab

Initializes a slab object, i.e. a particular slab tiled along xyz, in

-
-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

tile_and_tag_atoms(unit_slab_struct, bulk_atoms[, min_ab])

This function combines the next three functions that tile, tag,

set_fixed_atom_constraints(atoms)

This function fixes sub-surface atoms of a surface. Also works on systems

tag_surface_atoms([slab_atoms, bulk_atoms])

Sets the tags of an ase.Atoms object. Any atom that we consider a "bulk"

tile_atoms(atoms[, min_ab])

This function will repeat an atoms structure in the direction of the a and b

find_surface_atoms_by_height(surface_atoms)

As discussed in the docstring for find_surface_atoms_with_voronoi,

find_surface_atoms_with_voronoi_given_height(...)

Labels atoms as surface or bulk atoms according to their coordination

calculate_center_of_mass(struct)

Calculates the center of mass of the slab.

calculate_coordination_of_bulk_atoms(bulk_atoms)

Finds all unique atoms in a bulk structure and then determines their

compute_slabs([bulk_atoms, max_miller, specific_millers])

Enumerates all the symmetrically distinct slabs of a bulk structure.

flip_struct(struct)

Flips an atoms object upside down. Normally used to flip slabs.

is_structure_invertible(struct)

This function figures out whether or not an Structure

standardize_bulk(atoms)

There are many ways to define a bulk unit cell. If you change the unit

-
-
-class fairchem.data.oc.core.slab.Slab(bulk=None, slab_atoms: ase.Atoms = None, millers: tuple = None, shift: float = None, top: bool = None, oriented_bulk: pymatgen.core.structure.Structure = None, min_ab: float = 0.8)#
-

Initializes a slab object, i.e. a particular slab tiled along xyz, in -one of 2 ways: -- Pass in a Bulk object and a slab 5-tuple containing -(atoms, miller, shift, top, oriented bulk). -- Pass in a Bulk object and randomly sample a slab.

-
-
Parameters:
-
    -
  • bulk (Bulk) – Corresponding Bulk object.

  • -
  • slab_atoms (ase.Atoms) – Slab atoms, tiled and tagged

  • -
  • millers (tuple) – Miller indices of slab.

  • -
  • shift (float) – Shift of slab.

  • -
  • top (bool) – Whether slab is top or bottom.

  • -
  • min_ab (float) – Minimum distance the tiled slab structure must span along the a and b lattice vectors.

  • -
-
-
-
-
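A brief sketch of the classmethod constructors documented below (the Miller indices and bulk id are illustrative):

from fairchem.data.oc.core import Bulk, Slab

bulk = Bulk(bulk_src_id_from_db="mp-30")
random_slab = Slab.from_bulk_get_random_slab(bulk=bulk, max_miller=2)
slabs_111 = Slab.from_bulk_get_specific_millers((1, 1, 1), bulk=bulk)  # slabs matching (1,1,1)
all_slabs = Slab.from_bulk_get_all_slabs(bulk=bulk, max_miller=2)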
-classmethod from_bulk_get_random_slab(bulk=None, max_miller=2, min_ab=8.0, save_path=None)#
-
- -
-
-classmethod from_bulk_get_specific_millers(specific_millers, bulk=None, min_ab=8.0, save_path=None)#
-
- -
-
-classmethod from_bulk_get_all_slabs(bulk=None, max_miller=2, min_ab=8.0, save_path=None)#
-
- -
-
-classmethod from_precomputed_slabs_pkl(bulk=None, precomputed_slabs_pkl=None, max_miller=2, min_ab=8.0)#
-
- -
-
-classmethod from_atoms(atoms: ase.Atoms = None, bulk=None, **kwargs)#
-
- -
-
-has_surface_tagged()#
-
- -
-
-get_metadata_dict()#
-
- -
-
-__len__()#
-
- -
-
-__str__()#
-

Return str(self).

-
- -
-
-__repr__()#
-

Return repr(self).

-
- -
-
-__eq__(other)#
-

Return self==value.

-
- -
- -
-
-fairchem.data.oc.core.slab.tile_and_tag_atoms(unit_slab_struct: pymatgen.core.structure.Structure, bulk_atoms: ase.Atoms, min_ab: float = 8)#
-

This function combines the next three functions that tile, tag, -and constrain the atoms.

-
-
Parameters:
-
    -
  • unit_slab_struct (Structure) – The untiled slab structure

  • -
  • bulk_atoms (ase.Atoms) – Atoms of the corresponding bulk structure, used for tagging

  • -
  • min_ab (float) – The minimum distance in x and y spanned by the tiled structure.

  • -
-
-
Returns:
-

atoms_tiled – A copy of the slab atoms that is tiled, tagged, and constrained

-
-
Return type:
-

ase.Atoms

-
-
-
- -
-
-fairchem.data.oc.core.slab.set_fixed_atom_constraints(atoms)#
-

This function fixes sub-surface atoms of a surface. Also works on systems -that have surface + adsorbate(s), as long as the bulk atoms are tagged with -0, surface atoms are tagged with 1, and the adsorbate atoms are tagged -with 2 or above.

-

This is used for both surface atoms and the combined surface+adsorbate.

-
-
Parameters:
-

atoms (ase.Atoms) – Atoms object of the slab or slab+adsorbate system, with bulk atoms -tagged as 0, surface atoms tagged as 1, and adsorbate atoms tagged -as 2 or above.

-
-
Returns:
-

atoms – A deep copy of the atoms argument, but where the appropriate -atoms are constrained.

-
-
Return type:
-

ase.Atoms

-
-
-
- -
-
-fairchem.data.oc.core.slab.tag_surface_atoms(slab_atoms: ase.Atoms = None, bulk_atoms: ase.Atoms = None)#
-

Sets the tags of an ase.Atoms object. Any atom that we consider a “bulk” -atom will have a tag of 0, and any atom that we consider a “surface” atom -will have a tag of 1. We use a combination of Voronoi neighbor algorithms -(adapted from pymatgen.core.surface.Slab.get_surface_sites; see -https://pymatgen.org/pymatgen.core.surface.html) and a distance cutoff.

-
-
Parameters:
-
    -
  • slab_atoms (ase.Atoms) – The slab where you are trying to find surface sites.

  • -
  • bulk_atoms (ase.Atoms) – The bulk structure that the surface was cut from.

  • -
-
-
Returns:
-

slab_atoms – A copy of the slab atoms with the surface atoms tagged as 1.

-
-
Return type:
-

ase.Atoms

-
-
-
- -
-
-fairchem.data.oc.core.slab.tile_atoms(atoms: ase.Atoms, min_ab: float = 8)#
-

This function will repeat an atoms structure in the direction of the a and b -lattice vectors such that they are at least as wide as the min_ab constant.

-
-
Parameters:
-
    -
  • atoms (ase.Atoms) – The structure to tile.

  • -
  • min_ab (float) – The minimum distance in x and y spanned by the tiled structure.

  • -
-
-
Returns:
-

atoms_tiled – The tiled structure.

-
-
Return type:
-

ase.Atoms

-
-
-
- -
-
-fairchem.data.oc.core.slab.find_surface_atoms_by_height(surface_atoms)#
-

As discussed in the docstring for find_surface_atoms_with_voronoi, -sometimes we might accidentally tag a surface atom as a bulk atom if there -are multiple coordination environments for that atom type within the bulk. -One heuristic that we use to address this is to simply figure out if an -atom is close to the surface. This function will figure that out.

-

Specifically: We consider an atom a surface atom if it is within 2 -Angstroms of the highest atom in the z-direction (or more accurately, the -direction of the 3rd unit cell vector).

-
-
Parameters:
-

surface_atoms (ase.Atoms)

-
-
Returns:
-

tags – A list that contains the indices of the surface atoms.

-
-
Return type:
-

list

-
-
-
- -
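A sketch of the 2 Angstrom height heuristic described above, assuming the third cell vector points away from the surface (this mirrors the description, not necessarily the exact code):

import numpy as np
import ase

def surface_indices_by_height(surface_atoms: ase.Atoms, threshold: float = 2.0) -> list:
    # Project each atom onto the direction of the third unit cell vector and
    # keep atoms within `threshold` of the highest one.
    unit_c = surface_atoms.cell[2] / np.linalg.norm(surface_atoms.cell[2])
    heights = surface_atoms.positions @ unit_c
    return [i for i, h in enumerate(heights) if h > heights.max() - threshold]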
-
-fairchem.data.oc.core.slab.find_surface_atoms_with_voronoi_given_height(bulk_atoms, slab_atoms, height_tags)#
-

Labels atoms as surface or bulk atoms according to their coordination -relative to their bulk structure. If an atom’s coordination is less than it -normally is in a bulk, then we consider it a surface atom. We calculate the -coordination using pymatgen’s Voronoi algorithms.

-

Note that if a single element has different sites within a bulk and these -sites have different coordinations, then we consider slab atoms -“under-coordinated” only if they are less coordinated than the most -under-coordinated bulk atom. For example: Say we have a bulk with two Cu -sites. One site has a coordination of 12 and another a coordination of 9. -If a slab atom has a coordination of 10, we will consider it a bulk atom.

-
-
Parameters:
-
    -
  • bulk_atoms (ase.Atoms) – The bulk structure that the surface was cut from.

  • -
  • slab_atoms (ase.Atoms) – The slab structure.

  • -
  • height_tags (list) – The tags determined by the find_surface_atoms_by_height algo.

  • -
-
-
Returns:
-

tags – A list of 0s and 1s whose indices align with the atoms in -slab_atoms. 0s indicate a bulk atom and 1 indicates a surface atom.

-
-
Return type:
-

list

-
-
-
- -
-
-fairchem.data.oc.core.slab.calculate_center_of_mass(struct)#
-

Calculates the center of mass of the slab.

-
- -
-
-fairchem.data.oc.core.slab.calculate_coordination_of_bulk_atoms(bulk_atoms)#
-

Finds all unique atoms in a bulk structure and then determines their -coordination number. Then parses these coordination numbers into a -dictionary whose keys are the elements of the atoms and whose values are -their possible coordination numbers. -For example: bulk_cns = {‘Pt’: {3., 12.}, ‘Pd’: {12.}}

-
-
Parameters:
-

bulk_atoms (ase.Atoms) – The bulk structure.

-
-
Returns:
-

bulk_cn_dict – A dictionary whose keys are the elements of the atoms and whose values -are their possible coordination numbers.

-
-
Return type:
-

dict

-
-
-
- -
-
-fairchem.data.oc.core.slab.compute_slabs(bulk_atoms: ase.Atoms = None, max_miller: int = 2, specific_millers: list = None)#
-

Enumerates all the symmetrically distinct slabs of a bulk structure. -It will not enumerate slabs with Miller indices above the -max_miller argument. Note that we also look at the bottoms of slabs -if they are distinct from the top. If they are distinct, we flip the -surface so the bottom is pointing upwards.

-
-
Parameters:
-
    -
  • bulk_atoms (ase.Atoms) – The bulk structure.

  • -
  • max_miller (int) – The maximum Miller index of the slabs to enumerate. Increasing this -argument will increase the number of slabs, and the slabs will generally -become larger.

  • -
  • specific_millers (list) – A list of Miller indices that you want to enumerate. If this argument -is not None, then the max_miller argument is ignored.

  • -
-
-
Returns:
-

all_slabs_info – A list of 5-tuples containing pymatgen structure objects for enumerated -slabs, the Miller indices, floats for the shifts, booleans for top, and -the oriented bulk structure.

-
-
Return type:
-

list

-
-
-
- -
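A short usage sketch (the bulk structure is a placeholder; the tuple unpacking assumes the ordering stated in the docstring above):

import ase.build
from fairchem.data.oc.core.slab import compute_slabs

slabs_info = compute_slabs(bulk_atoms=ase.build.bulk("Cu", "fcc", a=3.6), max_miller=1)
for struct, millers, shift, top, oriented_bulk in slabs_info:
    print(millers, shift, top)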
-
-fairchem.data.oc.core.slab.flip_struct(struct: pymatgen.core.structure.Structure)#
-

Flips an atoms object upside down. Normally used to flip slabs.

-
-
Parameters:
-

struct (Structure) – pymatgen structure object of the surface you want to flip

-
-
Returns:
-

flipped_struct – pymatgen structure object of the flipped surface.

-
-
Return type:
-

Structure

-
-
-
- -
-
-fairchem.data.oc.core.slab.is_structure_invertible(struct: pymatgen.core.structure.Structure)#
-

This function figures out whether or not a Structure -object is symmetric in the z-direction. In this function, the affine matrix is a rotation -matrix that is multiplied with the XYZ positions of the crystal. If the z,z -component of that is negative, it means a symmetry operation exists; it could -be a mirror operation, or one that involves multiple rotations/etc. -Regardless, it means that the top becomes the bottom and vice-versa, and the -structure is symmetric, i.e. structure_XYZ = structure_XYZ*M.

-

In short: If this function returns False, then the input structure can -be flipped in the z-direction to create a new structure.

-
-
Parameters:
-

struct (Structure) – pymatgen structure object of the slab.

-
-
Returns:
-

    -
  • A boolean indicating whether or not your ase.Atoms object is

  • -
  • symmetric in z-direction (i.e. symmetric with respect to x-y plane).

  • -
-

-
-
-
- -
-
-fairchem.data.oc.core.slab.standardize_bulk(atoms: ase.Atoms)#
-

There are many ways to define a bulk unit cell. If you change the unit -cell itself but also change the locations of the atoms within the unit -cell, you can effectively get the same bulk structure. To address this, -there is a standardization method used to reduce the degrees of freedom -such that each unit cell only has one “true” configuration. This -function will align a unit cell you give it to fit within this -standardization.

-
-
Parameters:
-

atoms (ase.Atoms) – ase.Atoms object of the bulk you want to standardize.

-
-
Returns:
-

standardized_struct – pymatgen structure object of the standardized bulk.

-
-
Return type:
-

Structure

-
-
-
- -
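One common way to perform such a standardization with pymatgen is sketched below (the actual implementation may differ in details):

import ase
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer

def standardize(atoms: ase.Atoms):
    struct = AseAtomsAdaptor.get_structure(atoms)
    # One canonical setting per bulk, regardless of the input cell choice.
    return SpacegroupAnalyzer(struct).get_conventional_standard_structure()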
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/databases/index.html b/autoapi/fairchem/data/oc/databases/index.html deleted file mode 100644 index 330e388cb..000000000 --- a/autoapi/fairchem/data/oc/databases/index.html +++ /dev/null @@ -1,629 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.databases — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.databases#

-
-

Subpackages#

- -
-
-

Submodules#

- -
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/databases/pkls/index.html b/autoapi/fairchem/data/oc/databases/pkls/index.html deleted file mode 100644 index 56e566961..000000000 --- a/autoapi/fairchem/data/oc/databases/pkls/index.html +++ /dev/null @@ -1,632 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.databases.pkls — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.databases.pkls#

-
-

Package Contents#

-
-
-fairchem.data.oc.databases.pkls.BULK_PKL_PATH#
-
- -
-
-fairchem.data.oc.databases.pkls.ADSORBATES_PKL_PATH#
-
- -
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/databases/update/index.html b/autoapi/fairchem/data/oc/databases/update/index.html deleted file mode 100644 index 4991ac768..000000000 --- a/autoapi/fairchem/data/oc/databases/update/index.html +++ /dev/null @@ -1,673 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.databases.update — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.databases.update#

-

Script for updating ase pkl and db files from v3.19 to v3.21. -Run it with ase v3.19.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - -

pbc_patch(self)

set_pbc_patch(self, pbc)

update_pkls()

update_dbs()

-
-
-fairchem.data.oc.databases.update.pbc_patch(self)#
-
- -
-
-fairchem.data.oc.databases.update.set_pbc_patch(self, pbc)#
-
- -
-
-fairchem.data.oc.databases.update.update_pkls()#
-
- -
-
-fairchem.data.oc.databases.update.update_dbs()#
-
- -
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/experimental/get_energies/index.html b/autoapi/fairchem/data/oc/experimental/get_energies/index.html deleted file mode 100644 index 6544956cd..000000000 --- a/autoapi/fairchem/data/oc/experimental/get_energies/index.html +++ /dev/null @@ -1,672 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.experimental.get_energies — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.experimental.get_energies#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

extract_file(zipname, file_to_unzip, extract_to)

process_func(indices, dirlist, ans)

-
-
-

Attributes#

- - - - - - -

input_folder

-
-
-fairchem.data.oc.experimental.get_energies.extract_file(zipname, file_to_unzip, extract_to)#
-
- -
-
-fairchem.data.oc.experimental.get_energies.process_func(indices, dirlist, ans)#
-
- -
-
-fairchem.data.oc.experimental.get_energies.input_folder = 'temp_download/'#
-
- -
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/experimental/merge_traj/index.html b/autoapi/fairchem/data/oc/experimental/merge_traj/index.html deleted file mode 100644 index 7e925f368..000000000 --- a/autoapi/fairchem/data/oc/experimental/merge_traj/index.html +++ /dev/null @@ -1,655 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.experimental.merge_traj — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.experimental.merge_traj#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

extract_file(zipname, file_to_unzip, extract_to)

main()

Given a directory containing adsorbate subdirectories, loops through all

-
-
-fairchem.data.oc.experimental.merge_traj.extract_file(zipname, file_to_unzip, extract_to)#
-
- -
-
-fairchem.data.oc.experimental.merge_traj.main()#
-

Given a directory containing adsorbate subdirectories, loops through all -runs and merges intermediate checkpoints into a single, full trajectory.

-
- -
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/experimental/perturb_systems/index.html b/autoapi/fairchem/data/oc/experimental/perturb_systems/index.html deleted file mode 100644 index a5a807820..000000000 --- a/autoapi/fairchem/data/oc/experimental/perturb_systems/index.html +++ /dev/null @@ -1,646 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.experimental.perturb_systems — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.experimental.perturb_systems#

-
-

Module Contents#

-
-

Functions#

- - - - - - -

main()

Rattles every image along a relaxation pathway at 5 different variances.

-
-
-fairchem.data.oc.experimental.perturb_systems.main()#
-

Rattles every image along a relaxation pathway at 5 different variances. -Rattled images are then put in their own directory along with the input -files necessary to run VASP calculations.

-
- -
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/experimental/rattle_test/index.html b/autoapi/fairchem/data/oc/experimental/rattle_test/index.html deleted file mode 100644 index 9e2a6c49d..000000000 --- a/autoapi/fairchem/data/oc/experimental/rattle_test/index.html +++ /dev/null @@ -1,645 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.experimental.rattle_test — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.experimental.rattle_test#

-
-

Module Contents#

-
-

Functions#

- - - - - - -

main()

Checks whether ASE's rattle modifies fixed atoms.

-
-
-fairchem.data.oc.experimental.rattle_test.main()#
-

Checks whether ASE’s rattle modifies fixed atoms.

-
- -
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/experimental/utils/index.html b/autoapi/fairchem/data/oc/experimental/utils/index.html deleted file mode 100644 index e11cfc998..000000000 --- a/autoapi/fairchem/data/oc/experimental/utils/index.html +++ /dev/null @@ -1,680 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.experimental.utils — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.experimental.utils#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - -

v0_check(full_traj, initial)

Checks whether the initial structure as gathered from the POSCAR input file

restart_bug_check(full_traj)

Observed that some of the trajectories had a strange identically cyclical

plot_traj(traj, fname)

Plots the energy profile of a given trajectory

-
-
-fairchem.data.oc.experimental.utils.v0_check(full_traj, initial)#
-

Checks whether the initial structure as gathered from the POSCAR input file -is in agreement with the initial image of the full trajectory. If not, the -trajectory comes from the V0 dataset, which failed to save intermediate -checkpoints.

-

Args -full_traj (list of Atoms objects): Calculated full trajectory. -initial (Atoms object): Starting image provided by POSCAR.

-
- -
-
-fairchem.data.oc.experimental.utils.restart_bug_check(full_traj)#
-

Observed that some of the trajectories had a strange identically cyclical -behavior - suggesting that a checkpoint was restarted from an earlier -checkpoint rather than the latest. Checks whether the trajectory provided -falls within that bug.

-

Args -full_traj (list of Atoms objects): Calculated full trajectory.

-
- -
-
-fairchem.data.oc.experimental.utils.plot_traj(traj, fname)#
-

Plots the energy profile of a given trajectory

-

Args -traj (list of Atoms objects): Full trajectory to be plotted -fname (str): Filename to be used as title and save figure as.

-
- -
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/index.html b/autoapi/fairchem/data/oc/index.html deleted file mode 100644 index 00c9c2aab..000000000 --- a/autoapi/fairchem/data/oc/index.html +++ /dev/null @@ -1,648 +0,0 @@ - - - - - - - - - - - fairchem.data.oc — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc#

-
-

Subpackages#

- -
-
-

Submodules#

- -
- - \ No newline at end of file diff --git a/autoapi/fairchem/data/oc/scripts/precompute_sample_structures/index.html b/autoapi/fairchem/data/oc/scripts/precompute_sample_structures/index.html deleted file mode 100644 index 55d95710b..000000000 --- a/autoapi/fairchem/data/oc/scripts/precompute_sample_structures/index.html +++ /dev/null @@ -1,797 +0,0 @@ - - - - - - - - - - - fairchem.data.oc.scripts.precompute_sample_structures — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

fairchem.data.oc.scripts.precompute_sample_structures#

-

This submodule contains the scripts that the we used to sample the adsorption -structures.

-

Note that some of these scripts were taken from -[GASpy](ulissigroup/GASpy) with permission of author.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - -

enumerate_surfaces_for_saving(bulk_atoms[, max_miller])

Enumerate all the symmetrically distinct surfaces of a bulk structure. It

standardize_bulk(atoms)

There are many ways to define a bulk unit cell. If you change the unit cell

is_structure_invertible(structure)

This function figures out whether or not an pymatgen.Structure object has

flip_struct(struct)

Flips an atoms object upside down. Normally used to flip surfaces.

precompute_enumerate_surface(bulk_database, ...)

-
-
-

Attributes#

- - - - - - - - - - - - -

__authors__

__email__

s

-
-
-fairchem.data.oc.scripts.precompute_sample_structures.__authors__ = ['Kevin Tran', 'Aini Palizhati', 'Siddharth Goyal', 'Zachary Ulissi']#
-
- -
-
-fairchem.data.oc.scripts.precompute_sample_structures.__email__ = ['ktran@andrew.cmu.edu']#
-
- -
-
-fairchem.data.oc.scripts.precompute_sample_structures.enumerate_surfaces_for_saving(bulk_atoms, max_miller=MAX_MILLER)#
-

Enumerate all the symmetrically distinct surfaces of a bulk structure. It -will not enumerate surfaces with Miller indices above the max_miller -argument. Note that we also look at the bottoms of surfaces if they are -distinct from the top. If they are distinct, we flip the surface so the bottom -is pointing upwards.

-
-
Parameters:
-
    -
  • bulk_atoms (ase.Atoms) – ase.Atoms object of the bulk you want to enumerate surfaces from.

  • -
  • max_miller (int) – An integer indicating the maximum Miller index of the surfaces you are willing to enumerate. Increasing this argument will increase the number of surfaces, but the surfaces will generally become larger.

  • -
-
-
Returns:

all_slabs_info – A list of 4-tuples containing pymatgen.Structure objects for the surfaces we have enumerated, the Miller indices, floats for the shifts, and Booleans for “top”.

Return type:

list

-
-
-
- -
-
-fairchem.data.oc.scripts.precompute_sample_structures.standardize_bulk(atoms)#
-

There are many ways to define a bulk unit cell. If you change the unit cell -itself but also change the locations of the atoms within the unit cell, you -can get effectively the same bulk structure. To address this, there is a -standardization method used to reduce the degrees of freedom such that each -unit cell only has one “true” configuration. This function will align a -unit cell you give it to fit within this standardization.

-
-
Arg:

atoms ase.Atoms object of the bulk you want to standardize

-
-
-
-
Returns:
-

standardized_struct pymatgen.Structure of the standardized bulk

-
-
-
- -
-
-fairchem.data.oc.scripts.precompute_sample_structures.is_structure_invertible(structure)#
-

This function figures out whether or not a pymatgen.Structure object is -symmetric in the z-direction. In this function, the affine matrix is a rotation matrix that -is multiplied with the XYZ positions of the crystal. If the z,z component -of that is negative, it means a symmetry operation exists; it could be a -mirror operation, or one that involves multiple rotations/etc. Regardless, -it means that the top becomes the bottom and vice-versa, and the structure -is symmetric, i.e. structure_XYZ = structure_XYZ*M.

-

In short: If this function returns False, then the input structure can -be flipped in the z-direction to create a new structure.

-
-
Arg:

structure A pymatgen.Structure object.

-
-
Returns

A boolean indicating whether or not your ase.Atoms object is -symmetric in z-direction (i.e. symmetric with respect to x-y plane).

-
-
-
- -
-
-fairchem.data.oc.scripts.precompute_sample_structures.flip_struct(struct)#
-

Flips an atoms object upside down. Normally used to flip surfaces.

-
-
Arg:

atoms pymatgen.Structure object

-
-
-
-
Returns:
-

-
flipped_struct The same ase.Atoms object that was fed as an

argument, but flipped upside down.

-
-
-

-
-
-
- -
-
-fairchem.data.oc.scripts.precompute_sample_structures.precompute_enumerate_surface(bulk_database, bulk_index, opfile)#
-fairchem.data.oc.scripts.precompute_sample_structures.s#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/structure_generator/index.html b/autoapi/fairchem/data/oc/structure_generator/index.html
deleted file mode 100644
index f87668ddd..000000000
--- a/autoapi/fairchem/data/oc/structure_generator/index.html
+++ /dev/null
@@ -1,775 +0,0 @@

fairchem.data.oc.structure_generator#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

StructureGenerator

A class that creates adsorbate/bulk/slab objects given specified indices,

-
-
-

Functions#

- - - - - - - - - - - - - - - -

write_surface(args, slab, bulk_index, surface_index)

Writes vasp inputs and metadata for a specified slab

parse_args()

precompute_slabs(bulk_ind)

run_placements(inputs)

-
-
-

Attributes#

- - - - - - -

args

-
-
-class fairchem.data.oc.structure_generator.StructureGenerator(args, bulk_index, surface_index, adsorbate_index)#
-

A class that creates adsorbate/bulk/slab objects given specified indices, and writes vasp input files and metadata for multiple placements of the adsorbate on the slab. You can choose random, heuristic, or both types of placements.

-

The output directory structure will have the following nested structure, where “files” represents the vasp input files and the metadata.pkl:

outputdir/
    bulk0/
        surface0/
            surface/files
            ads0/
                heur0/files
                heur1/files
                rand0/files
                ...
            ads1/
        surface1/
    bulk1/

Precomputed surfaces will be calculated and saved out if they don’t already exist in the provided directory.

Parameters:

  • args (argparse.Namespace) – Contains all command line args

  • bulk_index (int) – Index of the bulk within the bulk db

  • surface_index (int) – Index of the surface in the list of all possible surfaces

  • adsorbate_index (int) – Index of the adsorbate within the adsorbate db

-run()#
-

Create adsorbate/bulk/surface objects, generate adslab placements, and write to files.

-
- -
-
-_write_adslabs(adslab_obj, mode_str)#
-

Write one set of adslabs (called separately for random and heuristic placements)

-
- -
- -
-
-fairchem.data.oc.structure_generator.write_surface(args, slab, bulk_index, surface_index)#
-

Writes vasp inputs and metadata for a specified slab

-
- -
-
-fairchem.data.oc.structure_generator.parse_args()#
-
- -
-
-fairchem.data.oc.structure_generator.precompute_slabs(bulk_ind)#
-
- -
-
-fairchem.data.oc.structure_generator.run_placements(inputs)#
-
- -
-
-fairchem.data.oc.structure_generator.args#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/tests/old_tests/check_energy_and_forces/index.html b/autoapi/fairchem/data/oc/tests/old_tests/check_energy_and_forces/index.html
deleted file mode 100644
index 826a027e3..000000000
--- a/autoapi/fairchem/data/oc/tests/old_tests/check_energy_and_forces/index.html
+++ /dev/null
@@ -1,731 +0,0 @@

fairchem.data.oc.tests.old_tests.check_energy_and_forces#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - -

check_relaxed_forces(sid, path, thres)

Check all forces in the final frame of adslab is less than a threshold.

check_adsorption_energy(sid, path, ref_energy, ...)

check_DFT_energy(sid, path[, e_tol])

Given a relaxation trajectory, check to see if 1. final energy is less than the initial

check_positions_across_frames_are_different(sid, path)

Given a relaxation trajectory, make sure positions for two consecutive

read_pkl(fname)

run_checks(args)

create_parser()

-
-
-

Attributes#

- - - - - - -

parser

-
-
-fairchem.data.oc.tests.old_tests.check_energy_and_forces.check_relaxed_forces(sid, path, thres)#
-

Check that all forces in the final frame of the adslab are less than a threshold.

-
- -
-
-fairchem.data.oc.tests.old_tests.check_energy_and_forces.check_adsorption_energy(sid, path, ref_energy, adsorption_energy)#
-
- -
-
-fairchem.data.oc.tests.old_tests.check_energy_and_forces.check_DFT_energy(sid, path, e_tol=0.05)#
-

Given a relaxation trajectory, check to see if 1) the final energy is less than the initial energy, and raise an error if not; 2) the energy decreases throughout the trajectory (small spikes are okay); and 3) if 2) fails, whether it is just a matter of the tolerance being too strict, by considering only the first quarter of the trajectory and sampling every 10th frame to check for an _almost_ monotonic decrease in energies. If any frame(i+1) energy is higher than frame(i) energy, flag it and plot the trajectory.

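A minimal sketch of the kind of energy check described above, assuming an ASE-readable trajectory with stored energies (not the exact implementation):

```python
# Hedged sketch: flag frames where the energy rises by more than e_tol.
import ase.io

def energy_spikes(traj_path, e_tol=0.05):
    energies = [atoms.get_potential_energy() for atoms in ase.io.read(traj_path, ":")]
    if energies[-1] > energies[0]:
        raise ValueError("Final energy is higher than the initial energy.")
    return [
        i for i in range(1, len(energies)) if energies[i] - energies[i - 1] > e_tol
    ]
```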
-
- -
-
-fairchem.data.oc.tests.old_tests.check_energy_and_forces.check_positions_across_frames_are_different(sid, path)#
-

Given a relaxation trajectory, make sure positions for two consecutive frames are not identical.

-
- -
-
-fairchem.data.oc.tests.old_tests.check_energy_and_forces.read_pkl(fname)#
-
- -
-
-fairchem.data.oc.tests.old_tests.check_energy_and_forces.run_checks(args)#
-
- -
-
-fairchem.data.oc.tests.old_tests.check_energy_and_forces.create_parser()#
-
- -
-
-fairchem.data.oc.tests.old_tests.check_energy_and_forces.parser#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/tests/old_tests/check_inputs/index.html b/autoapi/fairchem/data/oc/tests/old_tests/check_inputs/index.html
deleted file mode 100644
index 194251c3b..000000000
--- a/autoapi/fairchem/data/oc/tests/old_tests/check_inputs/index.html
+++ /dev/null
@@ -1,759 +0,0 @@

fairchem.data.oc.tests.old_tests.check_inputs#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - -

obtain_metadata(input_dir, split)

Get the metadata provided input directory and split of data.

create_df(metadata_lst[, df_name])

Create a df from metadata to used check_dataset.py file

adslabs_are_unique(df[, unique_by])

Test if there are duplicate adslabs given a df. If the input is another

check_commonelems(df, split1, split2[, check])

Given a df containing all the metadata of the calculations, check to see if there are

is_adsorbate_placed_correct(adslab_input, atoms_tag)

Make sure all adsorbate atoms are connected after placement.

_get_connectivity(atoms)

Generate the connectivity of an atoms obj.

-
-
-fairchem.data.oc.tests.old_tests.check_inputs.obtain_metadata(input_dir, split)#
-

Get the metadata for the provided input directory and split of data.

Parameters:

  • input_dir (str)

  • split (str) – ‘val_ood_cat/ads/both’, and ‘test_ood_cat/ads/both’.

-
-
Returns:
-

-
metadata (tuple) adslab properties.

Ex: (‘mp-126’, (1,1,1), 0.025, True, ‘*OH’, (0,0,0), ‘val_ood_ads’)

-
-
-

-
-
-
- -
-
-fairchem.data.oc.tests.old_tests.check_inputs.create_df(metadata_lst, df_name=None)#
-

Create a df from metadata to be used by the check_dataset.py file.

Parameters:

metadata_lst – A list of adslab properties in tuple form. Each tuple should contain (mpid, miller index, shift, top, adsorbate smile string, adsorption cartesian coordinates tuple, and which split the data belongs to). Ex: (‘mp-126’, (1,1,1), 0.025, True, ‘*OH’, (0,0,0), ‘val_ood_ads’)

-
-
Returns:
-

df A pandas DataFrame

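A small sketch tying these helpers together, using the example tuple shown above; the second tuple and the behavior of adslabs_are_unique on duplicates are assumptions for illustration:

```python
from fairchem.data.oc.tests.old_tests.check_inputs import adslabs_are_unique, create_df

metadata_lst = [
    ("mp-126", (1, 1, 1), 0.025, True, "*OH", (0, 0, 0), "val_ood_ads"),
    ("mp-126", (1, 1, 1), 0.025, True, "*OH", (0, 1, 0), "val_ood_ads"),
]
df = create_df(metadata_lst)
adslabs_are_unique(df)  # presumably flags/raises if duplicate adslabs are present
```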
-
-
-
- -
-
-fairchem.data.oc.tests.old_tests.check_inputs.adslabs_are_unique(df, unique_by=['mpid', 'miller', 'shift', 'top', 'adsorbate', 'adsorption_site'])#
-

Test if there are duplicate adslabs given a df. If the input is another format, convert it to a df first.

Parameters:

  • df – A pd.DataFrame containing metadata of the adslabs being checked.

  • unique_by – df column names that are used to detect duplicates. The default list is the set of fingerprints that represent a unique adslab.

-
- -
-
-fairchem.data.oc.tests.old_tests.check_inputs.check_commonelems(df, split1, split2, check='adsorbate')#
-

Given a df containing all the metadata of the calculations, check to see if there are any bulk or adsorbate duplicates between train and val/test_ood. The dataframe should have a “split_tag” column indicating which split (i.e. train, val_ood_ads, etc.) a data point belongs to.

Parameters:

  • df – A pd.DataFrame containing metadata of the adslabs being checked.

  • split1, split2 – two of the splits from ‘train’, ‘val_id’, ‘test_id’, ‘val_ood_cat/ads/both’, or ‘test_ood_cat/ads/both’.

-
- -
-
-fairchem.data.oc.tests.old_tests.check_inputs.is_adsorbate_placed_correct(adslab_input, atoms_tag)#
-

Make sure all adsorbate atoms are connected after placement. False means there is at least one isolated adsorbate atom. It should be used after input generation but before DFT to avoid unnecessary computations.

Parameters:

  • adslab_input – ase.Atoms of the structure in its initial state

  • atoms_tag (list)

-
-
Returns:
-

-
boolean – If there are any stand-alone adsorbate atoms after placement, return False.

-
-
-

-
-
-
- -
-
-fairchem.data.oc.tests.old_tests.check_inputs._get_connectivity(atoms)#
-

Generate the connectivity of an atoms obj.

Parameters:

atoms – An ase.Atoms object

-
-
Returns:
-

matrix The connectivity matrix of the atoms object.

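A connectivity matrix of this kind is commonly built from covalent-radius cutoffs; a hedged sketch using ASE's neighbor-list utilities (not necessarily what this module does):

```python
from ase.neighborlist import NeighborList, natural_cutoffs

def get_connectivity_sketch(atoms):
    nl = NeighborList(natural_cutoffs(atoms), self_interaction=False, bothways=True)
    nl.update(atoms)
    # Entry (i, j) is 1 when atoms i and j are within bonding distance.
    return nl.get_connectivity_matrix(sparse=False)
```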
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/tests/old_tests/compare_inputs_and_trajectory/index.html b/autoapi/fairchem/data/oc/tests/old_tests/compare_inputs_and_trajectory/index.html
deleted file mode 100644
index 7462c68ef..000000000
--- a/autoapi/fairchem/data/oc/tests/old_tests/compare_inputs_and_trajectory/index.html
+++ /dev/null
@@ -1,704 +0,0 @@

fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - -

get_starting_structure_from_input_dir(input_dir)

min_diff(atoms_init, atoms_final)

Calculate atom wise distances of two atoms object,

compare(args)

read_pkl(fname)

create_parser()

-
-
-

Attributes#

- - - - - - -

parser

-
-
-fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory.get_starting_structure_from_input_dir(input_dir)#
-
- -
-
-fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory.min_diff(atoms_init, atoms_final)#
-

Calculate atom-wise distances of two atoms objects, taking into account periodic boundary conditions.

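A hedged sketch of an atom-wise, minimum-image displacement using ASE's find_mic helper (the module may compute this differently):

```python
import numpy as np
from ase.geometry import find_mic

def min_diff_sketch(atoms_init, atoms_final):
    dpos = atoms_final.positions - atoms_init.positions
    # Wrap each displacement back into the cell (minimum-image convention).
    vectors, _ = find_mic(dpos, atoms_init.get_cell(), pbc=True)
    return np.linalg.norm(vectors, axis=1)
```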
-
- -
-
-fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory.compare(args)#
-
- -
-
-fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory.read_pkl(fname)#
-
- -
-
-fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory.create_parser()#
-
- -
-
-fairchem.data.oc.tests.old_tests.compare_inputs_and_trajectory.parser#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/tests/old_tests/verify_correctness/index.html b/autoapi/fairchem/data/oc/tests/old_tests/verify_correctness/index.html
deleted file mode 100644
index d81d341be..000000000
--- a/autoapi/fairchem/data/oc/tests/old_tests/verify_correctness/index.html
+++ /dev/null
@@ -1,682 +0,0 @@

fairchem.data.oc.tests.old_tests.verify_correctness#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - -

compare_runs(path1, path2, reference_type, tol)

create_parser()

main(args)

-
-
-

Attributes#

- - - - - - -

parser

-
-
-fairchem.data.oc.tests.old_tests.verify_correctness.compare_runs(path1, path2, reference_type, tol)#
-
- -
-
-fairchem.data.oc.tests.old_tests.verify_correctness.create_parser()#
-
- -
-
-fairchem.data.oc.tests.old_tests.verify_correctness.main(args)#
-
- -
-
-fairchem.data.oc.tests.old_tests.verify_correctness.parser#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/tests/test_adsorbate/index.html b/autoapi/fairchem/data/oc/tests/test_adsorbate/index.html
deleted file mode 100644
index 33e973988..000000000
--- a/autoapi/fairchem/data/oc/tests/test_adsorbate/index.html
+++ /dev/null
@@ -1,732 +0,0 @@

fairchem.data.oc.tests.test_adsorbate#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

TestAdsorbate

-
-
-

Attributes#

- - - - - - - - - -

_test_db

_test_db_old

-
-
-fairchem.data.oc.tests.test_adsorbate._test_db#
-
- -
-
-fairchem.data.oc.tests.test_adsorbate._test_db_old#
-
- -
-
-class fairchem.data.oc.tests.test_adsorbate.TestAdsorbate#
-
-
-test_adsorbate_init_from_id()#
-
- -
-
-test_adsorbate_init_from_smiles()#
-
- -
-
-test_adsorbate_init_random()#
-
- -
-
-test_adsorbate_init_from_id_with_db()#
-
- -
-
-test_adsorbate_init_from_smiles_with_db()#
-
- -
-
-test_adsorbate_init_random_with_db()#
-
- -
-
-test_adsorbate_init_reaction_string()#
-
- -
-
-test_adsorbate_init_reaction_string_with_old_db()#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/tests/test_adsorbate_slab_config/index.html b/autoapi/fairchem/data/oc/tests/test_adsorbate_slab_config/index.html
deleted file mode 100644
index eca8f7882..000000000
--- a/autoapi/fairchem/data/oc/tests/test_adsorbate_slab_config/index.html
+++ /dev/null
@@ -1,702 +0,0 @@

fairchem.data.oc.tests.test_adsorbate_slab_config#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

TestAdslab

-
-
-

Functions#

- - - - - - -

load_data(request)

-
-
-fairchem.data.oc.tests.test_adsorbate_slab_config.load_data(request)#
-
- -
-
-class fairchem.data.oc.tests.test_adsorbate_slab_config.TestAdslab#
-
-
-test_adslab_init()#
-
- -
-
-test_num_augmentations_per_site()#
-
- -
-
-test_placement_overlap()#
-

Test that the adsorbate does not overlap with the slab.

-
- -
-
-test_is_adsorbate_com_on_normal()#
-
- -
-
-test_is_adsorbate_binding_atom_on_normal()#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/tests/test_bulk/index.html b/autoapi/fairchem/data/oc/tests/test_bulk/index.html
deleted file mode 100644
index af056e4e9..000000000
--- a/autoapi/fairchem/data/oc/tests/test_bulk/index.html
+++ /dev/null
@@ -1,755 +0,0 @@

fairchem.data.oc.tests.test_bulk#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

TestBulk

-
-
-

Functions#

- - - - - - -

load_bulk(request)

-
-
-

Attributes#

- - - - - - -

_test_db

-
-
-fairchem.data.oc.tests.test_bulk.load_bulk(request)#
-
- -
-
-fairchem.data.oc.tests.test_bulk._test_db#
-
- -
-
-class fairchem.data.oc.tests.test_bulk.TestBulk#
-
-
-test_bulk_init_from_id()#
-
- -
-
-test_bulk_init_from_src_id()#
-
- -
-
-test_bulk_init_random()#
-
- -
-
-test_bulk_init_from_id_with_db()#
-
- -
-
-test_bulk_init_from_src_id_with_db()#
-
- -
-
-test_bulk_init_random_with_db()#
-
- -
-
-test_unique_slab_enumeration()#
-
- -
-
-test_precomputed_slab()#
-
- -
-
-test_slab_miller_enumeration()#
-
- -
-
-get_max_miller(slabs)#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/tests/test_inputs/index.html b/autoapi/fairchem/data/oc/tests/test_inputs/index.html
deleted file mode 100644
index 99972ec14..000000000
--- a/autoapi/fairchem/data/oc/tests/test_inputs/index.html
+++ /dev/null
@@ -1,680 +0,0 @@

fairchem.data.oc.tests.test_inputs#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

TestVasp

-
-
-

Functions#

- - - - - - -

load_data(request)

-
-
-fairchem.data.oc.tests.test_inputs.load_data(request)#
-
- -
-
-class fairchem.data.oc.tests.test_inputs.TestVasp#
-
-
-test_cleanup()#
-
- -
-
-test_unique_kpts()#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/tests/test_multi_adsorbate_slab_config/index.html b/autoapi/fairchem/data/oc/tests/test_multi_adsorbate_slab_config/index.html
deleted file mode 100644
index e3f0690d4..000000000
--- a/autoapi/fairchem/data/oc/tests/test_multi_adsorbate_slab_config/index.html
+++ /dev/null
@@ -1,689 +0,0 @@

fairchem.data.oc.tests.test_multi_adsorbate_slab_config#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

TestMultiAdslab

-
-
-

Functions#

- - - - - - -

load_data(request)

-
-
-fairchem.data.oc.tests.test_multi_adsorbate_slab_config.load_data(request)#
-
- -
-
-class fairchem.data.oc.tests.test_multi_adsorbate_slab_config.TestMultiAdslab#
-
-
-test_num_configurations()#
-
- -
-
-test_adsorbate_indices()#
-

Test that the adsorbate indices correspond to the unique adsorbates.

-
- -
-
-test_placement_overlap()#
-

Test that the adsorbate sites do not overlap with each other.

\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/tests/test_slab/index.html b/autoapi/fairchem/data/oc/tests/test_slab/index.html
deleted file mode 100644
index 3268c9454..000000000
--- a/autoapi/fairchem/data/oc/tests/test_slab/index.html
+++ /dev/null
@@ -1,668 +0,0 @@

fairchem.data.oc.tests.test_slab#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

TestSlab

-
-
-class fairchem.data.oc.tests.test_slab.TestSlab#
-
-
-test_slab_init_from_id()#
-
- -
-
-test_slab_init_from_specific_millers()#
-
- -
-
-test_slab_init_random()#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/utils/flag_anomaly/index.html b/autoapi/fairchem/data/oc/utils/flag_anomaly/index.html
deleted file mode 100644
index 126bee69b..000000000
--- a/autoapi/fairchem/data/oc/utils/flag_anomaly/index.html
+++ /dev/null
@@ -1,735 +0,0 @@

fairchem.data.oc.utils.flag_anomaly#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

DetectTrajAnomaly

-
-
-class fairchem.data.oc.utils.flag_anomaly.DetectTrajAnomaly(init_atoms, final_atoms, atoms_tag, final_slab_atoms=None, surface_change_cutoff_multiplier=1.5, desorption_cutoff_multiplier=1.5)#
-
-
-is_adsorbate_dissociated()#
-

Tests if the initial adsorbate connectivity is maintained.

-
-
Returns:
-

True if the connectivity was not maintained, otherwise False

-
-
Return type:
-

(bool)

-
-
-
- -
-
-has_surface_changed()#
-

Tests bond breaking / forming events within a tolerance on the surface so that systems with significant adsorbate-induced surface changes may be discarded, since the reference to the relaxed slab may no longer be valid.

-
-
Returns:
-

True if the surface is reconstructed, otherwise False

-
-
Return type:
-

(bool)

-
-
-
- -
-
-is_adsorbate_desorbed()#
-

If the adsorbate binding atoms have no connection with slab atoms, consider it desorbed.

-
-
Returns:
-

True if there is desorption, otherwise False

-
-
Return type:
-

(bool)

-
-
-
- -
-
-_get_connectivity(atoms, cutoff_multiplier=1.0)#
-

Generate the connectivity of an atoms obj.

-
-
Parameters:
-
    -
  • atoms (ase.Atoms) – object which will have its connectivity considered

  • -
  • cutoff_multiplier (float, optional) – cushion for small atom movements when assessing -atom connectivity

  • -
-
-
Returns:
-

The connectivity matrix of the atoms object.

-
-
Return type:
-

(np.ndarray)

-
-
-
- -
-
-is_adsorbate_intercalated()#
-

Ensure the adsorbate isn’t interacting with an atom that is not allowed to relax.

-
-
Returns:
-

True if any adsorbate atom neighbors a frozen atom, otherwise False

-
-
Return type:
-

(bool)

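A usage sketch based on the constructor signature above; the trajectory file name is hypothetical, and passing the initial frame's tags as atoms_tag is an assumption:

```python
from ase.io import read
from fairchem.data.oc.utils.flag_anomaly import DetectTrajAnomaly

traj = read("relaxation.traj", ":")  # hypothetical trajectory file
init_atoms, final_atoms = traj[0], traj[-1]
detector = DetectTrajAnomaly(init_atoms, final_atoms, atoms_tag=init_atoms.get_tags())
anomalous = (
    detector.is_adsorbate_dissociated()
    or detector.is_adsorbate_desorbed()
    or detector.has_surface_changed()
    or detector.is_adsorbate_intercalated()
)
```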
\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/utils/index.html b/autoapi/fairchem/data/oc/utils/index.html
deleted file mode 100644
index 72feb5b5d..000000000
--- a/autoapi/fairchem/data/oc/utils/index.html
+++ /dev/null
@@ -1,746 +0,0 @@

fairchem.data.oc.utils#

-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - -

DetectTrajAnomaly

-
-
-class fairchem.data.oc.utils.DetectTrajAnomaly(init_atoms, final_atoms, atoms_tag, final_slab_atoms=None, surface_change_cutoff_multiplier=1.5, desorption_cutoff_multiplier=1.5)#
-
-
-is_adsorbate_dissociated()#
-

Tests if the initial adsorbate connectivity is maintained.

-
-
Returns:
-

True if the connectivity was not maintained, otherwise False

-
-
Return type:
-

(bool)

-
-
-
- -
-
-has_surface_changed()#
-

Tests bond breaking / forming events within a tolerance on the surface so that systems with significant adsorbate-induced surface changes may be discarded, since the reference to the relaxed slab may no longer be valid.

-
-
Returns:
-

True if the surface is reconstructed, otherwise False

-
-
Return type:
-

(bool)

-
-
-
- -
-
-is_adsorbate_desorbed()#
-

If the adsorbate binding atoms have no connection with slab atoms, consider it desorbed.

-
-
Returns:
-

True if there is desorption, otherwise False

-
-
Return type:
-

(bool)

-
-
-
- -
-
-_get_connectivity(atoms, cutoff_multiplier=1.0)#
-

Generate the connectivity of an atoms obj.

-
-
Parameters:
-
    -
  • atoms (ase.Atoms) – object which will have its connectivity considered

  • -
  • cutoff_multiplier (float, optional) – cushion for small atom movements when assessing -atom connectivity

  • -
-
-
Returns:
-

The connectivity matrix of the atoms object.

-
-
Return type:
-

(np.ndarray)

-
-
-
- -
-
-is_adsorbate_intercalated()#
-

Ensure the adsorbate isn’t interacting with an atom that is not allowed to relax.

-
-
Returns:
-

True if any adsorbate atom neighbors a frozen atom, otherwise False

-
-
Return type:
-

(bool)

\ No newline at end of file
diff --git a/autoapi/fairchem/data/oc/utils/vasp/index.html b/autoapi/fairchem/data/oc/utils/vasp/index.html
deleted file mode 100644
index 64b0602da..000000000
--- a/autoapi/fairchem/data/oc/utils/vasp/index.html
+++ /dev/null
@@ -1,754 +0,0 @@

fairchem.data.oc.utils.vasp#

-

This submodule contains the scripts that we used to run VASP.

-

Note that some of these scripts were taken and modified from [GASpy](https://github.com/ulissigroup/GASpy) with permission of the authors.

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - -

_clean_up_inputs(atoms, vasp_flags)

Parses the inputs and makes sure some things are straightened out.

calculate_surface_k_points(atoms)

For surface calculations, it's a good practice to calculate the k-point

write_vasp_input_files(atoms[, outdir, vasp_flags])

Effectively goes through the same motions as the run_vasp function,

-
-
-

Attributes#

- - - - - - - - - - - - - - - -

__author__

__email__

VASP_FLAGS

BULK_VASP_FLAGS

-
-
-fairchem.data.oc.utils.vasp.__author__ = 'Kevin Tran'#
-
- -
-
-fairchem.data.oc.utils.vasp.__email__ = 'ktran@andrew.cmu.edu'#
-
- -
-
-fairchem.data.oc.utils.vasp.VASP_FLAGS#
-
- -
-
-fairchem.data.oc.utils.vasp.BULK_VASP_FLAGS#
-
- -
-
-fairchem.data.oc.utils.vasp._clean_up_inputs(atoms, vasp_flags)#
-

Parses the inputs and makes sure some things are straightened out.

-
-
Arg:

atoms – ase.Atoms object of the structure we want to relax
vasp_flags – A dictionary of settings we want to pass to the Vasp calculator

Returns:

atoms – ase.Atoms object of the structure we want to relax, but with the unit vectors fixed (if needed)
vasp_flags – A modified version of the ‘vasp_flags’ argument

-

-
-
-
- -
-
-fairchem.data.oc.utils.vasp.calculate_surface_k_points(atoms)#
-

For surface calculations, it’s a good practice to calculate the k-point mesh given the unit cell size. We do that on-the-spot here.

-
-
Arg:

atoms ase.Atoms object of the structure we want to relax

-
-
-
-
Returns:
-

k_pts A 3-tuple of integers indicating the k-point mesh to use

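A hedged sketch of a cell-size-based k-point heuristic (k × a roughly constant in the surface plane, a single k-point along z); the effective length used here is an assumption and may differ from this module's choice:

```python
import numpy as np

def surface_k_points_sketch(atoms, effective_length=25.0):
    a, b = atoms.cell.lengths()[:2]
    k_pts = np.maximum(1, np.round(effective_length / np.array([a, b]))).astype(int)
    return (int(k_pts[0]), int(k_pts[1]), 1)  # one k-point along the vacuum direction
```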
-
-
-
- -
-
-fairchem.data.oc.utils.vasp.write_vasp_input_files(atoms, outdir='.', vasp_flags=None)#
-

Effectively goes through the same motions as the run_vasp function, except it only writes the input files instead of running.

-
-
Parameters:

  • atoms – ase.Atoms object that we want to relax.

  • outdir – A string indicating where you want to save the input files. Defaults to ‘.’

  • vasp_flags – A dictionary of settings we want to pass to the Vasp calculator. Defaults to a standard set of values if None.
-
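A usage sketch based on the signature above; the Cu(111) slab and output directory are illustrative, and actually writing the files requires an ASE/VASP pseudopotential setup:

```python
from ase.build import fcc111
from fairchem.data.oc.utils.vasp import VASP_FLAGS, write_vasp_input_files

slab = fcc111("Cu", size=(2, 2, 4), vacuum=10.0)
# Assumes VASP_PP_PATH is configured so POTCARs can be written.
write_vasp_input_files(slab, outdir="./cu_111_inputs", vasp_flags=VASP_FLAGS)
```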
\ No newline at end of file
diff --git a/autoapi/fairchem/data/odac/force_field/FF_analysis/index.html b/autoapi/fairchem/data/odac/force_field/FF_analysis/index.html
deleted file mode 100644
index f32a4c367..000000000
--- a/autoapi/fairchem/data/odac/force_field/FF_analysis/index.html
+++ /dev/null
@@ -1,742 +0,0 @@

fairchem.data.odac.force_field.FF_analysis#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

get_data(infile[, limit])

binned_average(DFT_ads, pred_err, bins)

bin_plot(ax, bins, heights, **kwargs)

get_Fig4a(raw_error_CO2, raw_error_H2O[, b, outfile])

get_Fig4b(int_DFT_CO2, err_CO2, int_DFT_H2O, err_H2O)

get_Fig4c(DFT_CO2, err_CO2[, outfile])

get_Fig4d(DFT_H2O, err_H2O[, outfile])

phys_err(DFT, FF)

chem_err(DFT, FF)

-
-
-

Attributes#

- - - - - - -

infile

-
-
-fairchem.data.odac.force_field.FF_analysis.get_data(infile, limit=2)#
-
- -
-
-fairchem.data.odac.force_field.FF_analysis.binned_average(DFT_ads, pred_err, bins)#
-
- -
-
-fairchem.data.odac.force_field.FF_analysis.bin_plot(ax, bins, heights, **kwargs)#
-
- -
-
-fairchem.data.odac.force_field.FF_analysis.get_Fig4a(raw_error_CO2, raw_error_H2O, b=20, outfile='Fig5a.png')#
-
- -
-
-fairchem.data.odac.force_field.FF_analysis.get_Fig4b(int_DFT_CO2, err_CO2, int_DFT_H2O, err_H2O, outfile='Fig5b.png')#
-
- -
-
-fairchem.data.odac.force_field.FF_analysis.get_Fig4c(DFT_CO2, err_CO2, outfile='Fig5c.png')#
-
- -
-
-fairchem.data.odac.force_field.FF_analysis.get_Fig4d(DFT_H2O, err_H2O, outfile='Fig5d.png')#
-
- -
-
-fairchem.data.odac.force_field.FF_analysis.phys_err(DFT, FF)#
-
- -
-
-fairchem.data.odac.force_field.FF_analysis.chem_err(DFT, FF)#
-
- -
-
-fairchem.data.odac.force_field.FF_analysis.infile = '/storage/home/hcoda1/8/lbrabson3/p-amedford6-0/s2ef/final/data_w_oms.json'#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/odac/promising_mof/promising_mof_energies/energy/index.html b/autoapi/fairchem/data/odac/promising_mof/promising_mof_energies/energy/index.html
deleted file mode 100644
index e709dffd2..000000000
--- a/autoapi/fairchem/data/odac/promising_mof/promising_mof_energies/energy/index.html
+++ /dev/null
@@ -1,963 +0,0 @@

fairchem.data.odac.promising_mof.promising_mof_energies.energy#

-
-

Module Contents#

-
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.raw_ads_energy_data#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.temp_split_string#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_pristine#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_pristine#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_defective#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_defective#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_pristine_co2#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_pristine_h2o#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_pristine_co_ads#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_pristine_co_ads_2#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_defective_co2#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_defective_h2o#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_defective_co_ads#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.complete_data_merged_defective_co_ads_2#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_co2#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.current_entry#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_h2o#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.current_entry#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_co_ads#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.current_entry#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_co_ads_2#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.current_entry#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.adsorption_data#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.count = 0#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_co2_defective#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.current_entry#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_h2o_defective#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.current_entry#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_co_ads_defective#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.current_entry#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.lowest_energy_data_co_ads_2_defective#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.current_entry#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.adsorption_data_defective#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.unique_combinations_count#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.def_counts_df#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.mof_name#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.missing_DDEC#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.missing_DDEC_pristine#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.missing_DDEC_defective#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.index_drop_ddec_pristine = []#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.adsorption_data#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.index_drop_ddec_defective = []#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.adsorption_data_defective#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.adsorption_data#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.adsorption_data_defective#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.promising_pristine#
-
- -
-
-fairchem.data.odac.promising_mof.promising_mof_energies.energy.promising_defective#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/om/biomolecules/geom/sample_geom_drugs/index.html b/autoapi/fairchem/data/om/biomolecules/geom/sample_geom_drugs/index.html
deleted file mode 100644
index 2a6645f97..000000000
--- a/autoapi/fairchem/data/om/biomolecules/geom/sample_geom_drugs/index.html
+++ /dev/null
@@ -1,663 +0,0 @@

fairchem.data.om.biomolecules.geom.sample_geom_drugs#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - - - - -

write_pickle(data, path)

parse_args()

main()

-
-
-fairchem.data.om.biomolecules.geom.sample_geom_drugs.write_pickle(data, path)#
-
- -
-
-fairchem.data.om.biomolecules.geom.sample_geom_drugs.parse_args()#
-
- -
-
-fairchem.data.om.biomolecules.geom.sample_geom_drugs.main()#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/om/biomolecules/geom/write_geom_drugs_structures/index.html b/autoapi/fairchem/data/om/biomolecules/geom/write_geom_drugs_structures/index.html
deleted file mode 100644
index 8dfac3425..000000000
--- a/autoapi/fairchem/data/om/biomolecules/geom/write_geom_drugs_structures/index.html
+++ /dev/null
@@ -1,653 +0,0 @@

fairchem.data.om.biomolecules.geom.write_geom_drugs_structures#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

parse_args()

main()

-
-
-fairchem.data.om.biomolecules.geom.write_geom_drugs_structures.parse_args()#
-
- -
-
-fairchem.data.om.biomolecules.geom.write_geom_drugs_structures.main()#
\ No newline at end of file
diff --git a/autoapi/fairchem/data/om/omdata/orca/calc/index.html b/autoapi/fairchem/data/om/omdata/orca/calc/index.html
deleted file mode 100644
index 367745b3f..000000000
--- a/autoapi/fairchem/data/om/omdata/orca/calc/index.html
+++ /dev/null
@@ -1,712 +0,0 @@

fairchem.data.om.omdata.orca.calc#

-
-

Module Contents#

-
-

Functions#

- - - - - - -

write_orca_inputs(atoms, output_directory[, charge, ...])

One-off method to be used if you wanted to write inputs for an arbitrary

-
-
-

Attributes#

- - - - - - - - - - - - - - - - - - - - - -

ORCA_FUNCTIONAL

ORCA_BASIS

ORCA_SIMPLE_INPUT

ORCA_BLOCKS

ORCA_ASE_SIMPLE_INPUT

OPT_PARAMETERS

-
-
-fairchem.data.om.omdata.orca.calc.ORCA_FUNCTIONAL = 'wB97M-V'#
-
- -
-
-fairchem.data.om.omdata.orca.calc.ORCA_BASIS = 'def2-TZVPD'#
-
- -
-
-fairchem.data.om.omdata.orca.calc.ORCA_SIMPLE_INPUT = ['EnGrad', 'RIJCOSX', 'def2/J', 'NoUseSym', 'DIIS', 'NOSOSCF', 'NormalConv', 'DEFGRID3', 'ALLPOP', 'NBO']#
-
- -
-
-fairchem.data.om.omdata.orca.calc.ORCA_BLOCKS = ['%scf Convergence Tight maxiter 300 end', '%elprop Dipole true Quadrupole true end', '%nbo...#
-
- -
-
-fairchem.data.om.omdata.orca.calc.ORCA_ASE_SIMPLE_INPUT#
-
- -
-
-fairchem.data.om.omdata.orca.calc.OPT_PARAMETERS#
-
- -
-
-fairchem.data.om.omdata.orca.calc.write_orca_inputs(atoms, output_directory, charge=0, mult=1, orcasimpleinput=ORCA_ASE_SIMPLE_INPUT, orcablocks=' '.join(ORCA_BLOCKS))#
-

One-off method to be used if you wanted to write inputs for an arbitrary system. Primarily used for debugging.

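A usage sketch based on the signature above; the water molecule and output directory are arbitrary examples:

```python
from ase.build import molecule
from fairchem.data.om.omdata.orca.calc import write_orca_inputs

atoms = molecule("H2O")
write_orca_inputs(atoms, output_directory="./orca_h2o", charge=0, mult=1)
```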
\ No newline at end of file
diff --git a/autoapi/fairchem/data/om/omdata/orca/index.html b/autoapi/fairchem/data/om/omdata/orca/index.html
deleted file mode 100644
index d69858ee3..000000000
--- a/autoapi/fairchem/data/om/omdata/orca/index.html
+++ /dev/null
@@ -1,622 +0,0 @@
fairchem.data.om.omdata.orca — FAIR Chemistry Documentation
\ No newline at end of file
diff --git a/autoapi/fairchem/data/om/omdata/orca/recipes/index.html b/autoapi/fairchem/data/om/omdata/orca/recipes/index.html
deleted file mode 100644
index 8313129d2..000000000
--- a/autoapi/fairchem/data/om/omdata/orca/recipes/index.html
+++ /dev/null
@@ -1,690 +0,0 @@

fairchem.data.om.omdata.orca.recipes#

-
-

Module Contents#

-
-

Functions#

- - - - - - - - - -

single_point_calculation(atoms, charge, spin_multiplicity)

Wrapper around QUACC's static job to standardize single-point calculations.

ase_relaxation(atoms, charge, spin_multiplicity[, xc, ...])

Wrapper around QUACC's ase_relax_job to standardize geometry optimizations.

-
-
-fairchem.data.om.omdata.orca.recipes.single_point_calculation(atoms, charge, spin_multiplicity, xc=ORCA_FUNCTIONAL, basis=ORCA_BASIS, orcasimpleinput=None, orcablocks=None, nprocs=12, outputdir=os.getcwd(), **calc_kwargs)#
-

Wrapper around QUACC’s static job to standardize single-point calculations. See github.com/Quantum-Accelerators/quacc/blob/main/src/quacc/recipes/orca/core.py#L22 for more details.

-
-
Parameters:
-
    -
  • atoms (Atoms) – Atoms object

  • -
  • charge (int) – Charge of system

  • -
  • spin_multiplicity (int) – Multiplicity of the system

  • -
  • xc (str) – Exchange-correlation functional

  • -
  • basis (str) – Basis set

  • -
  • orcasimpleinput (list) – List of orcasimpleinput settings for the calculator

  • -
  • orcablocks (list) – List of orcablocks swaps for the calculator

  • -
  • nprocs (int) – Number of processes to parallelize across

  • -
  • outputdir (str) – Directory to move results to upon completion

  • -
  • calc_kwargs – Additional kwargs for the custom Orca calculator

  • -
-
-
-
- -
-
-fairchem.data.om.omdata.orca.recipes.ase_relaxation(atoms, charge, spin_multiplicity, xc=ORCA_FUNCTIONAL, basis=ORCA_BASIS, orcasimpleinput=None, orcablocks=None, nprocs=12, opt_params=None, outputdir=os.getcwd(), **calc_kwargs)#
-

Wrapper around QUACC’s ase_relax_job to standardize geometry optimizations. See github.com/Quantum-Accelerators/quacc/blob/main/src/quacc/recipes/orca/core.py#L22 for more details.

-
-
Parameters:
-
    -
  • atoms (Atoms) – Atoms object

  • -
  • charge (int) – Charge of system

  • -
  • spin_multiplicity (int) – Multiplicity of the system

  • -
  • xc (str) – Exchange-correlation functional

  • -
  • basis (str) – Basis set

  • -
  • orcasimpleinput (list) – List of orcasimpleinput settings for the calculator

  • -
  • orcablocks (list) – List of orcablocks swaps for the calculator

  • -
  • nprocs (int) – Number of processes to parallelize across

  • -
  • opt_params (dict) – Dictionary of optimizer parameters

  • -
  • outputdir (str) – Directory to move results to upon completion

  • -
  • calc_kwargs – Additional kwargs for the custom Orca calculator

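A usage sketch of the two wrappers documented above for a neutral singlet; actually running them requires ORCA and a configured QUACC environment:

```python
from ase.build import molecule
from fairchem.data.om.omdata.orca.recipes import ase_relaxation, single_point_calculation

atoms = molecule("CH4")
single_point_calculation(atoms, charge=0, spin_multiplicity=1, nprocs=4)
ase_relaxation(atoms, charge=0, spin_multiplicity=1, nprocs=4)
```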
\ No newline at end of file
diff --git a/autoapi/fairchem/demo/ocpapi/client/client/index.html b/autoapi/fairchem/demo/ocpapi/client/client/index.html
deleted file mode 100644
index 337f43595..000000000
--- a/autoapi/fairchem/demo/ocpapi/client/client/index.html
+++ /dev/null
@@ -1,977 +0,0 @@

fairchem.demo.ocpapi.client.client#

-
-

Module Contents#

-
-

Classes#

- - - - - - -

Client

Exposes each route in the OCP API as a method.

-
-
-exception fairchem.demo.ocpapi.client.client.RequestException(method: str, url: str, cause: str)#
-

Bases: Exception

-

Exception raised any time there is an error while making an API call.

-
- -
-
-exception fairchem.demo.ocpapi.client.client.NonRetryableRequestException(method: str, url: str, cause: str)#
-

Bases: RequestException

-

Exception raised when an API call is rejected for a reason that will not succeed on retry. For example, this might include a malformed request or an action that is not allowed.

-
- -
-
-exception fairchem.demo.ocpapi.client.client.RateLimitExceededException(method: str, url: str, retry_after: datetime.timedelta | None = None)#
-

Bases: RequestException

-

Exception raised when an API call is rejected because a rate limit has been exceeded.

-
-
-retry_after#
-

If known, the time to wait before the next attempt to call the API should be made.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.client.Client(host: str = 'open-catalyst-api.metademolab.com', scheme: str = 'https')#
-

Exposes each route in the OCP API as a method.

-
-
-property host: str#
-

The host being called by this client.

-
- -
-
-async get_models() fairchem.demo.ocpapi.client.models.Models#
-

Fetch the list of models that are supported in the API.

-
-
Raises:
-
-
-
Returns:
-

The models that are supported in the API.

-
-
-
- -
-
-async get_bulks() fairchem.demo.ocpapi.client.models.Bulks#
-

Fetch the list of bulk materials that are supported in the API.

-
-
Raises:
-
-
-
Returns:
-

The bulks that are supported throughout the API.

-
-
-
- -
-
-async get_adsorbates() fairchem.demo.ocpapi.client.models.Adsorbates#
-

Fetch the list of adsorbates that are supported in the API.

-
-
Raises:
-
-
-
Returns:
-

The adsorbates that are supported throughout the API.

-
-
-
- -
-
-async get_slabs(bulk: str | fairchem.demo.ocpapi.client.models.Bulk) fairchem.demo.ocpapi.client.models.Slabs#
-

Get a unique list of slabs for the input bulk structure.

-
-
Parameters:
-

bulk – If a string, the id of the bulk to use. Otherwise the Bulk instance to use.

-
-
Raises:
-
-
-
Returns:
-

Slabs for each of the unique surfaces of the material.

-
-
-
- -
-
-async get_adsorbate_slab_configs(adsorbate: str, slab: fairchem.demo.ocpapi.client.models.Slab) fairchem.demo.ocpapi.client.models.AdsorbateSlabConfigs#
-

Get a list of possible binding sites for the input adsorbate on the input slab.

-
-
Parameters:
-
    -
  • adsorbate – Description of the adsorbate to place.

  • -
  • slab – Information about the slab on which the adsorbate should be placed.

  • -
-
-
Raises:
-
-
-
Returns:
-

Configurations for each adsorbate binding site on the slab.

-
-
-
- -
-
-async submit_adsorbate_slab_relaxations(adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.models.Atoms], bulk: fairchem.demo.ocpapi.client.models.Bulk, slab: fairchem.demo.ocpapi.client.models.Slab, model: str, ephemeral: bool = False) fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsSystem#
-

Starts relaxations of the input adsorbate configurations on the input slab using energies and forces returned by the input model. Relaxations are run asynchronously and results can be fetched using the system id that is returned from this method.

-
-
Parameters:
-
    -
  • adsorbate – Description of the adsorbate being simulated.

  • -
  • adsorbate_configs – List of adsorbate configurations to relax. This should only include the adsorbates themselves; the surface is defined in the “slab” field that is a peer to this one.

  • -
  • bulk – Details of the bulk material being simulated.

  • -
  • slab – The structure of the slab on which adsorbates are placed.

  • -
  • model – The model that will be used to evaluate energies and forces during relaxations.

  • -
  • ephemeral – If False (default), any later attempt to delete the generated relaxations will be rejected. If True, deleting the relaxations will be allowed, which is generally useful for testing when there is no reason for results to be persisted.

  • -
-
-
Raises:
-
-
-
Returns:
-

IDs of the relaxations.

-
-
-
- -
-
-async get_adsorbate_slab_relaxations_request(system_id: str) fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsRequest#
-

Fetches the original relaxations request for the input system.

-
-
Parameters:
-

system_id – The ID of the system to fetch.

-
-
Raises:
-
-
-
Returns:
-

The original request that was made when submitting relaxations.

-
-
-
- -
-
-async get_adsorbate_slab_relaxations_results(system_id: str, config_ids: List[int] | None = None, fields: List[str] | None = None) fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsResults#
-

Fetches relaxation results for the input system.

-
-
Parameters:
-
    -
  • system_id – The system id of the relaxations.

  • -
  • config_ids – If defined and not empty, a subset of configurations to fetch. Otherwise all configurations are returned.

  • -
  • fields – If defined and not empty, a subset of fields in each configuration to fetch. Otherwise all fields are returned.

  • -
-
-
Raises:
-
-
-
Returns:
-

The relaxation results for each configuration in the system.

-
-
-
- -
-
-async delete_adsorbate_slab_relaxations(system_id: str) None#
-

Deletes all relaxation results for the input system.

-
-
Parameters:
-

system_id – The ID of the system to delete.

-
-
Raises:
-
-
-
-
- -
-
-async _run_request(path: str, method: str, **kwargs) str#
-

Helper method that runs the input request on a thread so that it doesn’t block the event loop on the calling thread.

-
-
Parameters:
-
    -
  • path – The URL path to make the request against.

  • -
  • method – The HTTP method to use (GET, POST, etc.).

  • -
-
-
Raises:
-
-
-
Returns:
-

The response body from the request as a string.

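An end-to-end sketch of the async methods documented above; the bulk id, adsorbate, and the response field names (slabs, adsorbate_configs) are assumptions for illustration:

```python
import asyncio
from fairchem.demo.ocpapi.client import Client

async def main():
    client = Client()
    slabs = await client.get_slabs("mp-30")  # hypothetical bulk id
    configs = await client.get_adsorbate_slab_configs("*OH", slabs.slabs[0])
    print(len(configs.adsorbate_configs), "candidate placements")

asyncio.run(main())
```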
\ No newline at end of file
diff --git a/autoapi/fairchem/demo/ocpapi/client/index.html b/autoapi/fairchem/demo/ocpapi/client/index.html
deleted file mode 100644
index 7fb5579c1..000000000
--- a/autoapi/fairchem/demo/ocpapi/client/index.html
+++ /dev/null
@@ -1,1705 +0,0 @@

fairchem.demo.ocpapi.client#

-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

Client – Exposes each route in the OCP API as a method.

Adsorbates – Stores the response from a request to fetch adsorbates supported in the API.

AdsorbateSlabConfigs – Stores the response from a request to fetch placements of a single adsorbate on a slab.

AdsorbateSlabRelaxationResult – Stores information about a single adsorbate slab configuration, including outputs for the model used in relaxations.

AdsorbateSlabRelaxationsRequest – Stores the request to submit a new batch of adsorbate slab relaxations.

AdsorbateSlabRelaxationsResults – Stores the response from a request for results of adsorbate slab relaxations.

AdsorbateSlabRelaxationsSystem – Stores the response from a request to submit a new batch of adsorbate slab relaxations.

Atoms – Subset of the fields from an ASE Atoms object that are used within this API.

Bulk – Stores information about a single bulk material.

Bulks – Stores the response from a request to fetch bulks supported in the API.

Model – Stores information about a single model supported in the API.

Models – Stores the response from a request for models supported in the API.

Slab – Stores all information about a slab that is returned from the API.

SlabMetadata – Stores metadata about a slab that is returned from the API.

Slabs – Stores the response from a request to fetch slabs for a bulk structure.

Status – Relaxation status of a single adsorbate placement on a slab.
-
-

Functions#

- - - - - - -

get_results_ui_url(→ Optional[str]) – Generates the URL at which results for the input system can be visualized.

-
-
-class fairchem.demo.ocpapi.client.Client(host: str = 'open-catalyst-api.metademolab.com', scheme: str = 'https')#
-

Exposes each route in the OCP API as a method.

-
-
-property host: str#
-

The host being called by this client.

-
- -
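For example, the client can be pointed at a different deployment by overriding the constructor defaults; the alternative host below is a placeholder used only to illustrate the arguments.

```python
from fairchem.demo.ocpapi import Client

# Default construction targets open-catalyst-api.metademolab.com over https.
client = Client()

# A hypothetical alternative deployment; this host does not exist and is
# shown only to illustrate the host/scheme constructor arguments.
staging_client = Client(host="example-ocp-api.internal", scheme="http")
print(staging_client.host)
```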
-
-async get_models() fairchem.demo.ocpapi.client.models.Models#
-

Fetch the list of models that are supported in the API.

-
-
Raises:
-
-
-
Returns:
-

The models that are supported in the API.

-
-
-
- -
-
-async get_bulks() fairchem.demo.ocpapi.client.models.Bulks#
-

Fetch the list of bulk materials that are supported in the API.

-
-
Raises:
-
-
-
Returns:
-

The bulks that are supported throughout the API.

-
-
-
- -
-
-async get_adsorbates() fairchem.demo.ocpapi.client.models.Adsorbates#
-

Fetch the list of adsorbates that are supported in the API.

-
-
Raises:
-
-
-
Returns:
-

The adsorbates that are supported throughout the API.

-
-
-
- -
-
-async get_slabs(bulk: str | fairchem.demo.ocpapi.client.models.Bulk) fairchem.demo.ocpapi.client.models.Slabs#
-

Get a unique list of slabs for the input bulk structure.

-
-
Parameters:
-

bulk – If a string, the id of the bulk to use. Otherwise the Bulk -instance to use.

-
-
Raises:
-
-
-
Returns:
-

Slabs for each of the unique surfaces of the material.

-
-
-
- -
-
-async get_adsorbate_slab_configs(adsorbate: str, slab: fairchem.demo.ocpapi.client.models.Slab) fairchem.demo.ocpapi.client.models.AdsorbateSlabConfigs#
-

Get a list of possible binding sites for the input adsorbate on the -input slab.

-
-
Parameters:
-
    -
  • adsorbate – Description of the adsorbate to place.

  • -
  • slab – Information about the slab on which the adsorbate should -be placed.

  • -
-
-
Raises:
-
-
-
Returns:
-

Configurations for each adsorbate binding site on the slab.

-
-
-
- -
-
-async submit_adsorbate_slab_relaxations(adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.models.Atoms], bulk: fairchem.demo.ocpapi.client.models.Bulk, slab: fairchem.demo.ocpapi.client.models.Slab, model: str, ephemeral: bool = False) fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsSystem#
-

Starts relaxations of the input adsorbate configurations on the input -slab using energies and forces returned by the input model. Relaxations -are run asynchronously and results can be fetched using the system id -that is returned from this method.

-
-
Parameters:
-
    -
  • adsorbate – Description of the adsorbate being simulated.

  • -
  • adsorbate_configs – List of adsorbate configurations to relax. This -should only include the adsorbates themselves; the surface is -defined in the “slab” field that is a peer to this one.

  • -
  • bulk – Details of the bulk material being simulated.

  • -
  • slab – The structure of the slab on which adsorbates are placed.

  • -
  • model – The model that will be used to evaluate energies and forces -during relaxations.

  • -
  • ephemeral – If False (default), any later attempt to delete the -generated relaxations will be rejected. If True, deleting the -relaxations will be allowed, which is generally useful for -testing when there is no reason for results to be persisted.

  • -
-
-
Raises:
-
-
-
Returns:
-

IDs of the relaxations.

-
-
-
- -
-
-async get_adsorbate_slab_relaxations_request(system_id: str) fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsRequest#
-

Fetches the original relaxations request for the input system.

-
-
Parameters:
-

system_id – The ID of the system to fetch.

-
-
Raises:
-
-
-
Returns:
-

The original request that was made when submitting relaxations.

-
-
-
- -
-
-async get_adsorbate_slab_relaxations_results(system_id: str, config_ids: List[int] | None = None, fields: List[str] | None = None) fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsResults#
-

Fetches relaxation results for the input system.

-
-
Parameters:
-
    -
  • system_id – The system id of the relaxations.

  • -
  • config_ids – If defined and not empty, a subset of configurations -to fetch. Otherwise all configurations are returned.

  • -
  • fields – If defined and not empty, a subset of fields in each -configuration to fetch. Otherwise all fields are returned.

  • -
-
-
Raises:
-
-
-
Returns:
-

The relaxation results for each configuration in the system.

-
-
-
- -
-
-async delete_adsorbate_slab_relaxations(system_id: str) None#
-

Deletes all relaxation results for the input system.

-
-
Parameters:
-

system_id – The ID of the system to delete.

-
-
Raises:
-
-
-
-
- -
-
-async _run_request(path: str, method: str, **kwargs) str#
-

Helper method that runs the input request on a thread so that -it doesn’t block the event loop on the calling thread.

-
-
Parameters:
-
    -
  • path – The URL path to make the request against.

  • -
  • method – The HTTP method to use (GET, POST, etc.).

  • -
-
-
Raises:
-
-
-
Returns:
-

The response body from the request as a string.

-
-
-
- -
- -
-
-exception fairchem.demo.ocpapi.client.NonRetryableRequestException(method: str, url: str, cause: str)#
-

Bases: RequestException

-

Exception raised when an API call is rejected for a reason that will -not succeed on retry. For example, this might include a malformed request -or action that is not allowed.

-
- -
-
-exception fairchem.demo.ocpapi.client.RateLimitExceededException(method: str, url: str, retry_after: datetime.timedelta | None = None)#
-

Bases: RequestException

-

Exception raised when an API call is rejected because a rate limit has -been exceeded.

-
-
-retry_after#
-

If known, the time to wait before the next attempt to -call the API should be made.

-
- -
- -
-
-exception fairchem.demo.ocpapi.client.RequestException(method: str, url: str, cause: str)#
-

Bases: Exception

-

Exception raised any time there is an error while making an API call.

-
- -
-
-class fairchem.demo.ocpapi.client.Adsorbates#
-

Bases: _DataModel

-

Stores the response from a request to fetch adsorbates supported in the -API.

-
-
-adsorbates_supported: List[str]#
-

List of adsorbates that can be used in the API.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.AdsorbateSlabConfigs#
-

Bases: _DataModel

-

Stores the response from a request to fetch placements of a single adsorbate on a slab.

-
-
-adsorbate_configs: List[Atoms]#
-

List of structures, each representing one possible adsorbate placement.

-
- -
-
-slab: Slab#
-

The structure of the slab on which the adsorbate is placed.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult#
-

Bases: _DataModel

-

Stores information about a single adsorbate slab configuration, including -outputs for the model used in relaxations.

-

The API to fetch relaxation results supports requesting a subset of fields in order to limit the size of response payloads. Optional attributes will be defined only if they are included in the response.

-
-
-config_id: int#
-

ID of the configuration within the system.

-
- -
-
-status: Status#
-

The status of the request for information about this configuration.

-
- -
-
-system_id: str | None#
-

The ID of the system in which the configuration was originally submitted.

-
- -
-
-cell: Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]] | None#
-

3x3 matrix with unit cell vectors.

-
- -
-
-pbc: Tuple[bool, bool, bool] | None#
-

Whether the structure is periodic along the a, b, and c lattice vectors, -respectively.

-
- -
-
-numbers: List[int] | None#
-

The atomic number of each atom in the unit cell.

-
- -
-
-positions: List[Tuple[float, float, float]] | None#
-

The coordinates of each atom in the unit cell, relative to the cartesian -frame.

-
- -
-
-tags: List[int] | None#
-

Labels for each atom in the unit cell where 0 represents a subsurface atom -(fixed during optimization), 1 represents a surface atom, and 2 represents -an adsorbate atom.

-
- -
-
-energy: float | None#
-

The energy of the configuration.

-
- -
-
-energy_trajectory: List[float] | None#
-

The energy of the configuration at each point along the relaxation -trajectory.

-
- -
-
-forces: List[Tuple[float, float, float]] | None#
-

The forces on each atom in the relaxed structure.

-
- -
-
-to_ase_atoms() ase.Atoms#
-

Creates an ase.Atoms object with the positions, element numbers, -etc. populated from values on this object.

-

The predicted energy and forces will also be copied to the new -ase.Atoms object as a SinglePointCalculator (a calculator that -stores the results of an already-run simulation).

-
-
Returns:
-

ase.Atoms object with values from this object.

-
-
-
- -
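As a hedged illustration of the conversion to ASE (assuming a result object fetched earlier; `result` is not defined here):

```python
# Assumes `result` is an AdsorbateSlabRelaxationResult fetched earlier with
# Client.get_adsorbate_slab_relaxations_results(), and that the energy and
# forces fields were included in the fetched response.
atoms = result.to_ase_atoms()

# The attached SinglePointCalculator exposes the predicted outputs through the
# standard ASE interface.
print(atoms.get_potential_energy())
print(atoms.get_forces())
```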
- -
-
-class fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsRequest#
-

Bases: _DataModel

-

Stores the request to submit a new batch of adsorbate slab relaxations.

-
-
-adsorbate: str#
-

Description of the adsorbate.

-
- -
-
-adsorbate_configs: List[Atoms]#
-

List of adsorbate placements being relaxed.

-
- -
-
-bulk: Bulk#
-

Information about the original bulk structure used to create the slab.

-
- -
-
-slab: Slab#
-

The structure of the slab on which adsorbates are placed.

-
- -
-
-model: str#
-

The type of the ML model being used during relaxations.

-
- -
-
-ephemeral: bool | None#
-

Whether the relaxations can be deleted (assume they cannot be deleted if -None).

-
- -
-
-adsorbate_reaction: str | None#
-

If possible, an html-formatted string describing the reaction will be added -to this field.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsResults#
-

Bases: _DataModel

-

Stores the response from a request for results of adsorbate slab -relaxations.

-
-
-configs: List[AdsorbateSlabRelaxationResult]#
-

List of configurations in the system, each representing one placement of -an adsorbate on a slab surface.

-
- -
-
-omitted_config_ids: List[int]#
-

List of IDs of configurations that were requested but omitted by the -server. Results for these IDs can be requested again.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsSystem#
-

Bases: _DataModel

-

Stores the response from a request to submit a new batch of adsorbate -slab relaxations.

-
-
-system_id: str#
-

Unique ID for this set of relaxations which can be used to fetch results -later.

-
- -
-
-config_ids: List[int]#
-

The list of IDs assigned to each of the input adsorbate placements, in the -same order in which they were submitted.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.Atoms#
-

Bases: _DataModel

-

Subset of the fields from an ASE Atoms object that are used within this -API.

-
-
-cell: Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]]#
-

3x3 matrix with unit cell vectors.

-
- -
-
-pbc: Tuple[bool, bool, bool]#
-

Whether the structure is periodic along the a, b, and c lattice vectors, -respectively.

-
- -
-
-numbers: List[int]#
-

The atomic number of each atom in the unit cell.

-
- -
-
-positions: List[Tuple[float, float, float]]#
-

The coordinates of each atom in the unit cell, relative to the cartesian -frame.

-
- -
-
-tags: List[int]#
-

Labels for each atom in the unit cell where 0 represents a subsurface atom -(fixed during optimization), 1 represents a surface atom, and 2 represents -an adsorbate atom.

-
- -
-
-to_ase_atoms() ase.Atoms#
-

Creates an ase.Atoms object with the positions, element numbers, -etc. populated from values on this object.

-
-
Returns:
-

ase.Atoms object with values from this object.

-
-
-
- -
- -
-
-class fairchem.demo.ocpapi.client.Bulk#
-

Bases: _DataModel

-

Stores information about a single bulk material.

-
-
-src_id: str#
-

The ID of the material.

-
- -
-
-formula: str#
-

The chemical formula of the material.

-
- -
-
-elements: List[str]#
-

The list of elements in the material.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.Bulks#
-

Bases: _DataModel

-

Stores the response from a request to fetch bulks supported in the API.

-
-
-bulks_supported: List[Bulk]#
-

List of bulks that can be used in the API.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.Model#
-

Bases: _DataModel

-

Stores information about a single model supported in the API.

-
-
-id: str#
-

The ID of the model.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.Models#
-

Bases: _DataModel

-

Stores the response from a request for models supported in the API.

-
-
-models: List[Model]#
-

The list of models that are supported.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.Slab#
-

Bases: _DataModel

-

Stores all information about a slab that is returned from the API.

-
-
-atoms: Atoms#
-

The structure of the slab.

-
- -
-
-metadata: SlabMetadata#
-

Extra information about the slab.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.SlabMetadata#
-

Bases: _DataModel

-

Stores metadata about a slab that is returned from the API.

-
-
-bulk_src_id: str#
-

The ID of the bulk material from which the slab was derived.

-
- -
-
-millers: Tuple[int, int, int]#
-

The Miller indices of the slab relative to bulk structure.

-
- -
-
-shift: float#
-

The position along the vector defined by the Miller indices at which a -cut was taken to generate the slab surface.

-
- -
-
-top: bool#
-

If False, the top and bottom surfaces for this millers/shift pair are -distinct and this slab represents the bottom surface.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.Slabs#
-

Bases: _DataModel

-

Stores the response from a request to fetch slabs for a bulk structure.

-
-
-slabs: List[Slab]#
-

The list of slabs that were generated from the input bulk structure.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.Status(*args, **kwds)#
-

Bases: enum.Enum

-

Relaxation status of a single adsorbate placement on a slab.

-
-
-NOT_AVAILABLE = 'not_available'#
-

The configuration exists but the result is not yet available. It is -possible that checking again in the future could yield a result.

-
- -
-
-FAILED_RELAXATION = 'failed_relaxation'#
-

The relaxation failed for this configuration.

-
- -
-
-SUCCESS = 'success'#
-

The relaxation was successful and the requested information about the -configuration was returned.

-
- -
-
-DOES_NOT_EXIST = 'does_not_exist'#
-

The requested configuration does not exist.

-
- -
-
-__str__() str#
-

Return str(self).

-
- -
- -
-
-fairchem.demo.ocpapi.client.get_results_ui_url(api_host: str, system_id: str) str | None#
-

Generates the URL at which results for the input system can be -visualized.

-
-
Parameters:
  • api_host – The API host on which the system was run.

  • system_id – ID of the system being visualized.

Returns:

The URL at which the input system can be visualized. None if the API host is not recognized.
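For instance, a results page URL can be built from a system ID returned when relaxations were submitted; the system ID below is a placeholder, not a real system.

```python
from fairchem.demo.ocpapi import get_results_ui_url

# The system ID below is a placeholder, not a real system.
url = get_results_ui_url(
    api_host="open-catalyst-api.metademolab.com",
    system_id="00000000-0000-0000-0000-000000000000",
)
if url is not None:
    print(f"View results at: {url}")
```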

\ No newline at end of file
diff --git a/autoapi/fairchem/demo/ocpapi/client/models/index.html b/autoapi/fairchem/demo/ocpapi/client/models/index.html
deleted file mode 100644
index 4c03f2c61..000000000
--- a/autoapi/fairchem/demo/ocpapi/client/models/index.html
+++ /dev/null
@@ -1,1338 +0,0 @@
[deleted page head: fairchem.demo.ocpapi.client.models — FAIR Chemistry Documentation]

fairchem.demo.ocpapi.client.models#

-
-

Module Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

_DataModel

Base class for all data models.

Model

Stores information about a single model supported in the API.

Models

Stores the response from a request for models supported in the API.

Bulk

Stores information about a single bulk material.

Bulks

Stores the response from a request to fetch bulks supported in the API.

Adsorbates

Stores the response from a request to fetch adsorbates supported in the

Atoms

Subset of the fields from an ASE Atoms object that are used within this

SlabMetadata

Stores metadata about a slab that is returned from the API.

Slab

Stores all information about a slab that is returned from the API.

Slabs

Stores the response from a request to fetch slabs for a bulk structure.

AdsorbateSlabConfigs

Stores the response from a request to fetch placements of a single

AdsorbateSlabRelaxationsSystem

Stores the response from a request to submit a new batch of adsorbate

AdsorbateSlabRelaxationsRequest

Stores the request to submit a new batch of adsorbate slab relaxations.

Status

Relaxation status of a single adsorbate placement on a slab.

AdsorbateSlabRelaxationResult

Stores information about a single adsorbate slab configuration, including

AdsorbateSlabRelaxationsResults

Stores the response from a request for results of adsorbate slab

-
-
-class fairchem.demo.ocpapi.client.models._DataModel#
-

Base class for all data models.

-
-
-other_fields: dataclasses_json.CatchAll#
-

Fields that may have been added to the API but are not yet supported explicitly in this class.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.Model#
-

Bases: _DataModel

-

Stores information about a single model supported in the API.

-
-
-id: str#
-

The ID of the model.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.Models#
-

Bases: _DataModel

-

Stores the response from a request for models supported in the API.

-
-
-models: List[Model]#
-

The list of models that are supported.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.Bulk#
-

Bases: _DataModel

-

Stores information about a single bulk material.

-
-
-src_id: str#
-

The ID of the material.

-
- -
-
-formula: str#
-

The chemical formula of the material.

-
- -
-
-elements: List[str]#
-

The list of elements in the material.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.Bulks#
-

Bases: _DataModel

-

Stores the response from a request to fetch bulks supported in the API.

-
-
-bulks_supported: List[Bulk]#
-

List of bulks that can be used in the API.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.Adsorbates#
-

Bases: _DataModel

-

Stores the response from a request to fetch adsorbates supported in the -API.

-
-
-adsorbates_supported: List[str]#
-

List of adsorbates that can be used in the API.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.Atoms#
-

Bases: _DataModel

-

Subset of the fields from an ASE Atoms object that are used within this -API.

-
-
-cell: Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]]#
-

3x3 matrix with unit cell vectors.

-
- -
-
-pbc: Tuple[bool, bool, bool]#
-

Whether the structure is periodic along the a, b, and c lattice vectors, -respectively.

-
- -
-
-numbers: List[int]#
-

The atomic number of each atom in the unit cell.

-
- -
-
-positions: List[Tuple[float, float, float]]#
-

The coordinates of each atom in the unit cell, relative to the cartesian -frame.

-
- -
-
-tags: List[int]#
-

Labels for each atom in the unit cell where 0 represents a subsurface atom -(fixed during optimization), 1 represents a surface atom, and 2 represents -an adsorbate atom.

-
- -
-
-to_ase_atoms() ase.Atoms#
-

Creates an ase.Atoms object with the positions, element numbers, -etc. populated from values on this object.

-
-
Returns:
-

ase.Atoms object with values from this object.

-
-
-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.SlabMetadata#
-

Bases: _DataModel

-

Stores metadata about a slab that is returned from the API.

-
-
-bulk_src_id: str#
-

The ID of the bulk material from which the slab was derived.

-
- -
-
-millers: Tuple[int, int, int]#
-

The Miller indices of the slab relative to bulk structure.

-
- -
-
-shift: float#
-

The position along the vector defined by the Miller indices at which a -cut was taken to generate the slab surface.

-
- -
-
-top: bool#
-

If False, the top and bottom surfaces for this millers/shift pair are -distinct and this slab represents the bottom surface.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.Slab#
-

Bases: _DataModel

-

Stores all information about a slab that is returned from the API.

-
-
-atoms: Atoms#
-

The structure of the slab.

-
- -
-
-metadata: SlabMetadata#
-

Extra information about the slab.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.Slabs#
-

Bases: _DataModel

-

Stores the response from a request to fetch slabs for a bulk structure.

-
-
-slabs: List[Slab]#
-

The list of slabs that were generated from the input bulk structure.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.AdsorbateSlabConfigs#
-

Bases: _DataModel

-

Stores the response from a request to fetch placements of a single adsorbate on a slab.

-
-
-adsorbate_configs: List[Atoms]#
-

List of structures, each representing one possible adsorbate placement.

-
- -
-
-slab: Slab#
-

The structure of the slab on which the adsorbate is placed.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsSystem#
-

Bases: _DataModel

-

Stores the response from a request to submit a new batch of adsorbate -slab relaxations.

-
-
-system_id: str#
-

Unique ID for this set of relaxations which can be used to fetch results -later.

-
- -
-
-config_ids: List[int]#
-

The list of IDs assigned to each of the input adsorbate placements, in the -same order in which they were submitted.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsRequest#
-

Bases: _DataModel

-

Stores the request to submit a new batch of adsorbate slab relaxations.

-
-
-adsorbate: str#
-

Description of the adsorbate.

-
- -
-
-adsorbate_configs: List[Atoms]#
-

List of adsorbate placements being relaxed.

-
- -
-
-bulk: Bulk#
-

Information about the original bulk structure used to create the slab.

-
- -
-
-slab: Slab#
-

The structure of the slab on which adsorbates are placed.

-
- -
-
-model: str#
-

The type of the ML model being used during relaxations.

-
- -
-
-ephemeral: bool | None#
-

Whether the relaxations can be deleted (assume they cannot be deleted if -None).

-
- -
-
-adsorbate_reaction: str | None#
-

If possible, an html-formatted string describing the reaction will be added -to this field.

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.Status(*args, **kwds)#
-

Bases: enum.Enum

-

Relaxation status of a single adsorbate placement on a slab.

-
-
-NOT_AVAILABLE = 'not_available'#
-

The configuration exists but the result is not yet available. It is -possible that checking again in the future could yield a result.

-
- -
-
-FAILED_RELAXATION = 'failed_relaxation'#
-

The relaxation failed for this configuration.

-
- -
-
-SUCCESS = 'success'#
-

The relaxation was successful and the requested information about the -configuration was returned.

-
- -
-
-DOES_NOT_EXIST = 'does_not_exist'#
-

The requested configuration does not exist.

-
- -
-
-__str__() str#
-

Return str(self).

-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationResult#
-

Bases: _DataModel

-

Stores information about a single adsorbate slab configuration, including -outputs for the model used in relaxations.

-

The API to fetch relaxation results supports requesting a subset of fields in order to limit the size of response payloads. Optional attributes will be defined only if they are included in the response.

-
-
-config_id: int#
-

ID of the configuration within the system.

-
- -
-
-status: Status#
-

The status of the request for information about this configuration.

-
- -
-
-system_id: str | None#
-

The ID of the system in which the configuration was originally submitted.

-
- -
-
-cell: Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]] | None#
-

3x3 matrix with unit cell vectors.

-
- -
-
-pbc: Tuple[bool, bool, bool] | None#
-

Whether the structure is periodic along the a, b, and c lattice vectors, -respectively.

-
- -
-
-numbers: List[int] | None#
-

The atomic number of each atom in the unit cell.

-
- -
-
-positions: List[Tuple[float, float, float]] | None#
-

The coordinates of each atom in the unit cell, relative to the cartesian -frame.

-
- -
-
-tags: List[int] | None#
-

Labels for each atom in the unit cell where 0 represents a subsurface atom -(fixed during optimization), 1 represents a surface atom, and 2 represents -an adsorbate atom.

-
- -
-
-energy: float | None#
-

The energy of the configuration.

-
- -
-
-energy_trajectory: List[float] | None#
-

The energy of the configuration at each point along the relaxation -trajectory.

-
- -
-
-forces: List[Tuple[float, float, float]] | None#
-

The forces on each atom in the relaxed structure.

-
- -
-
-to_ase_atoms() ase.Atoms#
-

Creates an ase.Atoms object with the positions, element numbers, -etc. populated from values on this object.

-

The predicted energy and forces will also be copied to the new -ase.Atoms object as a SinglePointCalculator (a calculator that -stores the results of an already-run simulation).

-
-
Returns:
-

ase.Atoms object with values from this object.

-
-
-
- -
- -
-
-class fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsResults#
-

Bases: _DataModel

-

Stores the response from a request for results of adsorbate slab -relaxations.

-
-
-configs: List[AdsorbateSlabRelaxationResult]#
-

List of configurations in the system, each representing one placement of -an adsorbate on a slab surface.

-
- -
-
-omitted_config_ids: List[int]#
-

List of IDs of configurations that were requested but omitted by the -server. Results for these IDs can be requested again.

\ No newline at end of file
diff --git a/autoapi/fairchem/demo/ocpapi/client/ui/index.html b/autoapi/fairchem/demo/ocpapi/client/ui/index.html
deleted file mode 100644
index 7d9e2c228..000000000
--- a/autoapi/fairchem/demo/ocpapi/client/ui/index.html
+++ /dev/null
@@ -1,674 +0,0 @@
[deleted page head: fairchem.demo.ocpapi.client.ui — FAIR Chemistry Documentation]

fairchem.demo.ocpapi.client.ui#

-
-

Module Contents#

-
-

Functions#

- - - - - - -

get_results_ui_url(→ Optional[str])

Generates the URL at which results for the input system can be

-
-
-

Attributes#

- - - - - - -

_API_TO_UI_HOSTS

-
-
-fairchem.demo.ocpapi.client.ui._API_TO_UI_HOSTS: Dict[str, str]#
-
- -
-
-fairchem.demo.ocpapi.client.ui.get_results_ui_url(api_host: str, system_id: str) str | None#
-

Generates the URL at which results for the input system can be -visualized.

-
-
Parameters:
-
    -
  • api_host – The API host on which the system was run.

  • -
  • system_id – ID of the system being visualized.

  • -
-
-
Returns:
-

The URL at which the input system can be visualized. None if the -API host is not recognized.

\ No newline at end of file
diff --git a/autoapi/fairchem/demo/ocpapi/index.html b/autoapi/fairchem/demo/ocpapi/index.html
deleted file mode 100644
index 70ec3fc23..000000000
--- a/autoapi/fairchem/demo/ocpapi/index.html
+++ /dev/null
@@ -1,2206 +0,0 @@
[deleted page head: fairchem.demo.ocpapi — FAIR Chemistry Documentation]

fairchem.demo.ocpapi#

-
-

Subpackages#

- -
-
-

Submodules#

- -
-
-

Package Contents#

-
-

Classes#

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Client

Exposes each route in the OCP API as a method.

Adsorbates

Stores the response from a request to fetch adsorbates supported in the

AdsorbateSlabConfigs

Stores the response from a request to fetch placements of a single

AdsorbateSlabRelaxationResult

Stores information about a single adsorbate slab configuration, including

AdsorbateSlabRelaxationsRequest

Stores the request to submit a new batch of adsorbate slab relaxations.

AdsorbateSlabRelaxationsResults

Stores the response from a request for results of adsorbate slab

AdsorbateSlabRelaxationsSystem

Stores the response from a request to submit a new batch of adsorbate

Atoms

Subset of the fields from an ASE Atoms object that are used within this

Bulk

Stores information about a single bulk material.

Bulks

Stores the response from a request to fetch bulks supported in the API.

Model

Stores information about a single model supported in the API.

Models

Stores the response from a request for models supported in the API.

Slab

Stores all information about a slab that is returned from the API.

SlabMetadata

Stores metadata about a slab that is returned from the API.

Slabs

Stores the response from a request to fetch slabs for a bulk structure.

Status

Relaxation status of a single adsorbate placement on a slab.

AdsorbateBindingSites

Stores the inputs and results of a set of relaxations of adsorbate

AdsorbateSlabRelaxations

Stores the relaxations of adsorbate placements on the surface of a slab.

Lifetime

Represents different lifetimes when running relaxations.

keep_all_slabs

Adslab filter that returns all slabs.

keep_slabs_with_miller_indices

Adslab filter that keeps any slabs with the configured miller indices.

prompt_for_slabs_to_keep

Adslab filter that presents the user with an interactive prompt to choose

RateLimitLogging

Controls logging when rate limits are hit.

-
-
-

Functions#

- - - - - - - - - - - - - - - - - - -

get_results_ui_url(→ Optional[str])

Generates the URL at which results for the input system can be

find_adsorbate_binding_sites(→ AdsorbateBindingSites)

Search for adsorbate binding sites on surfaces of a bulk material.

get_adsorbate_slab_relaxation_results(...)

Wrapper around Client.get_adsorbate_slab_relaxations_results() that

wait_for_adsorbate_slab_relaxations(→ Dict[int, ...)

Blocks until all relaxations in the input system have finished, whether

retry_api_calls(→ Any)

Decorator with sensible defaults for retrying calls to the OCP API.

-
-
-

Attributes#

- - - - - - - - - -

NO_LIMIT

NoLimitType

-
-
-class fairchem.demo.ocpapi.Client(host: str = 'open-catalyst-api.metademolab.com', scheme: str = 'https')#
-

Exposes each route in the OCP API as a method.

-
-
-property host: str#
-

The host being called by this client.

-
- -
-
-async get_models() fairchem.demo.ocpapi.client.models.Models#
-

Fetch the list of models that are supported in the API.

-
-
Raises:
-
-
-
Returns:
-

The models that are supported in the API.

-
-
-
- -
-
-async get_bulks() fairchem.demo.ocpapi.client.models.Bulks#
-

Fetch the list of bulk materials that are supported in the API.

-
-
Raises:
-
-
-
Returns:
-

The bulks that are supported throughout the API.

-
-
-
- -
-
-async get_adsorbates() fairchem.demo.ocpapi.client.models.Adsorbates#
-

Fetch the list of adsorbates that are supported in the API.

-
-
Raises:
-
-
-
Returns:
-

The adsorbates that are supported throughout the API.

-
-
-
- -
-
-async get_slabs(bulk: str | fairchem.demo.ocpapi.client.models.Bulk) fairchem.demo.ocpapi.client.models.Slabs#
-

Get a unique list of slabs for the input bulk structure.

-
-
Parameters:
-

bulk – If a string, the id of the bulk to use. Otherwise the Bulk -instance to use.

-
-
Raises:
-
-
-
Returns:
-

Slabs for each of the unique surfaces of the material.

-
-
-
- -
-
-async get_adsorbate_slab_configs(adsorbate: str, slab: fairchem.demo.ocpapi.client.models.Slab) fairchem.demo.ocpapi.client.models.AdsorbateSlabConfigs#
-

Get a list of possible binding sites for the input adsorbate on the -input slab.

-
-
Parameters:
-
    -
  • adsorbate – Description of the adsorbate to place.

  • -
  • slab – Information about the slab on which the adsorbate should -be placed.

  • -
-
-
Raises:
-
-
-
Returns:
-

Configurations for each adsorbate binding site on the slab.

-
-
-
- -
-
-async submit_adsorbate_slab_relaxations(adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.models.Atoms], bulk: fairchem.demo.ocpapi.client.models.Bulk, slab: fairchem.demo.ocpapi.client.models.Slab, model: str, ephemeral: bool = False) fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsSystem#
-

Starts relaxations of the input adsorbate configurations on the input -slab using energies and forces returned by the input model. Relaxations -are run asynchronously and results can be fetched using the system id -that is returned from this method.

-
-
Parameters:
-
    -
  • adsorbate – Description of the adsorbate being simulated.

  • -
  • adsorbate_configs – List of adsorbate configurations to relax. This -should only include the adsorbates themselves; the surface is -defined in the “slab” field that is a peer to this one.

  • -
  • bulk – Details of the bulk material being simulated.

  • -
  • slab – The structure of the slab on which adsorbates are placed.

  • -
  • model – The model that will be used to evaluate energies and forces -during relaxations.

  • -
  • ephemeral – If False (default), any later attempt to delete the -generated relaxations will be rejected. If True, deleting the -relaxations will be allowed, which is generally useful for -testing when there is no reason for results to be persisted.

  • -
-
-
Raises:
-
-
-
Returns:
-

IDs of the relaxations.

-
-
-
- -
-
-async get_adsorbate_slab_relaxations_request(system_id: str) fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsRequest#
-

Fetches the original relaxations request for the input system.

-
-
Parameters:
-

system_id – The ID of the system to fetch.

-
-
Raises:
-
-
-
Returns:
-

The original request that was made when submitting relaxations.

-
-
-
- -
-
-async get_adsorbate_slab_relaxations_results(system_id: str, config_ids: List[int] | None = None, fields: List[str] | None = None) fairchem.demo.ocpapi.client.models.AdsorbateSlabRelaxationsResults#
-

Fetches relaxation results for the input system.

-
-
Parameters:
-
    -
  • system_id – The system id of the relaxations.

  • -
  • config_ids – If defined and not empty, a subset of configurations -to fetch. Otherwise all configurations are returned.

  • -
  • fields – If defined and not empty, a subset of fields in each -configuration to fetch. Otherwise all fields are returned.

  • -
-
-
Raises:
-
-
-
Returns:
-

The relaxation results for each configuration in the system.

-
-
-
- -
-
-async delete_adsorbate_slab_relaxations(system_id: str) None#
-

Deletes all relaxation results for the input system.

-
-
Parameters:
-

system_id – The ID of the system to delete.

-
-
Raises:
-
-
-
-
- -
-
-async _run_request(path: str, method: str, **kwargs) str#
-

Helper method that runs the input request on a thread so that -it doesn’t block the event loop on the calling thread.

-
-
Parameters:
-
    -
  • path – The URL path to make the request against.

  • -
  • method – The HTTP method to use (GET, POST, etc.).

  • -
-
-
Raises:
-
-
-
Returns:
-

The response body from the request as a string.

-
-
-
- -
- -
-
-exception fairchem.demo.ocpapi.NonRetryableRequestException(method: str, url: str, cause: str)#
-

Bases: RequestException

-

Exception raised when an API call is rejected for a reason that will -not succeed on retry. For example, this might include a malformed request -or action that is not allowed.

-
- -
-
-exception fairchem.demo.ocpapi.RateLimitExceededException(method: str, url: str, retry_after: datetime.timedelta | None = None)#
-

Bases: RequestException

-

Exception raised when an API call is rejected because a rate limit has -been exceeded.

-
-
-retry_after#
-

If known, the time to wait before the next attempt to -call the API should be made.

-
- -
- -
-
-exception fairchem.demo.ocpapi.RequestException(method: str, url: str, cause: str)#
-

Bases: Exception

-

Exception raised any time there is an error while making an API call.

-
- -
-
-class fairchem.demo.ocpapi.Adsorbates#
-

Bases: _DataModel

-

Stores the response from a request to fetch adsorbates supported in the -API.

-
-
-adsorbates_supported: List[str]#
-

List of adsorbates that can be used in the API.

-
- -
- -
-
-class fairchem.demo.ocpapi.AdsorbateSlabConfigs#
-

Bases: _DataModel

-

Stores the response from a request to fetch placements of a single adsorbate on a slab.

-
-
-adsorbate_configs: List[Atoms]#
-

List of structures, each representing one possible adsorbate placement.

-
- -
-
-slab: Slab#
-

The structure of the slab on which the adsorbate is placed.

-
- -
- -
-
-class fairchem.demo.ocpapi.AdsorbateSlabRelaxationResult#
-

Bases: _DataModel

-

Stores information about a single adsorbate slab configuration, including -outputs for the model used in relaxations.

-

The API to fetch relaxation results supports requesting a subset of fields in order to limit the size of response payloads. Optional attributes will be defined only if they are included in the response.

-
-
-config_id: int#
-

ID of the configuration within the system.

-
- -
-
-status: Status#
-

The status of the request for information about this configuration.

-
- -
-
-system_id: str | None#
-

The ID of the system in which the configuration was originally submitted.

-
- -
-
-cell: Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]] | None#
-

3x3 matrix with unit cell vectors.

-
- -
-
-pbc: Tuple[bool, bool, bool] | None#
-

Whether the structure is periodic along the a, b, and c lattice vectors, -respectively.

-
- -
-
-numbers: List[int] | None#
-

The atomic number of each atom in the unit cell.

-
- -
-
-positions: List[Tuple[float, float, float]] | None#
-

The coordinates of each atom in the unit cell, relative to the cartesian -frame.

-
- -
-
-tags: List[int] | None#
-

Labels for each atom in the unit cell where 0 represents a subsurface atom -(fixed during optimization), 1 represents a surface atom, and 2 represents -an adsorbate atom.

-
- -
-
-energy: float | None#
-

The energy of the configuration.

-
- -
-
-energy_trajectory: List[float] | None#
-

The energy of the configuration at each point along the relaxation -trajectory.

-
- -
-
-forces: List[Tuple[float, float, float]] | None#
-

The forces on each atom in the relaxed structure.

-
- -
-
-to_ase_atoms() ase.Atoms#
-

Creates an ase.Atoms object with the positions, element numbers, -etc. populated from values on this object.

-

The predicted energy and forces will also be copied to the new -ase.Atoms object as a SinglePointCalculator (a calculator that -stores the results of an already-run simulation).

-
-
Returns:
-

ase.Atoms object with values from this object.

-
-
-
- -
- -
-
-class fairchem.demo.ocpapi.AdsorbateSlabRelaxationsRequest#
-

Bases: _DataModel

-

Stores the request to submit a new batch of adsorbate slab relaxations.

-
-
-adsorbate: str#
-

Description of the adsorbate.

-
- -
-
-adsorbate_configs: List[Atoms]#
-

List of adsorbate placements being relaxed.

-
- -
-
-bulk: Bulk#
-

Information about the original bulk structure used to create the slab.

-
- -
-
-slab: Slab#
-

The structure of the slab on which adsorbates are placed.

-
- -
-
-model: str#
-

The type of the ML model being used during relaxations.

-
- -
-
-ephemeral: bool | None#
-

Whether the relaxations can be deleted (assume they cannot be deleted if -None).

-
- -
-
-adsorbate_reaction: str | None#
-

If possible, an html-formatted string describing the reaction will be added -to this field.

-
- -
- -
-
-class fairchem.demo.ocpapi.AdsorbateSlabRelaxationsResults#
-

Bases: _DataModel

-

Stores the response from a request for results of adsorbate slab -relaxations.

-
-
-configs: List[AdsorbateSlabRelaxationResult]#
-

List of configurations in the system, each representing one placement of -an adsorbate on a slab surface.

-
- -
-
-omitted_config_ids: List[int]#
-

List of IDs of configurations that were requested but omitted by the -server. Results for these IDs can be requested again.

-
- -
- -
-
-class fairchem.demo.ocpapi.AdsorbateSlabRelaxationsSystem#
-

Bases: _DataModel

-

Stores the response from a request to submit a new batch of adsorbate -slab relaxations.

-
-
-system_id: str#
-

Unique ID for this set of relaxations which can be used to fetch results -later.

-
- -
-
-config_ids: List[int]#
-

The list of IDs assigned to each of the input adsorbate placements, in the -same order in which they were submitted.

-
- -
- -
-
-class fairchem.demo.ocpapi.Atoms#
-

Bases: _DataModel

-

Subset of the fields from an ASE Atoms object that are used within this -API.

-
-
-cell: Tuple[Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]]#
-

3x3 matrix with unit cell vectors.

-
- -
-
-pbc: Tuple[bool, bool, bool]#
-

Whether the structure is periodic along the a, b, and c lattice vectors, -respectively.

-
- -
-
-numbers: List[int]#
-

The atomic number of each atom in the unit cell.

-
- -
-
-positions: List[Tuple[float, float, float]]#
-

The coordinates of each atom in the unit cell, relative to the cartesian -frame.

-
- -
-
-tags: List[int]#
-

Labels for each atom in the unit cell where 0 represents a subsurface atom -(fixed during optimization), 1 represents a surface atom, and 2 represents -an adsorbate atom.

-
- -
-
-to_ase_atoms() ase.Atoms#
-

Creates an ase.Atoms object with the positions, element numbers, -etc. populated from values on this object.

-
-
Returns:
-

ase.Atoms object with values from this object.

-
-
-
- -
- -
-
-class fairchem.demo.ocpapi.Bulk#
-

Bases: _DataModel

-

Stores information about a single bulk material.

-
-
-src_id: str#
-

The ID of the material.

-
- -
-
-formula: str#
-

The chemical formula of the material.

-
- -
-
-elements: List[str]#
-

The list of elements in the material.

-
- -
- -
-
-class fairchem.demo.ocpapi.Bulks#
-

Bases: _DataModel

-

Stores the response from a request to fetch bulks supported in the API.

-
-
-bulks_supported: List[Bulk]#
-

List of bulks that can be used in the API.

-
- -
- -
-
-class fairchem.demo.ocpapi.Model#
-

Bases: _DataModel

-

Stores information about a single model supported in the API.

-
-
-id: str#
-

The ID of the model.

-
- -
- -
-
-class fairchem.demo.ocpapi.Models#
-

Bases: _DataModel

-

Stores the response from a request for models supported in the API.

-
-
-models: List[Model]#
-

The list of models that are supported.

-
- -
- -
-
-class fairchem.demo.ocpapi.Slab#
-

Bases: _DataModel

-

Stores all information about a slab that is returned from the API.

-
-
-atoms: Atoms#
-

The structure of the slab.

-
- -
-
-metadata: SlabMetadata#
-

Extra information about the slab.

-
- -
- -
-
-class fairchem.demo.ocpapi.SlabMetadata#
-

Bases: _DataModel

-

Stores metadata about a slab that is returned from the API.

-
-
-bulk_src_id: str#
-

The ID of the bulk material from which the slab was derived.

-
- -
-
-millers: Tuple[int, int, int]#
-

The Miller indices of the slab relative to bulk structure.

-
- -
-
-shift: float#
-

The position along the vector defined by the Miller indices at which a -cut was taken to generate the slab surface.

-
- -
-
-top: bool#
-

If False, the top and bottom surfaces for this millers/shift pair are -distinct and this slab represents the bottom surface.

-
- -
- -
-
-class fairchem.demo.ocpapi.Slabs#
-

Bases: _DataModel

-

Stores the response from a request to fetch slabs for a bulk structure.

-
-
-slabs: List[Slab]#
-

The list of slabs that were generated from the input bulk structure.

-
- -
- -
-
-class fairchem.demo.ocpapi.Status(*args, **kwds)#
-

Bases: enum.Enum

-

Relaxation status of a single adsorbate placement on a slab.

-
-
-NOT_AVAILABLE = 'not_available'#
-

The configuration exists but the result is not yet available. It is -possible that checking again in the future could yield a result.

-
- -
-
-FAILED_RELAXATION = 'failed_relaxation'#
-

The relaxation failed for this configuration.

-
- -
-
-SUCCESS = 'success'#
-

The relaxation was successful and the requested information about the -configuration was returned.

-
- -
-
-DOES_NOT_EXIST = 'does_not_exist'#
-

The requested configuration does not exist.

-
- -
-
-__str__() str#
-

Return str(self).

-
- -
- -
-
-fairchem.demo.ocpapi.get_results_ui_url(api_host: str, system_id: str) str | None#
-

Generates the URL at which results for the input system can be -visualized.

-
-
Parameters:
-
    -
  • api_host – The API host on which the system was run.

  • -
  • system_id – ID of the system being visualized.

  • -
-
-
Returns:
-

The URL at which the input system can be visualized. None if the -API host is not recognized.

-
-
-
- -
-
-class fairchem.demo.ocpapi.AdsorbateBindingSites#
-

Stores the inputs and results of a set of relaxations of adsorbate -placements on the surface of a slab.

-
-
-adsorbate: str#
-

Description of the adsorbate.

-
- -
-
-bulk: fairchem.demo.ocpapi.client.Bulk#
-

The bulk material that was being modeled.

-
- -
-
-model: str#
-

The type of the model that was run.

-
- -
-
-slabs: List[AdsorbateSlabRelaxations]#
-

The list of slabs that were generated from the bulk structure. Each -contains its own list of adsorbate placements.

-
- -
- -
-
-class fairchem.demo.ocpapi.AdsorbateSlabRelaxations#
-

Stores the relaxations of adsorbate placements on the surface of a slab.

-
-
-slab: fairchem.demo.ocpapi.client.Slab#
-

The slab on which the adsorbate was placed.

-
- -
-
-configs: List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult]#
-

Details of the relaxation of each adsorbate placement, including the -final position.

-
- -
-
-system_id: str#
-

The ID of the system that stores all of the relaxations.

-
- -
-
-api_host: str#
-

The API host on which the relaxations were run.

-
- -
-
-ui_url: str | None#
-

The URL at which results can be visualized.

-
- -
- -
-
-class fairchem.demo.ocpapi.Lifetime(*args, **kwds)#
-

Bases: enum.Enum

-

Represents different lifetimes when running relaxations.

-
-
-SAVE#
-

The relaxation will be available on API servers indefinitely. It will not -be possible to delete the relaxation in the future.

-
- -
-
-MARK_EPHEMERAL#
-

The relaxation will be saved on API servers, but can be deleted at any time -in the future.

-
- -
-
-DELETE#
-

The relaxation will be deleted from API servers as soon as the results have -been fetched.

-
- -
- -
-
-exception fairchem.demo.ocpapi.UnsupportedAdsorbateException(adsorbate: str)#
-

Bases: AdsorbatesException

-

Exception raised when an adsorbate is not supported in the API.

-
- -
-
-exception fairchem.demo.ocpapi.UnsupportedBulkException(bulk: str)#
-

Bases: AdsorbatesException

-

Exception raised when a bulk material is not supported in the API.

-
- -
-
-exception fairchem.demo.ocpapi.UnsupportedModelException(model: str, allowed_models: List[str])#
-

Bases: AdsorbatesException

-

Exception raised when a model is not supported in the API.

-
- -
-
-async fairchem.demo.ocpapi.find_adsorbate_binding_sites(adsorbate: str, bulk: str, model: str = 'equiformer_v2_31M_s2ef_all_md', adslab_filter: Callable[[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]], Awaitable[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]]] = _DEFAULT_ADSLAB_FILTER, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT, lifetime: Lifetime = Lifetime.SAVE) AdsorbateBindingSites#
-

Search for adsorbate binding sites on surfaces of a bulk material. This executes the following steps:

  1. Ensure that both the adsorbate and bulk are supported in the OCP API.

  2. Enumerate unique surfaces from the bulk material.

  3. Enumerate likely binding sites for the input adsorbate on each of the generated surfaces.

  4. Filter the list of generated adsorbate/slab (adslab) configurations using the input adslab_filter.

  5. Relax each generated surface+adsorbate structure by refining atomic positions to minimize forces generated by the input model.
Parameters:
  • adsorbate – Description of the adsorbate to place.

  • bulk – The ID (typically Materials Project MP ID) of the bulk material on which the adsorbate will be placed.

  • model – The type of the model to use when calculating forces during relaxations.

  • adslab_filter – A function that modifies the set of adsorbate/slab configurations that will be relaxed. This can be used to subselect slabs and/or adsorbate configurations.

  • client – The OCP API client to use.

  • lifetime – Whether relaxations should be saved on the server, be marked as ephemeral (allowing them to be deleted in the future), or deleted immediately.

Returns:

Details of each adsorbate binding site, including results of relaxing to locally-optimized positions using the input model.

Raises:
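A minimal end-to-end sketch of this workflow (not taken from the original docs); the adsorbate string "*OH" and bulk ID "mp-30" are placeholder assumptions, and a Miller-index filter is passed in place of the default interactive prompt.

```python
import asyncio

from fairchem.demo.ocpapi import find_adsorbate_binding_sites, keep_slabs_with_miller_indices


async def run() -> None:
    # "*OH" and "mp-30" are placeholders, not values from the docs.
    results = await find_adsorbate_binding_sites(
        adsorbate="*OH",
        bulk="mp-30",
        adslab_filter=keep_slabs_with_miller_indices([(1, 1, 1)]),
    )
    for slab_relaxations in results.slabs:
        print(slab_relaxations.system_id, slab_relaxations.ui_url)


asyncio.run(run())
```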
-async fairchem.demo.ocpapi.get_adsorbate_slab_relaxation_results(system_id: str, config_ids: List[int] | None = None, fields: List[str] | None = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult]#
-

Wrapper around Client.get_adsorbate_slab_relaxations_results() that -handles retries, including re-fetching individual configurations that -are initially omitted.

-
-
Parameters:
-
    -
  • client – The client to use when making API calls.

  • -
  • system_id – The system ID of the relaxations.

  • -
  • config_ids – If defined and not empty, a subset of configurations -to fetch. Otherwise all configurations are returned.

  • -
  • fields – If defined and not empty, a subset of fields in each -configuration to fetch. Otherwise all fields are returned.

  • -
-
-
Returns:
-

List of relaxation results, one for each adsorbate configuration in -the system.

-
-
-
- -
-
-async fairchem.demo.ocpapi.wait_for_adsorbate_slab_relaxations(system_id: str, check_immediately: bool = False, slow_interval_sec: float = 30, fast_interval_sec: float = 10, pbar: tqdm.tqdm | None = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) Dict[int, fairchem.demo.ocpapi.client.Status]#
-

Blocks until all relaxations in the input system have finished, whether -successfully or not.

-

Relaxations are queued in the API, waiting until machines are ready to -run them. Once started, they can take 1-2 minutes to finish. This method -initially sleeps “slow_interval_sec” seconds between each check for any -relaxations having finished. Once at least one result is ready, subsequent -sleeps are for “fast_interval_sec” seconds.

-
-
Parameters:
  • system_id – The ID of the system for which relaxations are running.

  • check_immediately – If False (default), sleep before the first check for relaxations having finished. If True, check whether relaxations have finished immediately on entering this function.

  • slow_interval_sec – The number of seconds to wait between each check while all are still running.

  • fast_interval_sec – The number of seconds to wait between each check when at least one relaxation has finished in the system.

  • pbar – A tqdm instance that tracks the number of configurations that have finished. This will be updated with the number of individual configurations whose relaxations have finished.

  • client – The client to use when making API calls.

Returns:

Map of config IDs in the system to their terminal status.

-
-
-
- -
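A hedged sketch of polling for completion and then pulling results; the system ID is a placeholder, and only successfully relaxed configurations are fetched.

```python
import asyncio

from fairchem.demo.ocpapi import (
    Status,
    get_adsorbate_slab_relaxation_results,
    wait_for_adsorbate_slab_relaxations,
)


async def wait_and_fetch(system_id: str) -> None:
    # Block until every configuration reaches a terminal status.
    statuses = await wait_for_adsorbate_slab_relaxations(system_id)
    done = [cid for cid, status in statuses.items() if status == Status.SUCCESS]

    # Fetch results only for the configurations that finished successfully.
    results = await get_adsorbate_slab_relaxation_results(system_id, config_ids=done)
    for result in results:
        print(result.config_id, result.energy)


asyncio.run(wait_and_fetch("your-system-id"))
```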
-
-class fairchem.demo.ocpapi.keep_all_slabs#
-

Adslab filter that returns all slabs.

-
-
-async __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]#
-
- -
- -
-
-class fairchem.demo.ocpapi.keep_slabs_with_miller_indices(miller_indices: Iterable[Tuple[int, int, int]])#
-

Adslab filter that keeps any slabs with the configured miller indices. -Slabs with other miller indices will be ignored.

-
-
-async __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]#
-
- -
- -
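A custom filter is any async callable with the same signature as the filters above. A hedged sketch that keeps only (1, 1, 1) surfaces, mirroring keep_slabs_with_miller_indices and assuming the Miller indices can be compared as a tuple of ints:

```python
from typing import List

from fairchem.demo.ocpapi import AdsorbateSlabConfigs


async def keep_111_slabs(
    adslabs: List[AdsorbateSlabConfigs],
) -> List[AdsorbateSlabConfigs]:
    # Keep only adslab configurations whose slab was cut along (1, 1, 1);
    # tuple() guards against the indices deserializing as a list.
    return [a for a in adslabs if tuple(a.slab.metadata.millers) == (1, 1, 1)]
```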
-
-class fairchem.demo.ocpapi.prompt_for_slabs_to_keep#
-

Adslab filter that presents the user with an interactive prompt to choose which of the input slabs to keep.

-
-
-static _sort_key(adslab: fairchem.demo.ocpapi.client.AdsorbateSlabConfigs) Tuple[Tuple[int, int, int], float, str]#
-

Generates a sort key from the input adslab. Returns the miller indices, -shift, and top/bottom label so that they will be sorted by those values -in that order.

-
- -
-
-async __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]#
-
- -
- -
-
-fairchem.demo.ocpapi.NO_LIMIT: NoLimitType = 0#
-
- -
-
-fairchem.demo.ocpapi.NoLimitType#
-
- -
-
-class fairchem.demo.ocpapi.RateLimitLogging#
-

Controls logging when rate limits are hit.

-
-
-logger: logging.Logger#
-

The logger to use.

-
- -
-
-action: str#
-

A short description of the action being attempted.

-
- -
- -
-
-fairchem.demo.ocpapi.retry_api_calls(max_attempts: int | NoLimitType = 3, rate_limit_logging: RateLimitLogging | None = None, fixed_wait_sec: float = 2, max_jitter_sec: float = 1) Any#
-

Decorator with sensible defaults for retrying calls to the OCP API.

-
-
Parameters:
  • max_attempts – The maximum number of calls to make. If NO_LIMIT, retries will be made forever.

  • rate_limit_logging – If not None, log statements will be generated using this configuration when a rate limit is hit.

  • fixed_wait_sec – The fixed number of seconds to wait when retrying an exception that does not include a retry-after value. The default value is sensible; this is exposed mostly for testing.

  • max_jitter_sec – The maximum number of seconds that will be randomly added to wait times. The default value is sensible; this is exposed mostly for testing.
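A hedged sketch of applying the decorator to a custom coroutine with rate-limit logging; the logger name and wrapped call are illustrative, and the RateLimitLogging construction assumes it accepts its two documented fields as keyword arguments.

```python
import logging

from fairchem.demo.ocpapi import Client, RateLimitLogging, retry_api_calls

log = logging.getLogger("ocpapi-example")


# The decorator re-runs the wrapped coroutine on retryable failures; the
# RateLimitLogging construction below is an assumption based on its fields.
@retry_api_calls(
    max_attempts=5,
    rate_limit_logging=RateLimitLogging(logger=log, action="fetch models"),
)
async def fetch_models_with_retries(client: Client):
    return await client.get_models()
```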
\ No newline at end of file
diff --git a/autoapi/fairchem/demo/ocpapi/tests/index.html b/autoapi/fairchem/demo/ocpapi/tests/index.html
deleted file mode 100644
index 515567289..000000000
--- a/autoapi/fairchem/demo/ocpapi/tests/index.html
+++ /dev/null
@@ -1,646 +0,0 @@
[deleted page head: fairchem.demo.ocpapi.tests — FAIR Chemistry Documentation]

fairchem.demo.ocpapi.tests#

-
-

Subpackages#

- -
-
- - - - -
- - - - - - -
- -
-
-
- -
- - - -
- - -
-
- - -
- - -
-
-
- - - - - -
-
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/integration/client/index.html b/autoapi/fairchem/demo/ocpapi/tests/integration/client/index.html deleted file mode 100644 index 6c95913a7..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/integration/client/index.html +++ /dev/null @@ -1,620 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.integration.client — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.integration.client#

Submodules#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/integration/client/test_client/index.html b/autoapi/fairchem/demo/ocpapi/tests/integration/client/test_client/index.html deleted file mode 100644 index 00f9dbfa6..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/integration/client/test_client/index.html +++ /dev/null @@ -1,771 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.integration.client.test_client — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.integration.client.test_client#

Module Contents#

Classes#

TestClient – Tests that calls to a real server are handled correctly.

Functions#

_ensure_system_deleted(→ AsyncGenerator[None, None]) – Immediately yields control to the caller. When control returns to this function, try to delete the system with the input id.

Attributes#

log

fairchem.demo.ocpapi.tests.integration.client.test_client.log#

async fairchem.demo.ocpapi.tests.integration.client.test_client._ensure_system_deleted(client: fairchem.demo.ocpapi.client.Client, system_id: str) AsyncGenerator[None, None]#

Immediately yields control to the caller. When control returns to this function, try to delete the system with the input id.

class fairchem.demo.ocpapi.tests.integration.client.test_client.TestClient(methodName='runTest')#

Bases: unittest.IsolatedAsyncioTestCase

Tests that calls to a real server are handled correctly.

CLIENT: fairchem.demo.ocpapi.client.Client#

KNOWN_SYSTEM_ID: str = 'f9eacd8f-748c-41dd-ae43-f263dd36d735'#

async test_get_models() None#
async test_get_bulks() None#
async test_get_adsorbates() None#
async test_get_slabs() None#
async test_get_adsorbate_slab_configs() None#
async test_submit_adsorbate_slab_relaxations__gemnet_oc() None#
async test_submit_adsorbate_slab_relaxations__equiformer_v2() None#
async test_get_adsorbate_slab_relaxations_request() None#
async test_get_adsorbate_slab_relaxations_results__all_fields_and_configs() None#
async test_get_adsorbate_slab_relaxations_results__limited_fields_and_configs() None#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/integration/client/test_ui/index.html b/autoapi/fairchem/demo/ocpapi/tests/integration/client/test_ui/index.html deleted file mode 100644 index 2e093fbdc..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/integration/client/test_ui/index.html +++ /dev/null @@ -1,668 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.integration.client.test_ui — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.integration.client.test_ui#

Module Contents#

Classes#

TestUI – Tests that calls to a real server are handled correctly.

class fairchem.demo.ocpapi.tests.integration.client.test_ui.TestUI(methodName='runTest')#

Bases: unittest.TestCase

Tests that calls to a real server are handled correctly.

API_HOST: str = 'open-catalyst-api.metademolab.com'#

KNOWN_SYSTEM_ID: str = 'f9eacd8f-748c-41dd-ae43-f263dd36d735'#

test_get_results_ui_url() None#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/integration/index.html b/autoapi/fairchem/demo/ocpapi/tests/integration/index.html deleted file mode 100644 index f3b7be5cb..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/integration/index.html +++ /dev/null @@ -1,627 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.integration — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.integration#

Subpackages#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/integration/workflows/index.html b/autoapi/fairchem/demo/ocpapi/tests/integration/workflows/index.html deleted file mode 100644 index 995515324..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/integration/workflows/index.html +++ /dev/null @@ -1,619 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.integration.workflows — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.integration.workflows#

Submodules#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/integration/workflows/test_adsorbates/index.html b/autoapi/fairchem/demo/ocpapi/tests/integration/workflows/test_adsorbates/index.html deleted file mode 100644 index 47489f4d2..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/integration/workflows/test_adsorbates/index.html +++ /dev/null @@ -1,682 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.integration.workflows.test_adsorbates — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.integration.workflows.test_adsorbates#

Module Contents#

Classes#

TestAdsorbates – Tests that workflow methods run against a real server execute correctly.

class fairchem.demo.ocpapi.tests.integration.workflows.test_adsorbates.TestAdsorbates(methodName='runTest')#

Bases: unittest.IsolatedAsyncioTestCase

Tests that workflow methods run against a real server execute correctly.

CLIENT: fairchem.demo.ocpapi.client.Client#

KNOWN_SYSTEM_ID: str = 'f9eacd8f-748c-41dd-ae43-f263dd36d735'#

async test_get_adsorbate_slab_relaxation_results() None#
async test_wait_for_adsorbate_slab_relaxations() None#
async test_find_adsorbate_binding_sites() None#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/unit/client/index.html b/autoapi/fairchem/demo/ocpapi/tests/unit/client/index.html deleted file mode 100644 index 3a257a984..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/unit/client/index.html +++ /dev/null @@ -1,621 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.unit.client — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.unit.client#

Submodules#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_client/index.html b/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_client/index.html deleted file mode 100644 index 68a774fdd..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_client/index.html +++ /dev/null @@ -1,738 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.unit.client.test_client — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.unit.client.test_client#

Module Contents#

Classes#

TestClient – Tests with mocked responses to ensure that they are handled correctly.

class fairchem.demo.ocpapi.tests.unit.client.test_client.TestClient(methodName='runTest')#

Bases: unittest.IsolatedAsyncioTestCase

Tests with mocked responses to ensure that they are handled correctly.

async _run_common_tests_against_route(method: str, route: str, client_method_name: str, successful_response_code: int, successful_response_body: str, successful_response_object: fairchem.demo.ocpapi.client.models._DataModel | None, client_method_args: Dict[str, Any] | None = None, expected_request_params: Dict[str, Any] | None = None, expected_request_body: Dict[str, Any] | None = None) None#

test_host() None#
async test_get_models() None#
async test_get_bulks() None#
async test_get_adsorbates() None#
async test_get_slabs__bulk_by_id() None#
async test_get_slabs__bulk_by_obj() None#
async test_get_adsorbate_slab_configurations() None#
async test_submit_adsorbate_slab_relaxations() None#
async test_get_adsorbate_slab_relaxations_request() None#
async test_get_adsorbate_slab_relaxations_results__all_args() None#
async test_get_adsorbate_slab_relaxations_results__req_args_only() None#
async test_delete_adsorbate_slab_relaxations() None#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_models/index.html b/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_models/index.html deleted file mode 100644 index cc32ae06a..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_models/index.html +++ /dev/null @@ -1,916 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.unit.client.test_models — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.unit.client.test_models#

Module Contents#

Classes#

ModelTestWrapper
TestModel – Serde tests for the Model data model.
TestModels – Serde tests for the Models data model.
TestBulk – Serde tests for the Bulk data model.
TestBulks – Serde tests for the Bulks data model.
TestAdsorbates – Serde tests for the Adsorbates data model.
TestAtoms – Serde tests for the Atoms data model.
TestSlabMetadata – Serde tests for the SlabMetadata data model.
TestSlab – Serde tests for the Slab data model.
TestSlabs – Serde tests for the Slabs data model.
TestAdsorbateSlabConfigs – Serde tests for the AdsorbateSlabConfigs data model.
TestAdsorbateSlabRelaxationsSystem – Serde tests for the AdsorbateSlabRelaxationsSystem data model.
TestAdsorbateSlabRelaxationsRequest – Serde tests for the AdsorbateSlabRelaxationsRequest data model.
TestAdsorbateSlabRelaxationsRequest_req_fields_only – Serde tests for the AdsorbateSlabRelaxationsRequest data model in which optional fields are omitted.
TestAdsorbateSlabRelaxationResult – Serde tests for the AdsorbateSlabRelaxationResult data model.
TestAdsorbateSlabRelaxationResult_req_fields_only – Serde tests for the AdsorbateSlabRelaxationResult data model in which optional fields are omitted.
TestAdsorbateSlabRelaxationsResults – Serde tests for the AdsorbateSlabRelaxationsResults data model.

Attributes#

T

fairchem.demo.ocpapi.tests.unit.client.test_models.T#

class fairchem.demo.ocpapi.tests.unit.client.test_models.ModelTestWrapper#

class ModelTest(*args: Any, obj: T, obj_json: str, **kwargs: Any)#

Bases: unittest.TestCase, Generic[T]

Base class for all tests below that assert behavior of data models.

test_from_json() None#
test_to_json() None#

assertJsonEqual(first: str, second: str) None#

Compares two JSON-formatted strings by deserializing them and then comparing the generated built-in types.

Each TestXxx class listed above subclasses ModelTestWrapper for the corresponding fairchem.demo.ocpapi.client data model; TestAtoms and TestAdsorbateSlabRelaxationResult additionally define test_to_ase_atoms() None#.
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_ui/index.html b/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_ui/index.html deleted file mode 100644 index b4275a4dc..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/unit/client/test_ui/index.html +++ /dev/null @@ -1,688 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.unit.client.test_ui — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.unit.client.test_ui#

Module Contents#

Classes#

TestUI – A class whose instances are single test cases.

class fairchem.demo.ocpapi.tests.unit.client.test_ui.TestUI(methodName='runTest')#

Bases: unittest.TestCase

A class whose instances are single test cases.

test_get_results_ui_url() None#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/unit/index.html b/autoapi/fairchem/demo/ocpapi/tests/unit/index.html deleted file mode 100644 index fb1950bfd..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/unit/index.html +++ /dev/null @@ -1,631 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.unit — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.unit#

Subpackages#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/index.html b/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/index.html deleted file mode 100644 index 7064c760f..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/index.html +++ /dev/null @@ -1,622 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.unit.workflows — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.unit.workflows#

Submodules#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_adsorbates/index.html b/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_adsorbates/index.html deleted file mode 100644 index 1b6acf118..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_adsorbates/index.html +++ /dev/null @@ -1,804 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.unit.workflows.test_adsorbates — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.unit.workflows.test_adsorbates#

Module Contents#

Classes#

MockGetRelaxationResults – Helper that can be used to mock calls to Client.get_adsorbate_slab_relaxations_results().
TestMockGetRelaxationResults – A class whose instances are single test cases.
TestAdsorbates – A class whose instances are single test cases.

exception fairchem.demo.ocpapi.tests.unit.workflows.test_adsorbates.TestException#

Bases: Exception

Common base class for all non-exit exceptions.

__test__ = False#

class fairchem.demo.ocpapi.tests.unit.workflows.test_adsorbates.MockGetRelaxationResults(num_configs: int, max_configs_to_return: int, status_to_return: Iterable[fairchem.demo.ocpapi.client.Status] | None = None, raise_on_first_call: Exception | None = None)#

Helper that can be used to mock calls to Client.get_adsorbate_slab_relaxations_results(). This allows for some configs to be returned with “success” status and others to be omitted, similar to the behavior in the API.

__call__(*args: Any, config_ids: List[int] | None = None, **kwargs: Any) fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationsResults#

class fairchem.demo.ocpapi.tests.unit.workflows.test_adsorbates.TestMockGetRelaxationResults(methodName='runTest')#

Bases: unittest.TestCase

A class whose instances are single test cases.

test___call__() None#

class fairchem.demo.ocpapi.tests.unit.workflows.test_adsorbates.TestAdsorbates(methodName='runTest')#

Bases: unittest.IsolatedAsyncioTestCase

A class whose instances are single test cases.

async test_get_adsorbate_slab_relaxation_results() None#
async test_wait_for_adsorbate_slab_relaxations() None#
async test_find_adsorbate_binding_sites() None#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_context/index.html b/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_context/index.html deleted file mode 100644 index 83d7fe3cc..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_context/index.html +++ /dev/null @@ -1,688 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.unit.workflows.test_context — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.unit.workflows.test_context#

Module Contents#

Classes#

TestContext – A class whose instances are single test cases.

class fairchem.demo.ocpapi.tests.unit.workflows.test_context.TestContext(methodName='runTest')#

Bases: unittest.TestCase

A class whose instances are single test cases.

test_set_context_var() None#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_filter/index.html b/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_filter/index.html deleted file mode 100644 index 38007fdc7..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_filter/index.html +++ /dev/null @@ -1,721 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.unit.workflows.test_filter — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.unit.workflows.test_filter#

Module Contents#

Classes#

TestFilter – A class whose instances are single test cases.

Functions#

_new_adslab(...)

fairchem.demo.ocpapi.tests.unit.workflows.test_filter._new_adslab(miller_indices: Tuple[int, int, int] | None = None) fairchem.demo.ocpapi.client.AdsorbateSlabConfigs#

class fairchem.demo.ocpapi.tests.unit.workflows.test_filter.TestFilter(methodName='runTest')#

Bases: unittest.IsolatedAsyncioTestCase

A class whose instances are single test cases.

async test_keep_all_slabs() None#
async test_keep_slabs_with_miller_indices() None#
async test_prompt_for_slabs_to_keep() None#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_retry/index.html b/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_retry/index.html deleted file mode 100644 index f64608d84..000000000 --- a/autoapi/fairchem/demo/ocpapi/tests/unit/workflows/test_retry/index.html +++ /dev/null @@ -1,750 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.tests.unit.workflows.test_retry — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.tests.unit.workflows.test_retry#

Module Contents#

Classes#

TestRetry – A class whose instances are single test cases.

Functions#

returns(→ Callable[[], T])
raises(→ Callable[[], None])

Attributes#

T

fairchem.demo.ocpapi.tests.unit.workflows.test_retry.T#

fairchem.demo.ocpapi.tests.unit.workflows.test_retry.returns(val: T) Callable[[], T]#

fairchem.demo.ocpapi.tests.unit.workflows.test_retry.raises(ex: Exception) Callable[[], None]#

class fairchem.demo.ocpapi.tests.unit.workflows.test_retry.TestRetry(methodName='runTest')#

Bases: unittest.TestCase

A class whose instances are single test cases.

test_retry_api_calls__results() None#
test_retry_api_calls__wait() None#
test_retry_api_calls__logging() None#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/version/index.html b/autoapi/fairchem/demo/ocpapi/version/index.html deleted file mode 100644 index c010cfdaf..000000000 --- a/autoapi/fairchem/demo/ocpapi/version/index.html +++ /dev/null @@ -1,625 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.version — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fairchem.demo.ocpapi.version#

Module Contents#

fairchem.demo.ocpapi.version.VERSION = '1.0.0'#
- - \ No newline at end of file diff --git a/autoapi/fairchem/demo/ocpapi/workflows/adsorbates/index.html b/autoapi/fairchem/demo/ocpapi/workflows/adsorbates/index.html deleted file mode 100644 index ceb62d632..000000000 --- a/autoapi/fairchem/demo/ocpapi/workflows/adsorbates/index.html +++ /dev/null @@ -1,1323 +0,0 @@ - - - - - - - - - - - fairchem.demo.ocpapi.workflows.adsorbates — FAIR Chemistry Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

fairchem.demo.ocpapi.workflows.adsorbates#

-
-

Module Contents#

-
-

Classes#

Lifetime – Represents different lifetimes when running relaxations.

AdsorbateSlabRelaxations – Stores the relaxations of adsorbate placements on the surface of a slab.

AdsorbateBindingSites – Stores the inputs and results of a set of relaxations of adsorbate placements on the surface of a slab.

-
-
-

Functions#

_setup_log_record_factory(→ None) – Adds a log record factory that stores information about the currently running job on a log message.

_ensure_model_supported(→ None) – Checks that the input model is supported in the API.

_get_bulk_if_supported(→ fairchem.demo.ocpapi.client.Bulk) – Returns the object from the input bulk if it is supported in the API.

_ensure_adsorbate_supported(→ None) – Checks that the input adsorbate is supported in the API.

_get_slabs(→ List[fairchem.demo.ocpapi.client.Slab]) – Enumerates surfaces for the input bulk material.

_get_absorbate_configs_on_slab(...) – Generate initial guesses at adsorbate binding sites on the input slab.

_get_absorbate_configs_on_slab_with_logging(...) – Wrapper around _get_absorbate_configs_on_slab that adds logging.

_get_adsorbate_configs_on_slabs(...) – Finds candidate adsorbate binding sites on each of the input slabs.

_submit_relaxations(→ str) – Start relaxations for each of the input adsorbate configurations on the input slab.

_submit_relaxations_with_progress_logging(→ str) – Wrapper around _submit_relaxations that adds periodic logging in case calls to submit relaxations are being rate limited.

get_adsorbate_slab_relaxation_results(...) – Wrapper around Client.get_adsorbate_slab_relaxations_results() that handles retries, including re-fetching individual configurations that are initially omitted.

wait_for_adsorbate_slab_relaxations(→ Dict[int, ...) – Blocks until all relaxations in the input system have finished, whether successfully or not.

_delete_system(→ None) – Deletes the input system, with retries on failed attempts.

_ensure_system_deleted(→ AsyncGenerator[None, None]) – Immediately yields control to the caller. When control returns to this function, try to delete the system with the input id.

_run_relaxations_on_slab(→ AdsorbateSlabRelaxations) – Start relaxations for each adsorbate configuration on the input slab and wait for all to finish.

_refresh_pbar(→ None) – Helper function that refreshes the input progress bar on a regular schedule.

_relax_binding_sites_on_slabs(→ AdsorbateBindingSites) – Search for adsorbate binding sites on the input slab.

find_adsorbate_binding_sites(→ AdsorbateBindingSites) – Search for adsorbate binding sites on surfaces of a bulk material.

-
-
-

Attributes#

- - - - - - - - - - - - - - - -

_CTX_AD_BULK

_CTX_SLAB

DEFAULT_CLIENT

_DEFAULT_ADSLAB_FILTER

-
-
-fairchem.demo.ocpapi.workflows.adsorbates._CTX_AD_BULK: contextvars.ContextVar[Tuple[str, str]]#
-
- -
-
-fairchem.demo.ocpapi.workflows.adsorbates._CTX_SLAB: contextvars.ContextVar[fairchem.demo.ocpapi.client.Slab]#
-
- -
-
-fairchem.demo.ocpapi.workflows.adsorbates._setup_log_record_factory() None#
-

Adds a log record factory that stores information about the currently running job on a log message.

-
- -
-
-fairchem.demo.ocpapi.workflows.adsorbates.DEFAULT_CLIENT: fairchem.demo.ocpapi.client.Client#
-
- -
-
-exception fairchem.demo.ocpapi.workflows.adsorbates.AdsorbatesException#
-

Bases: Exception

-

Base exception for all others in this module.

-
- -
-
-exception fairchem.demo.ocpapi.workflows.adsorbates.UnsupportedModelException(model: str, allowed_models: List[str])#
-

Bases: AdsorbatesException

-

Exception raised when a model is not supported in the API.

-
- -
-
-exception fairchem.demo.ocpapi.workflows.adsorbates.UnsupportedBulkException(bulk: str)#
-

Bases: AdsorbatesException

-

Exception raised when a bulk material is not supported in the API.

-
- -
-
-exception fairchem.demo.ocpapi.workflows.adsorbates.UnsupportedAdsorbateException(adsorbate: str)#
-

Bases: AdsorbatesException

-

Exception raised when an adsorbate is not supported in the API.

-
- -
-
-class fairchem.demo.ocpapi.workflows.adsorbates.Lifetime(*args, **kwds)#
-

Bases: enum.Enum

-

Represents different lifetimes when running relaxations.

-
-
-SAVE#
-

The relaxation will be available on API servers indefinitely. It will not be possible to delete the relaxation in the future.

-
- -
-
-MARK_EPHEMERAL#
-

The relaxation will be saved on API servers, but can be deleted at any time in the future.

-
- -
-
-DELETE#
-

The relaxation will be deleted from API servers as soon as the results have been fetched.

-
- -
- -
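A small sketch of choosing a lifetime when launching relaxations (assumption: the adsorbate string "*CO" and bulk ID "mp-30" are illustrative placeholders; find_adsorbate_binding_sites is documented later on this page):

    from fairchem.demo.ocpapi.workflows.adsorbates import (
        Lifetime,
        find_adsorbate_binding_sites,
    )

    async def run_ephemeral():
        # MARK_EPHEMERAL keeps the results on the server but allows them to
        # be deleted later; DELETE removes them as soon as they are fetched.
        return await find_adsorbate_binding_sites(
            adsorbate="*CO",
            bulk="mp-30",
            lifetime=Lifetime.MARK_EPHEMERAL,
        )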
-
-class fairchem.demo.ocpapi.workflows.adsorbates.AdsorbateSlabRelaxations#
-

Stores the relaxations of adsorbate placements on the surface of a slab.

-
-
-slab: fairchem.demo.ocpapi.client.Slab#
-

The slab on which the adsorbate was placed.

-
- -
-
-configs: List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult]#
-

Details of the relaxation of each adsorbate placement, including the final position.

-
- -
-
-system_id: str#
-

The ID of the system that stores all of the relaxations.

-
- -
-
-api_host: str#
-

The API host on which the relaxations were run.

-
- -
-
-ui_url: str | None#
-

The URL at which results can be visualized.

-
- -
- -
-
-class fairchem.demo.ocpapi.workflows.adsorbates.AdsorbateBindingSites#
-

Stores the inputs and results of a set of relaxations of adsorbate placements on the surface of a slab.

-
-
-adsorbate: str#
-

Description of the adsorbate.

-
- -
-
-bulk: fairchem.demo.ocpapi.client.Bulk#
-

The bulk material that was being modeled.

-
- -
-
-model: str#
-

The type of the model that was run.

-
- -
-
-slabs: List[AdsorbateSlabRelaxations]#
-

The list of slabs that were generated from the bulk structure. Each contains its own list of adsorbate placements.

-
- -
- -
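A minimal sketch of walking this result structure (assumption: sites is an AdsorbateBindingSites instance returned by find_adsorbate_binding_sites, documented below; the helper name is illustrative):

    from fairchem.demo.ocpapi.workflows.adsorbates import AdsorbateBindingSites

    def summarize(sites: AdsorbateBindingSites) -> None:
        # Top-level inputs: adsorbate description, bulk material, and model name.
        print(sites.adsorbate, sites.bulk, sites.model)
        # One AdsorbateSlabRelaxations entry per generated slab.
        for slab_relaxations in sites.slabs:
            print(
                slab_relaxations.system_id,
                slab_relaxations.ui_url,
                len(slab_relaxations.configs),
            )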
-
-async fairchem.demo.ocpapi.workflows.adsorbates._ensure_model_supported(client: fairchem.demo.ocpapi.client.Client, model: str) None#
-

Checks that the input model is supported in the API.

-
-
Parameters:
-
    -
  • client – The client to use when making requests to the API.

  • -
  • model – The model to check.

  • -
-
-
Raises:
-

UnsupportedModelException – If the model is not supported.

-
-
-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates._get_bulk_if_supported(client: fairchem.demo.ocpapi.client.Client, bulk: str) fairchem.demo.ocpapi.client.Bulk#
-

Returns the object from the input bulk if it is supported in the API.

-
-
Parameters:
-
    -
  • client – The client to use when making requests to the API.

  • -
  • bulk – The bulk to fetch.

  • -
-
-
Raises:
-

UnsupportedBulkException – If the requested bulk is not supported.

-
-
Returns:
-

Bulk instance for the input type.

-
-
-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates._ensure_adsorbate_supported(client: fairchem.demo.ocpapi.client.Client, adsorbate: str) None#
-

Checks that the input adsorbate is supported in the API.

-
-
Parameters:
-
    -
  • client – The client to use when making requests to the API.

  • -
  • adsorbate – The adsorbate to check.

  • -
-
-
Raises:
-

UnsupportedAdsorbateException – If the adsorbate is not supported.

-
-
-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates._get_slabs(client: fairchem.demo.ocpapi.client.Client, bulk: fairchem.demo.ocpapi.client.Bulk) List[fairchem.demo.ocpapi.client.Slab]#
-

Enumerates surfaces for the input bulk material.

-
-
Parameters:
-
    -
  • client – The client to use when making requests to the API.

  • -
  • bulk – The bulk material from which slabs will be generated.

  • -
-
-
Returns:
-

The list of slabs that were generated.

-
-
-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates._get_absorbate_configs_on_slab(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, slab: fairchem.demo.ocpapi.client.Slab) fairchem.demo.ocpapi.client.AdsorbateSlabConfigs#
-

Generate initial guesses at adsorbate binding sites on the input slab.

-
-
Parameters:
-
    -
  • client – The client to use when making API calls.

  • -
  • adsorbate – Description of the adsorbate to place.

  • -
  • slab – The slab on which the adsorbate should be placed.

  • -
-
-
Returns:
-

An updated slab instance that has had tags applied to it and a list of Atoms objects, each with the positions of the adsorbate atoms on one of the candidate binding sites.

-
-
-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates._get_absorbate_configs_on_slab_with_logging(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, slab: fairchem.demo.ocpapi.client.Slab) fairchem.demo.ocpapi.client.AdsorbateSlabConfigs#
-

Wrapper around _get_absorbate_configs_on_slab that adds logging.

-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates._get_adsorbate_configs_on_slabs(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, slabs: List[fairchem.demo.ocpapi.client.Slab]) List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]#
-

Finds candidate adsorbate binding sites on each of the input slabs.

-
-
Parameters:
-
    -
  • client – The client to use when making API calls.

  • -
  • adsorbate – Description of the adsorbate to place.

  • -
  • slabs – The slabs on which the adsorbate should be placed.

  • -
-
-
Returns:
-

List of slabs and, for each, the positions of the adsorbate atoms in the potential binding site.

-
-
-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates._submit_relaxations(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.Atoms], bulk: fairchem.demo.ocpapi.client.Bulk, slab: fairchem.demo.ocpapi.client.Slab, model: str, ephemeral: bool) str#
-

Start relaxations for each of the input adsorbate configurations on the input slab.

-
-
Parameters:
-
    -
  • client – The client to use when making API calls.

  • -
  • adsorbate – Description of the adsorbate to place.

  • -
  • adsorbate_configs – Positions of the adsorbate on the slab. Each will be relaxed independently.

  • -
  • bulk – The bulk material from which the slab was generated.

  • -
  • slab – The slab that should be searched for adsorbate binding sites.

  • -
  • model – The model to use when evaluating forces and energies.

  • -
  • ephemeral – Whether the relaxations should be marked as ephemeral.

  • -
-
-
Returns:
-

The system ID of the relaxation run, which can be used to fetch results as they become available.

-
-
-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates._submit_relaxations_with_progress_logging(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.Atoms], bulk: fairchem.demo.ocpapi.client.Bulk, slab: fairchem.demo.ocpapi.client.Slab, model: str, ephemeral: bool) str#
-

Wrapper around _submit_relaxations that adds periodic logging in case calls to submit relaxations are being rate limited.

-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates.get_adsorbate_slab_relaxation_results(system_id: str, config_ids: List[int] | None = None, fields: List[str] | None = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult]#
-

Wrapper around Client.get_adsorbate_slab_relaxations_results() that handles retries, including re-fetching individual configurations that are initially omitted.

Parameters:

  • client – The client to use when making API calls.

  • system_id – The system ID of the relaxations.

  • config_ids – If defined and not empty, a subset of configurations to fetch. Otherwise all configurations are returned.

  • fields – If defined and not empty, a subset of fields in each configuration to fetch. Otherwise all fields are returned.

Returns:

List of relaxation results, one for each adsorbate configuration in the system.

-
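A hedged sketch of fetching a subset of results (assumptions: system_id comes from an earlier relaxation submission, and the field names "energy" and "pos" are illustrative rather than an exhaustive list of supported fields):

    from fairchem.demo.ocpapi.workflows.adsorbates import (
        get_adsorbate_slab_relaxation_results,
    )

    async def fetch_first_two(system_id: str):
        # Only configurations 0 and 1 are requested, and only the listed
        # fields are populated on each returned result.
        return await get_adsorbate_slab_relaxation_results(
            system_id,
            config_ids=[0, 1],
            fields=["energy", "pos"],
        )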
-
-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates.wait_for_adsorbate_slab_relaxations(system_id: str, check_immediately: bool = False, slow_interval_sec: float = 30, fast_interval_sec: float = 10, pbar: tqdm.tqdm | None = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) Dict[int, fairchem.demo.ocpapi.client.Status]#
-

Blocks until all relaxations in the input system have finished, whether successfully or not.

Relaxations are queued in the API, waiting until machines are ready to run them. Once started, they can take 1-2 minutes to finish. This method initially sleeps “slow_interval_sec” seconds between each check for any relaxations having finished. Once at least one result is ready, subsequent sleeps are for “fast_interval_sec” seconds.

Parameters:

  • system_id – The ID of the system for which relaxations are running.

  • check_immediately – If False (default), sleep before the first check for relaxations having finished. If True, check whether relaxations have finished immediately on entering this function.

  • slow_interval_sec – The number of seconds to wait between each check while all are still running.

  • fast_interval_sec – The number of seconds to wait between each check when at least one relaxation has finished in the system.

  • pbar – A tqdm instance that tracks the number of configurations that have finished. This will be updated with the number of individual configurations whose relaxations have finished.

  • client – The client to use when making API calls.

Returns:

Map of config IDs in the system to their terminal status.

-
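A short sketch of polling for completion (assumption: system_id was returned by a prior relaxation submission; the progress bar is optional and omitted here):

    from fairchem.demo.ocpapi.workflows.adsorbates import (
        wait_for_adsorbate_slab_relaxations,
    )

    async def wait_until_done(system_id: str) -> None:
        statuses = await wait_for_adsorbate_slab_relaxations(
            system_id,
            check_immediately=True,
            slow_interval_sec=30,
            fast_interval_sec=10,
        )
        # Each config ID maps to its terminal Status value.
        for config_id, status in statuses.items():
            print(config_id, status)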
-
-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates._delete_system(client: fairchem.demo.ocpapi.client.Client, system_id: str) None#
-

Deletes the input system, with retries on failed attempts.

-
-
Parameters:
-
    -
  • client – The client to use when making API calls.

  • -
  • system_id – The ID of the system to delete.

  • -
-
-
-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates._ensure_system_deleted(client: fairchem.demo.ocpapi.client.Client, system_id: str) AsyncGenerator[None, None]#
-

Immediately yields control to the caller. When control returns to this function, try to delete the system with the input id.

-
-
Parameters:
-
    -
  • client – The client to use when making API calls.

  • -
  • system_id – The ID of the system to delete.

  • -
-
-
-
- -
-
-async fairchem.demo.ocpapi.workflows.adsorbates._run_relaxations_on_slab(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, adsorbate_configs: List[fairchem.demo.ocpapi.client.Atoms], bulk: fairchem.demo.ocpapi.client.Bulk, slab: fairchem.demo.ocpapi.client.Slab, model: str, lifetime: Lifetime, pbar: tqdm.tqdm) AdsorbateSlabRelaxations#
-

Start relaxations for each adsorbate configuration on the input slab and wait for all to finish.

-
-
Parameters:
-
    -
  • client – The client to use when making API calls.

  • -
  • adsorbate – Description of the adsorbate to place.

  • -
  • adsorbate_configs – The positions of atoms in each adsorbate placement to be relaxed.

  • -
  • bulk – The bulk material from which the slab was generated.

  • -
  • slab – The slab that should be searched for adsorbate binding sites.

  • -
  • model – The model to use when evaluating forces and energies.

  • -
  • lifetime – Whether relaxations should be saved on the server, be marked as ephemeral (allowing them to be deleted in the future), or deleted immediately.

  • -
  • pbar – A progress bar to update as relaxations finish.

  • -
-
-
Returns:
-

Details of each adsorbate placement, including its relaxed position.

-
-
-
- -
-
async fairchem.demo.ocpapi.workflows.adsorbates._refresh_pbar(pbar: tqdm.tqdm, interval_sec: float) → None#

Helper function that refreshes the input progress bar on a regular schedule. This function never returns; it must be cancelled.

Parameters:
  • pbar – The progress bar to refresh.
  • interval_sec – The number of seconds to wait between each refresh.

async fairchem.demo.ocpapi.workflows.adsorbates._relax_binding_sites_on_slabs(client: fairchem.demo.ocpapi.client.Client, adsorbate: str, bulk: fairchem.demo.ocpapi.client.Bulk, adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs], model: str, lifetime: Lifetime) → AdsorbateBindingSites#

Search for adsorbate binding sites on the input slabs.

Parameters:
  • client – The client to use when making API calls.
  • adsorbate – Description of the adsorbate to place.
  • bulk – The bulk material from which the slab was generated.
  • adslabs – The slabs and, for each, the binding sites that should be relaxed.
  • model – The model to use when evaluating forces and energies.
  • lifetime – Whether relaxations should be saved on the server, be marked as ephemeral (allowing them to be deleted in the future), or deleted immediately.

Returns:
  Details of each adsorbate placement, including its relaxed position.

fairchem.demo.ocpapi.workflows.adsorbates._DEFAULT_ADSLAB_FILTER: Callable[[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]], Awaitable[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]]]#

async fairchem.demo.ocpapi.workflows.adsorbates.find_adsorbate_binding_sites(adsorbate: str, bulk: str, model: str = 'equiformer_v2_31M_s2ef_all_md', adslab_filter: Callable[[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]], Awaitable[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]]] = _DEFAULT_ADSLAB_FILTER, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT, lifetime: Lifetime = Lifetime.SAVE) → AdsorbateBindingSites#

Search for adsorbate binding sites on surfaces of a bulk material. This executes the following steps:

  1. Ensure that both the adsorbate and bulk are supported in the OCP API.
  2. Enumerate unique surfaces from the bulk material.
  3. Enumerate likely binding sites for the input adsorbate on each of the generated surfaces.
  4. Filter the list of generated adsorbate/slab (adslab) configurations using the input adslab_filter.
  5. Relax each generated surface+adsorbate structure by refining atomic positions to minimize forces generated by the input model.

Parameters:
  • adsorbate – Description of the adsorbate to place.
  • bulk – The ID (typically Materials Project MP ID) of the bulk material on which the adsorbate will be placed.
  • model – The type of the model to use when calculating forces during relaxations.
  • adslab_filter – A function that modifies the set of adsorbate/slab configurations that will be relaxed. This can be used to subselect slabs and/or adsorbate configurations.
  • client – The OCP API client to use.
  • lifetime – Whether relaxations should be saved on the server, be marked as ephemeral (allowing them to be deleted in the future), or deleted immediately.

Returns:
  Details of each adsorbate binding site, including results of relaxing to locally-optimized positions using the input model.

Raises:

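A sketch of a custom adslab_filter, assuming AdsorbateSlabConfigs exposes the adsorbate_configs list used elsewhere in this module (the trimming logic is purely illustrative):

    from typing import List

    from fairchem.demo.ocpapi.client import AdsorbateSlabConfigs

    async def keep_at_most_two_placements(
        adslabs: List[AdsorbateSlabConfigs],
    ) -> List[AdsorbateSlabConfigs]:
        # Keep every slab, but at most two adsorbate placements per slab, to
        # reduce the number of relaxations that get submitted.
        for adslab in adslabs:
            adslab.adsorbate_configs = adslab.adsorbate_configs[:2]
        return adslabs

    # Later, inside an async function:
    #     results = await find_adsorbate_binding_sites(
    #         adsorbate="*OH",            # placeholder adsorbate
    #         bulk="mp-30",               # placeholder bulk ID
    #         adslab_filter=keep_at_most_two_placements,
    #     )
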
\ No newline at end of file
diff --git a/autoapi/fairchem/demo/ocpapi/workflows/context/index.html b/autoapi/fairchem/demo/ocpapi/workflows/context/index.html
deleted file mode 100644
index 6586a63e8..000000000
--- a/autoapi/fairchem/demo/ocpapi/workflows/context/index.html
+++ /dev/null
@@ -1,652 +0,0 @@

fairchem.demo.ocpapi.workflows.context#

Module Contents#

Functions#

set_context_var(→ Generator[None, None, None])
    Sets the input context variable to the input value and yields control back to the caller.

fairchem.demo.ocpapi.workflows.context.set_context_var(context_var: contextvars.ContextVar, value: Any) → Generator[None, None, None]#

Sets the input context variable to the input value and yields control back to the caller. When control returns to this function, the context variable is reset to its original value.

Parameters:
  • context_var – The context variable to set.
  • value – The value to assign to the variable.

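A small usage sketch, assuming set_context_var can be used as a context manager (its yield-and-reset behavior suggests a contextlib.contextmanager-style helper); the variable below is created only for illustration:

    import contextvars

    from fairchem.demo.ocpapi.workflows.context import set_context_var

    example_var: contextvars.ContextVar[str] = contextvars.ContextVar(
        "example_var", default="original"
    )

    with set_context_var(example_var, "temporary"):
        print(example_var.get())  # "temporary" while the block is active
    print(example_var.get())      # reset to "original" afterwards
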
\ No newline at end of file
diff --git a/autoapi/fairchem/demo/ocpapi/workflows/filter/index.html b/autoapi/fairchem/demo/ocpapi/workflows/filter/index.html
deleted file mode 100644
index c560befe2..000000000
--- a/autoapi/fairchem/demo/ocpapi/workflows/filter/index.html
+++ /dev/null
@@ -1,709 +0,0 @@

fairchem.demo.ocpapi.workflows.filter#

Module Contents#

Classes#

keep_all_slabs
    Adslab filter that returns all slabs.

keep_slabs_with_miller_indices
    Adslab filter that keeps any slabs with the configured miller indices.

prompt_for_slabs_to_keep
    Adslab filter that presents the user with an interactive prompt to choose which of the input slabs to keep.

class fairchem.demo.ocpapi.workflows.filter.keep_all_slabs#

Adslab filter that returns all slabs.

async __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) → List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]#

class fairchem.demo.ocpapi.workflows.filter.keep_slabs_with_miller_indices(miller_indices: Iterable[Tuple[int, int, int]])#

Adslab filter that keeps any slabs with the configured miller indices. Slabs with other miller indices will be ignored.

async __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) → List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]#

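A brief sketch of applying this filter directly to a list of adslabs (the list is assumed to come from an earlier API call, and the chosen miller indices are arbitrary):

    from typing import List

    from fairchem.demo.ocpapi.client import AdsorbateSlabConfigs
    from fairchem.demo.ocpapi.workflows import keep_slabs_with_miller_indices

    async def only_low_index_surfaces(
        adslabs: List[AdsorbateSlabConfigs],
    ) -> List[AdsorbateSlabConfigs]:
        # Keep only the (1, 1, 1) and (1, 0, 0) surfaces; everything else is
        # dropped before relaxations are submitted.
        keep = keep_slabs_with_miller_indices([(1, 1, 1), (1, 0, 0)])
        return await keep(adslabs)
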
class fairchem.demo.ocpapi.workflows.filter.prompt_for_slabs_to_keep#

Adslab filter that presents the user with an interactive prompt to choose which of the input slabs to keep.

static _sort_key(adslab: fairchem.demo.ocpapi.client.AdsorbateSlabConfigs) → Tuple[Tuple[int, int, int], float, str]#

    Generates a sort key from the input adslab. Returns the miller indices, shift, and top/bottom label so that they will be sorted by those values in that order.

async __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) → List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]#

\ No newline at end of file
diff --git a/autoapi/fairchem/demo/ocpapi/workflows/index.html b/autoapi/fairchem/demo/ocpapi/workflows/index.html
deleted file mode 100644
index 98fd99b16..000000000
--- a/autoapi/fairchem/demo/ocpapi/workflows/index.html
+++ /dev/null
@@ -1,1119 +0,0 @@

fairchem.demo.ocpapi.workflows#

Submodules#

fairchem.demo.ocpapi.workflows.adsorbates
fairchem.demo.ocpapi.workflows.context
fairchem.demo.ocpapi.workflows.filter
fairchem.demo.ocpapi.workflows.log
fairchem.demo.ocpapi.workflows.retry

Package Contents#

Classes#

AdsorbateBindingSites
    Stores the inputs and results of a set of relaxations of adsorbate placements on the surface of a slab.

AdsorbateSlabRelaxations
    Stores the relaxations of adsorbate placements on the surface of a slab.

Lifetime
    Represents different lifetimes when running relaxations.

keep_all_slabs
    Adslab filter that returns all slabs.

keep_slabs_with_miller_indices
    Adslab filter that keeps any slabs with the configured miller indices.

prompt_for_slabs_to_keep
    Adslab filter that presents the user with an interactive prompt to choose which of the input slabs to keep.

RateLimitLogging
    Controls logging when rate limits are hit.

Functions#

find_adsorbate_binding_sites(→ AdsorbateBindingSites)
    Search for adsorbate binding sites on surfaces of a bulk material.

get_adsorbate_slab_relaxation_results(...)
    Wrapper around Client.get_adsorbate_slab_relaxations_results() that handles retries.

wait_for_adsorbate_slab_relaxations(→ Dict[int, ...)
    Blocks until all relaxations in the input system have finished, whether successfully or not.

retry_api_calls(→ Any)
    Decorator with sensible defaults for retrying calls to the OCP API.

Attributes#

NO_LIMIT

NoLimitType

class fairchem.demo.ocpapi.workflows.AdsorbateBindingSites#

Stores the inputs and results of a set of relaxations of adsorbate placements on the surface of a slab.

adsorbate: str#
    Description of the adsorbate.

bulk: fairchem.demo.ocpapi.client.Bulk#
    The bulk material that was being modeled.

model: str#
    The type of the model that was run.

slabs: List[AdsorbateSlabRelaxations]#
    The list of slabs that were generated from the bulk structure. Each contains its own list of adsorbate placements.

class fairchem.demo.ocpapi.workflows.AdsorbateSlabRelaxations#

Stores the relaxations of adsorbate placements on the surface of a slab.

slab: fairchem.demo.ocpapi.client.Slab#
    The slab on which the adsorbate was placed.

configs: List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult]#
    Details of the relaxation of each adsorbate placement, including the final position.

system_id: str#
    The ID of the system that stores all of the relaxations.

api_host: str#
    The API host on which the relaxations were run.

ui_url: str | None#
    The URL at which results can be visualized.

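A sketch of walking a result object using only the fields documented above:

    from fairchem.demo.ocpapi.workflows import AdsorbateBindingSites

    def summarize(results: AdsorbateBindingSites) -> None:
        # One AdsorbateSlabRelaxations entry per slab, and one relaxation
        # result per adsorbate placement on that slab.
        print("adsorbate:", results.adsorbate, "model:", results.model)
        for slab_result in results.slabs:
            print("system:", slab_result.system_id, "url:", slab_result.ui_url)
            print("  placements relaxed:", len(slab_result.configs))
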
class fairchem.demo.ocpapi.workflows.Lifetime(*args, **kwds)#

Bases: enum.Enum

Represents different lifetimes when running relaxations.

SAVE#
    The relaxation will be available on API servers indefinitely. It will not be possible to delete the relaxation in the future.

MARK_EPHEMERAL#
    The relaxation will be saved on API servers, but can be deleted at any time in the future.

DELETE#
    The relaxation will be deleted from API servers as soon as the results have been fetched.

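A short sketch of selecting a lifetime to pass to find_adsorbate_binding_sites; Lifetime is a standard enum.Enum:

    from fairchem.demo.ocpapi.workflows import Lifetime

    # MARK_EPHEMERAL keeps results on the server while allowing later deletion.
    lifetime = Lifetime.MARK_EPHEMERAL
    print(lifetime.name)              # "MARK_EPHEMERAL"
    print(lifetime is Lifetime.SAVE)  # False
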
exception fairchem.demo.ocpapi.workflows.UnsupportedAdsorbateException(adsorbate: str)#

Bases: AdsorbatesException

Exception raised when an adsorbate is not supported in the API.

exception fairchem.demo.ocpapi.workflows.UnsupportedBulkException(bulk: str)#

Bases: AdsorbatesException

Exception raised when a bulk material is not supported in the API.

exception fairchem.demo.ocpapi.workflows.UnsupportedModelException(model: str, allowed_models: List[str])#

Bases: AdsorbatesException

Exception raised when a model is not supported in the API.

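A sketch of catching these exceptions around find_adsorbate_binding_sites, which checks adsorbate, bulk, and model support as its first step (the inputs below are placeholders):

    import asyncio

    from fairchem.demo.ocpapi.workflows import (
        UnsupportedAdsorbateException,
        UnsupportedBulkException,
        UnsupportedModelException,
        find_adsorbate_binding_sites,
    )

    async def main() -> None:
        try:
            results = await find_adsorbate_binding_sites(adsorbate="*OH", bulk="mp-30")
        except UnsupportedAdsorbateException:
            print("The requested adsorbate is not supported by the API.")
        except UnsupportedBulkException:
            print("The requested bulk material is not supported by the API.")
        except UnsupportedModelException as exc:
            print("Unsupported model:", exc)
        else:
            print(len(results.slabs), "slabs relaxed")

    asyncio.run(main())
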
async fairchem.demo.ocpapi.workflows.find_adsorbate_binding_sites(adsorbate: str, bulk: str, model: str = 'equiformer_v2_31M_s2ef_all_md', adslab_filter: Callable[[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]], Awaitable[List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]]] = _DEFAULT_ADSLAB_FILTER, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT, lifetime: Lifetime = Lifetime.SAVE) → AdsorbateBindingSites#

Search for adsorbate binding sites on surfaces of a bulk material. This executes the following steps:

  1. Ensure that both the adsorbate and bulk are supported in the OCP API.
  2. Enumerate unique surfaces from the bulk material.
  3. Enumerate likely binding sites for the input adsorbate on each of the generated surfaces.
  4. Filter the list of generated adsorbate/slab (adslab) configurations using the input adslab_filter.
  5. Relax each generated surface+adsorbate structure by refining atomic positions to minimize forces generated by the input model.

Parameters:
  • adsorbate – Description of the adsorbate to place.
  • bulk – The ID (typically Materials Project MP ID) of the bulk material on which the adsorbate will be placed.
  • model – The type of the model to use when calculating forces during relaxations.
  • adslab_filter – A function that modifies the set of adsorbate/slab configurations that will be relaxed. This can be used to subselect slabs and/or adsorbate configurations.
  • client – The OCP API client to use.
  • lifetime – Whether relaxations should be saved on the server, be marked as ephemeral (allowing them to be deleted in the future), or deleted immediately.

Returns:
  Details of each adsorbate binding site, including results of relaxing to locally-optimized positions using the input model.

Raises:

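An end-to-end sketch using the interactive slab prompt as the filter (the adsorbate and bulk values are placeholders):

    import asyncio

    from fairchem.demo.ocpapi.workflows import (
        Lifetime,
        find_adsorbate_binding_sites,
        prompt_for_slabs_to_keep,
    )

    async def main() -> None:
        # The user is prompted to choose which enumerated slabs to relax.
        results = await find_adsorbate_binding_sites(
            adsorbate="*OH",
            bulk="mp-30",
            adslab_filter=prompt_for_slabs_to_keep(),
            lifetime=Lifetime.MARK_EPHEMERAL,
        )
        for slab_result in results.slabs:
            print(slab_result.system_id, slab_result.ui_url)

    asyncio.run(main())
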
async fairchem.demo.ocpapi.workflows.get_adsorbate_slab_relaxation_results(system_id: str, config_ids: List[int] | None = None, fields: List[str] | None = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) → List[fairchem.demo.ocpapi.client.AdsorbateSlabRelaxationResult]#

Wrapper around Client.get_adsorbate_slab_relaxations_results() that handles retries, including re-fetching individual configurations that are initially omitted.

Parameters:
  • client – The client to use when making API calls.
  • system_id – The system ID of the relaxations.
  • config_ids – If defined and not empty, a subset of configurations to fetch. Otherwise all configurations are returned.
  • fields – If defined and not empty, a subset of fields in each configuration to fetch. Otherwise all fields are returned.

Returns:
  List of relaxation results, one for each adsorbate configuration in the system.

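A usage sketch; the system ID is a placeholder and the field names are assumptions chosen only to show the fields argument:

    import asyncio

    from fairchem.demo.ocpapi.workflows import get_adsorbate_slab_relaxation_results

    async def main() -> None:
        results = await get_adsorbate_slab_relaxation_results(
            system_id="abc-123",               # hypothetical system ID
            fields=["energy", "pos_relaxed"],  # assumed field names
        )
        print(len(results), "configurations fetched")

    asyncio.run(main())
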
async fairchem.demo.ocpapi.workflows.wait_for_adsorbate_slab_relaxations(system_id: str, check_immediately: bool = False, slow_interval_sec: float = 30, fast_interval_sec: float = 10, pbar: tqdm.tqdm | None = None, client: fairchem.demo.ocpapi.client.Client = DEFAULT_CLIENT) → Dict[int, fairchem.demo.ocpapi.client.Status]#

Blocks until all relaxations in the input system have finished, whether successfully or not.

Relaxations are queued in the API, waiting until machines are ready to run them. Once started, they can take 1-2 minutes to finish. This method initially sleeps “slow_interval_sec” seconds between each check for any relaxations having finished. Once at least one result is ready, subsequent sleeps are for “fast_interval_sec” seconds.

Parameters:
  • system_id – The ID of the system for which relaxations are running.
  • check_immediately – If False (default), sleep before the first check for relaxations having finished. If True, check whether relaxations have finished immediately on entering this function.
  • slow_interval_sec – The number of seconds to wait between each check while all are still running.
  • fast_interval_sec – The number of seconds to wait between each check when at least one relaxation has finished in the system.
  • pbar – A tqdm instance that tracks the number of configurations that have finished. This will be updated with the number of individual configurations whose relaxations have finished.
  • client – The client to use when making API calls.

Returns:
  Map of config IDs in the system to their terminal status.

class fairchem.demo.ocpapi.workflows.keep_all_slabs#

Adslab filter that returns all slabs.

async __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) → List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]#

class fairchem.demo.ocpapi.workflows.keep_slabs_with_miller_indices(miller_indices: Iterable[Tuple[int, int, int]])#

Adslab filter that keeps any slabs with the configured miller indices. Slabs with other miller indices will be ignored.

async __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) → List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]#

class fairchem.demo.ocpapi.workflows.prompt_for_slabs_to_keep#

Adslab filter that presents the user with an interactive prompt to choose which of the input slabs to keep.

static _sort_key(adslab: fairchem.demo.ocpapi.client.AdsorbateSlabConfigs) → Tuple[Tuple[int, int, int], float, str]#

    Generates a sort key from the input adslab. Returns the miller indices, shift, and top/bottom label so that they will be sorted by those values in that order.

async __call__(adslabs: List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]) → List[fairchem.demo.ocpapi.client.AdsorbateSlabConfigs]#

fairchem.demo.ocpapi.workflows.NO_LIMIT: NoLimitType = 0#

fairchem.demo.ocpapi.workflows.NoLimitType#

class fairchem.demo.ocpapi.workflows.RateLimitLogging#

Controls logging when rate limits are hit.

logger: logging.Logger#
    The logger to use.

action: str#
    A short description of the action being attempted.

fairchem.demo.ocpapi.workflows.retry_api_calls(max_attempts: int | NoLimitType = 3, rate_limit_logging: RateLimitLogging | None = None, fixed_wait_sec: float = 2, max_jitter_sec: float = 1) → Any#

Decorator with sensible defaults for retrying calls to the OCP API.

Parameters:
  • max_attempts – The maximum number of calls to make. If NO_LIMIT, retries will be made forever.
  • rate_limit_logging – If not None, log statements will be generated using this configuration when a rate limit is hit.
  • fixed_wait_sec – The fixed number of seconds to wait when retrying an exception that does not include a retry-after value. The default value is sensible; this is exposed mostly for testing.
  • max_jitter_sec – The maximum number of seconds that will be randomly added to wait times. The default value is sensible; this is exposed mostly for testing.

\ No newline at end of file
diff --git a/autoapi/fairchem/demo/ocpapi/workflows/log/index.html b/autoapi/fairchem/demo/ocpapi/workflows/log/index.html
deleted file mode 100644
index 1d240f0f0..000000000
--- a/autoapi/fairchem/demo/ocpapi/workflows/log/index.html
+++ /dev/null
@@ -1,625 +0,0 @@

fairchem.demo.ocpapi.workflows.log#

Module Contents#

fairchem.demo.ocpapi.workflows.log.log#

\ No newline at end of file
diff --git a/autoapi/fairchem/demo/ocpapi/workflows/retry/index.html b/autoapi/fairchem/demo/ocpapi/workflows/retry/index.html
deleted file mode 100644
index c62f09325..000000000
--- a/autoapi/fairchem/demo/ocpapi/workflows/retry/index.html
+++ /dev/null
@@ -1,754 +0,0 @@

fairchem.demo.ocpapi.workflows.retry#

Module Contents#

Classes#

RateLimitLogging
    Controls logging when rate limits are hit.

_wait_check_retry_after
    Tenacity wait strategy that first checks whether RateLimitExceededException was raised and that it includes a retry-after value.

Functions#

retry_api_calls(→ Any)
    Decorator with sensible defaults for retrying calls to the OCP API.

Attributes#

NoLimitType

NO_LIMIT

class fairchem.demo.ocpapi.workflows.retry.RateLimitLogging#

Controls logging when rate limits are hit.

logger: logging.Logger#
    The logger to use.

action: str#
    A short description of the action being attempted.

class fairchem.demo.ocpapi.workflows.retry._wait_check_retry_after(default_wait: tenacity.wait.wait_base, rate_limit_logging: RateLimitLogging | None = None)#

Bases: tenacity.wait.wait_base

Tenacity wait strategy that first checks whether RateLimitExceededException was raised and that it includes a retry-after value; if so, wait for that amount of time. Otherwise, fall back to the provided default strategy.

__call__(retry_state: tenacity.RetryCallState) → float#

    If a RateLimitExceededException was raised and has a retry_after value, return it. Otherwise use the default waiter method.

fairchem.demo.ocpapi.workflows.retry.NoLimitType#

fairchem.demo.ocpapi.workflows.retry.NO_LIMIT: NoLimitType = 0#

fairchem.demo.ocpapi.workflows.retry.retry_api_calls(max_attempts: int | NoLimitType = 3, rate_limit_logging: RateLimitLogging | None = None, fixed_wait_sec: float = 2, max_jitter_sec: float = 1) → Any#

Decorator with sensible defaults for retrying calls to the OCP API.

Parameters:
  • max_attempts – The maximum number of calls to make. If NO_LIMIT, retries will be made forever.
  • rate_limit_logging – If not None, log statements will be generated using this configuration when a rate limit is hit.
  • fixed_wait_sec – The fixed number of seconds to wait when retrying an exception that does not include a retry-after value. The default value is sensible; this is exposed mostly for testing.
  • max_jitter_sec – The maximum number of seconds that will be randomly added to wait times. The default value is sensible; this is exposed mostly for testing.

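A sketch of decorating an API-calling coroutine, assuming RateLimitLogging accepts its two documented fields as constructor arguments (the logger name and wrapped call are illustrative):

    import logging

    from fairchem.demo.ocpapi.workflows import RateLimitLogging, retry_api_calls

    log = logging.getLogger("ocpapi-example")

    @retry_api_calls(
        max_attempts=5,
        rate_limit_logging=RateLimitLogging(logger=log, action="fetch results"),
    )
    async def fetch_results(client, system_id: str):
        # Retried up to 5 times; when a rate limit is hit, the wait honors the
        # server-provided retry-after value and logs through "log".
        return await client.get_adsorbate_slab_relaxations_results(system_id)
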
\ No newline at end of file
diff --git a/autoapi/index.html b/autoapi/index.html
index a942993db..6dfc53b5c 100644
--- a/autoapi/index.html
+++ b/autoapi/index.html
@@ -58,10 +58,12 @@
@@ -181,7 +183,7 @@

fairchem documentation