diff --git a/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt b/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt
index 0eeb8ebcd2..5ee1b7f48f 100644
--- a/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt
+++ b/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt
@@ -1,6 +1,6 @@
-2024-10-23 20:33:22 (INFO): Running in local mode without elastic launch (single gpu only)
-2024-10-23 20:33:22 (INFO): Setting env PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
-2024-10-23 20:33:22 (INFO): Project root: /home/runner/work/fairchem/fairchem/src/fairchem
+2024-11-06 17:40:00 (INFO): Running in local mode without elastic launch (single gpu only)
+2024-11-06 17:40:00 (INFO): Setting env PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
+2024-11-06 17:40:00 (INFO): Project root: /home/runner/work/fairchem/fairchem/src/fairchem
/home/runner/work/fairchem/fairchem/src/fairchem/core/models/escn/so3.py:23: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
_Jd = torch.load(os.path.join(os.path.dirname(__file__), "Jd.pt"))
/home/runner/work/fairchem/fairchem/src/fairchem/core/models/scn/spherical_harmonics.py:23: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
@@ -15,17 +15,17 @@
@torch.cuda.amp.autocast(enabled=False)
/home/runner/work/fairchem/fairchem/src/fairchem/core/models/equiformer_v2/layer_norm.py:357: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
@torch.cuda.amp.autocast(enabled=False)
-2024-10-23 20:33:23 (INFO): amp: false
+2024-11-06 17:40:01 (INFO): amp: false
cmd:
- checkpoint_dir: fine-tuning/checkpoints/2024-10-23-20-33-04-ft-oxides
- commit: da93cfe
+ checkpoint_dir: fine-tuning/checkpoints/2024-11-06-17-40-16-ft-oxides
+ commit: 6329e92
identifier: ft-oxides
- logs_dir: fine-tuning/logs/tensorboard/2024-10-23-20-33-04-ft-oxides
+ logs_dir: fine-tuning/logs/tensorboard/2024-11-06-17-40-16-ft-oxides
print_every: 10
- results_dir: fine-tuning/results/2024-10-23-20-33-04-ft-oxides
+ results_dir: fine-tuning/results/2024-11-06-17-40-16-ft-oxides
seed: 0
- timestamp_id: 2024-10-23-20-33-04-ft-oxides
- version: 0.1.dev1+gda93cfe
+ timestamp_id: 2024-11-06-17-40-16-ft-oxides
+ version: 0.1.dev1+g6329e92
dataset:
a2g_args:
r_energy: true
@@ -155,67 +155,70 @@ val_dataset:
format: ase_db
src: val.db
-2024-10-23 20:33:23 (INFO): Loading model: gemnet_oc
-2024-10-23 20:33:23 (WARNING): Unrecognized arguments: ['symmetric_edge_symmetrization']
-2024-10-23 20:33:25 (INFO): Loaded GemNetOC with 38864438 parameters.
-2024-10-23 20:33:25 (WARNING): log_summary for Tensorboard not supported
-2024-10-23 20:33:26 (INFO): Loading dataset: ase_db
-2024-10-23 20:33:26 (WARNING): Could not find dataset metadata.npz files in '[PosixPath('train.db')]'
-2024-10-23 20:33:26 (WARNING): Disabled BalancedBatchSampler because num_replicas=1.
-2024-10-23 20:33:26 (WARNING): Failed to get data sizes, falling back to uniform partitioning. BalancedBatchSampler requires a dataset that has a metadata attributed with number of atoms.
-2024-10-23 20:33:26 (INFO): rank: 0: Sampler created...
-2024-10-23 20:33:26 (INFO): Created BalancedBatchSampler with sampler=, batch_size=4, drop_last=False
-2024-10-23 20:33:26 (WARNING): Could not find dataset metadata.npz files in '[PosixPath('val.db')]'
-2024-10-23 20:33:26 (WARNING): Disabled BalancedBatchSampler because num_replicas=1.
-2024-10-23 20:33:26 (WARNING): Failed to get data sizes, falling back to uniform partitioning. BalancedBatchSampler requires a dataset that has a metadata attributed with number of atoms.
-2024-10-23 20:33:26 (INFO): rank: 0: Sampler created...
-2024-10-23 20:33:26 (INFO): Created BalancedBatchSampler with sampler=, batch_size=16, drop_last=False
-2024-10-23 20:33:26 (WARNING): Could not find dataset metadata.npz files in '[PosixPath('test.db')]'
-2024-10-23 20:33:26 (WARNING): Disabled BalancedBatchSampler because num_replicas=1.
-2024-10-23 20:33:26 (WARNING): Failed to get data sizes, falling back to uniform partitioning. BalancedBatchSampler requires a dataset that has a metadata attributed with number of atoms.
-2024-10-23 20:33:26 (INFO): rank: 0: Sampler created...
-2024-10-23 20:33:26 (INFO): Created BalancedBatchSampler with sampler=, batch_size=16, drop_last=False
-2024-10-23 20:33:26 (WARNING): Using `weight_decay` from `optim` instead of `optim.optimizer_params`.Please update your config to use `optim.optimizer_params.weight_decay`.`optim.weight_decay` will soon be deprecated.
-2024-10-23 20:33:26 (INFO): Attemping to load user specified checkpoint at /tmp/fairchem_checkpoints/gnoc_oc22_oc20_all_s2ef.pt
-2024-10-23 20:33:26 (INFO): Loading checkpoint from: /tmp/fairchem_checkpoints/gnoc_oc22_oc20_all_s2ef.pt
-/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/base_trainer.py:603: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
+2024-11-06 17:40:01 (INFO): Loading model: gemnet_oc
+2024-11-06 17:40:01 (WARNING): Unrecognized arguments: ['symmetric_edge_symmetrization']
+2024-11-06 17:40:03 (INFO): Loaded GemNetOC with 38864438 parameters.
+2024-11-06 17:40:03 (WARNING): log_summary for Tensorboard not supported
+2024-11-06 17:40:03 (INFO): Loading dataset: ase_db
+2024-11-06 17:40:03 (WARNING): Could not find dataset metadata.npz files in '[PosixPath('train.db')]'
+2024-11-06 17:40:03 (WARNING): Disabled BalancedBatchSampler because num_replicas=1.
+2024-11-06 17:40:03 (WARNING): Failed to get data sizes, falling back to uniform partitioning. BalancedBatchSampler requires a dataset that has a metadata attributed with number of atoms.
+2024-11-06 17:40:03 (INFO): rank: 0: Sampler created...
+2024-11-06 17:40:03 (INFO): Created BalancedBatchSampler with sampler=, batch_size=4, drop_last=False
+2024-11-06 17:40:03 (WARNING): Could not find dataset metadata.npz files in '[PosixPath('val.db')]'
+2024-11-06 17:40:03 (WARNING): Disabled BalancedBatchSampler because num_replicas=1.
+2024-11-06 17:40:03 (WARNING): Failed to get data sizes, falling back to uniform partitioning. BalancedBatchSampler requires a dataset that has a metadata attributed with number of atoms.
+2024-11-06 17:40:03 (INFO): rank: 0: Sampler created...
+2024-11-06 17:40:03 (INFO): Created BalancedBatchSampler with sampler=, batch_size=16, drop_last=False
+2024-11-06 17:40:03 (WARNING): Could not find dataset metadata.npz files in '[PosixPath('test.db')]'
+2024-11-06 17:40:03 (WARNING): Disabled BalancedBatchSampler because num_replicas=1.
+2024-11-06 17:40:03 (WARNING): Failed to get data sizes, falling back to uniform partitioning. BalancedBatchSampler requires a dataset that has a metadata attributed with number of atoms.
+2024-11-06 17:40:03 (INFO): rank: 0: Sampler created...
+2024-11-06 17:40:03 (INFO): Created BalancedBatchSampler with sampler=, batch_size=16, drop_last=False
+2024-11-06 17:40:03 (WARNING): Using `weight_decay` from `optim` instead of `optim.optimizer_params`.Please update your config to use `optim.optimizer_params.weight_decay`.`optim.weight_decay` will soon be deprecated.
+2024-11-06 17:40:04 (INFO): Attemping to load user specified checkpoint at /tmp/fairchem_checkpoints/gnoc_oc22_oc20_all_s2ef.pt
+2024-11-06 17:40:04 (INFO): Loading checkpoint from: /tmp/fairchem_checkpoints/gnoc_oc22_oc20_all_s2ef.pt
+/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/base_trainer.py:601: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
checkpoint = torch.load(checkpoint_path, map_location=map_location)
-2024-10-23 20:33:26 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint.
+2024-11-06 17:40:04 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint.
/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/ocp_trainer.py:155: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
with torch.cuda.amp.autocast(enabled=self.scaler is not None):
/home/runner/work/fairchem/fairchem/src/fairchem/core/models/gemnet_oc/gemnet_oc.py:1270: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
with torch.cuda.amp.autocast(False):
-2024-10-23 20:33:50 (INFO): energy_mae: 9.48e+00, forcesx_mae: 7.26e-02, forcesy_mae: 3.95e-02, forcesz_mae: 5.74e-02, forces_mae: 5.65e-02, forces_cosine_similarity: 1.11e-01, forces_magnitude_error: 1.11e-01, energy_forces_within_threshold: 0.00e+00, loss: 9.61e+00, lr: 5.00e-04, epoch: 1.69e-01, step: 1.00e+01
-2024-10-23 20:33:51 (INFO): Evaluating on val.
-
device 0: 0%| | 0/2 [00:00, ?it/s]/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/base_trainer.py:887: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
+2024-11-06 17:40:28 (INFO): energy_mae: 9.48e+00, forcesx_mae: 7.26e-02, forcesy_mae: 3.95e-02, forcesz_mae: 5.74e-02, forces_mae: 5.65e-02, forces_cosine_similarity: 1.12e-01, forces_magnitude_error: 1.11e-01, energy_forces_within_threshold: 0.00e+00, loss: 9.61e+00, lr: 5.00e-04, epoch: 1.69e-01, step: 1.00e+01
+2024-11-06 17:40:29 (INFO): Evaluating on val.
+
device 0: 0%| | 0/2 [00:00, ?it/s]/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/base_trainer.py:884: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
with torch.cuda.amp.autocast(enabled=self.scaler is not None):
-
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.82s/it]
device 0: 100%|██████████| 2/2 [00:06<00:00, 3.42s/it]
device 0: 100%|██████████| 2/2 [00:07<00:00, 3.54s/it]
-2024-10-23 20:33:59 (INFO): energy_mae: 19.3096, forcesx_mae: 0.2055, forcesy_mae: 0.1585, forcesz_mae: 0.1641, forces_mae: 0.1761, forces_cosine_similarity: -0.0774, forces_magnitude_error: 0.3616, energy_forces_within_threshold: 0.0000, loss: 19.5661, epoch: 0.1695
-2024-10-23 20:33:59 (INFO): Predicting on test.
-
device 0: 0%| | 0/2 [00:00, ?it/s]/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/ocp_trainer.py:453: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
+
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.61s/it]
device 0: 100%|██████████| 2/2 [00:06<00:00, 3.32s/it]
device 0: 100%|██████████| 2/2 [00:06<00:00, 3.42s/it]
+2024-11-06 17:40:36 (INFO): energy_mae: 19.3128, forcesx_mae: 0.2055, forcesy_mae: 0.1585, forcesz_mae: 0.1641, forces_mae: 0.1760, forces_cosine_similarity: -0.0775, forces_magnitude_error: 0.3616, energy_forces_within_threshold: 0.0000, loss: 19.5692, epoch: 0.1695
+2024-11-06 17:40:36 (INFO): Predicting on test.
+
device 0: 0%| | 0/2 [00:00, ?it/s]/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/ocp_trainer.py:461: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
with torch.cuda.amp.autocast(enabled=self.scaler is not None):
-
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.29s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.65s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.81s/it]
-2024-10-23 20:34:05 (INFO): Writing results to fine-tuning/results/2024-10-23-20-33-04-ft-oxides/ocp_predictions.npz
-2024-10-23 20:34:26 (INFO): energy_mae: 7.51e+00, forcesx_mae: 5.95e-02, forcesy_mae: 5.59e-02, forcesz_mae: 4.45e-02, forces_mae: 5.33e-02, forces_cosine_similarity: -6.35e-03, forces_magnitude_error: 1.04e-01, energy_forces_within_threshold: 0.00e+00, loss: 7.63e+00, lr: 5.00e-04, epoch: 3.39e-01, step: 2.00e+01
-2024-10-23 20:34:28 (INFO): Evaluating on val.
-
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.70s/it]
device 0: 100%|██████████| 2/2 [00:06<00:00, 3.40s/it]
device 0: 100%|██████████| 2/2 [00:07<00:00, 3.51s/it]
-2024-10-23 20:34:35 (INFO): energy_mae: 6.5983, forcesx_mae: 0.0250, forcesy_mae: 0.0255, forcesz_mae: 0.0185, forces_mae: 0.0230, forces_cosine_similarity: -0.0912, forces_magnitude_error: 0.0278, energy_forces_within_threshold: 0.0000, loss: 6.6208, epoch: 0.3390
-2024-10-23 20:34:35 (INFO): Predicting on test.
-
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.46s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.71s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.90s/it]
-2024-10-23 20:34:41 (INFO): Writing results to fine-tuning/results/2024-10-23-20-33-04-ft-oxides/ocp_predictions.npz
-2024-10-23 20:35:06 (INFO): energy_mae: 9.33e+00, forcesx_mae: 3.94e-02, forcesy_mae: 3.02e-02, forcesz_mae: 3.53e-02, forces_mae: 3.50e-02, forces_cosine_similarity: -1.19e-01, forces_magnitude_error: 4.86e-02, energy_forces_within_threshold: 0.00e+00, loss: 9.39e+00, lr: 5.00e-04, epoch: 5.08e-01, step: 3.00e+01
-2024-10-23 20:35:08 (INFO): Evaluating on val.
-
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.80s/it]
device 0: 100%|██████████| 2/2 [00:06<00:00, 3.42s/it]
device 0: 100%|██████████| 2/2 [00:07<00:00, 3.54s/it]
-2024-10-23 20:35:15 (INFO): energy_mae: 4.8628, forcesx_mae: 0.0179, forcesy_mae: 0.0211, forcesz_mae: 0.0139, forces_mae: 0.0177, forces_cosine_similarity: 0.0623, forces_magnitude_error: 0.0277, energy_forces_within_threshold: 0.0000, loss: 4.9767, epoch: 0.5085
-2024-10-23 20:35:16 (INFO): Predicting on test.
-
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.51s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.75s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.95s/it]
-2024-10-23 20:35:22 (INFO): Writing results to fine-tuning/results/2024-10-23-20-33-04-ft-oxides/ocp_predictions.npz
-2024-10-23 20:35:47 (INFO): energy_mae: 4.62e+00, forcesx_mae: 2.75e-02, forcesy_mae: 2.09e-02, forcesz_mae: 2.19e-02, forces_mae: 2.34e-02, forces_cosine_similarity: 2.88e-02, forces_magnitude_error: 2.98e-02, energy_forces_within_threshold: 0.00e+00, loss: 4.67e+00, lr: 5.00e-04, epoch: 6.78e-01, step: 4.00e+01
-2024-10-23 20:35:49 (INFO): Evaluating on val.
-
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.78s/it]
device 0: 100%|██████████| 2/2 [00:07<00:00, 3.55s/it]
device 0: 100%|██████████| 2/2 [00:07<00:00, 3.66s/it]
-2024-10-23 20:35:56 (INFO): energy_mae: 23.1495, forcesx_mae: 0.0276, forcesy_mae: 0.0294, forcesz_mae: 0.0232, forces_mae: 0.0267, forces_cosine_similarity: -0.0002, forces_magnitude_error: 0.0334, energy_forces_within_threshold: 0.0000, loss: 23.2945, epoch: 0.6780
-2024-10-23 20:36:18 (INFO): energy_mae: 5.17e+00, forcesx_mae: 2.46e-02, forcesy_mae: 2.55e-02, forcesz_mae: 2.63e-02, forces_mae: 2.55e-02, forces_cosine_similarity: -1.32e-02, forces_magnitude_error: 3.43e-02, energy_forces_within_threshold: 0.00e+00, loss: 5.22e+00, lr: 5.00e-04, epoch: 8.47e-01, step: 5.00e+01
-2024-10-23 20:36:20 (INFO): Evaluating on val.
-
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.52s/it]
device 0: 100%|██████████| 2/2 [00:06<00:00, 3.30s/it]
device 0: 100%|██████████| 2/2 [00:06<00:00, 3.40s/it]
-2024-10-23 20:36:27 (INFO): energy_mae: 1.2014, forcesx_mae: 0.0225, forcesy_mae: 0.0291, forcesz_mae: 0.0233, forces_mae: 0.0250, forces_cosine_similarity: 0.0485, forces_magnitude_error: 0.0403, energy_forces_within_threshold: 0.0000, loss: 1.2501, epoch: 0.8475
-2024-10-23 20:36:49 (INFO): Total time taken: 203.12808299064636
+
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.25s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.68s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.82s/it]
+2024-11-06 17:40:42 (INFO): Writing results to fine-tuning/results/2024-11-06-17-40-16-ft-oxides/ocp_predictions.npz
+2024-11-06 17:41:03 (INFO): energy_mae: 7.51e+00, forcesx_mae: 5.95e-02, forcesy_mae: 5.59e-02, forcesz_mae: 4.46e-02, forces_mae: 5.33e-02, forces_cosine_similarity: -5.93e-03, forces_magnitude_error: 1.04e-01, energy_forces_within_threshold: 0.00e+00, loss: 7.64e+00, lr: 5.00e-04, epoch: 3.39e-01, step: 2.00e+01
+2024-11-06 17:41:05 (INFO): Evaluating on val.
+
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.61s/it]
device 0: 100%|██████████| 2/2 [00:07<00:00, 3.53s/it]
device 0: 100%|██████████| 2/2 [00:07<00:00, 3.61s/it]
+2024-11-06 17:41:12 (INFO): energy_mae: 6.5926, forcesx_mae: 0.0251, forcesy_mae: 0.0256, forcesz_mae: 0.0186, forces_mae: 0.0231, forces_cosine_similarity: -0.0862, forces_magnitude_error: 0.0280, energy_forces_within_threshold: 0.0000, loss: 6.6141, epoch: 0.3390
+2024-11-06 17:41:13 (INFO): Predicting on test.
+
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.46s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.70s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.89s/it]
+2024-11-06 17:41:18 (INFO): Writing results to fine-tuning/results/2024-11-06-17-40-16-ft-oxides/ocp_predictions.npz
+2024-11-06 17:41:43 (INFO): energy_mae: 1.06e+01, forcesx_mae: 4.34e-02, forcesy_mae: 3.21e-02, forcesz_mae: 3.94e-02, forces_mae: 3.83e-02, forces_cosine_similarity: -1.02e-01, forces_magnitude_error: 5.53e-02, energy_forces_within_threshold: 0.00e+00, loss: 1.07e+01, lr: 5.00e-04, epoch: 5.08e-01, step: 3.00e+01
+2024-11-06 17:41:45 (INFO): Evaluating on val.
+
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.86s/it]
device 0: 100%|██████████| 2/2 [00:06<00:00, 3.39s/it]
device 0: 100%|██████████| 2/2 [00:07<00:00, 3.53s/it]
+2024-11-06 17:41:52 (INFO): energy_mae: 2.0556, forcesx_mae: 0.0196, forcesy_mae: 0.0269, forcesz_mae: 0.0222, forces_mae: 0.0229, forces_cosine_similarity: 0.0372, forces_magnitude_error: 0.0354, energy_forces_within_threshold: 0.0000, loss: 2.1169, epoch: 0.5085
+2024-11-06 17:41:53 (INFO): Predicting on test.
+
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.72s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.77s/it]
device 0: 100%|██████████| 2/2 [00:06<00:00, 3.01s/it]
+2024-11-06 17:41:59 (INFO): Writing results to fine-tuning/results/2024-11-06-17-40-16-ft-oxides/ocp_predictions.npz
+2024-11-06 17:42:23 (INFO): energy_mae: 5.72e+00, forcesx_mae: 2.21e-02, forcesy_mae: 1.47e-02, forcesz_mae: 1.98e-02, forces_mae: 1.89e-02, forces_cosine_similarity: 5.86e-02, forces_magnitude_error: 2.30e-02, energy_forces_within_threshold: 0.00e+00, loss: 5.75e+00, lr: 5.00e-04, epoch: 6.78e-01, step: 4.00e+01
+2024-11-06 17:42:26 (INFO): Evaluating on val.
+
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.76s/it]
device 0: 100%|██████████| 2/2 [00:06<00:00, 3.40s/it]
device 0: 100%|██████████| 2/2 [00:07<00:00, 3.53s/it]
+2024-11-06 17:42:33 (INFO): energy_mae: 9.2451, forcesx_mae: 0.0197, forcesy_mae: 0.0202, forcesz_mae: 0.0165, forces_mae: 0.0188, forces_cosine_similarity: 0.0075, forces_magnitude_error: 0.0235, energy_forces_within_threshold: 0.0000, loss: 9.3306, epoch: 0.6780
+2024-11-06 17:42:33 (INFO): Predicting on test.
+
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.53s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.74s/it]
device 0: 100%|██████████| 2/2 [00:05<00:00, 2.94s/it]
+2024-11-06 17:42:39 (INFO): Writing results to fine-tuning/results/2024-11-06-17-40-16-ft-oxides/ocp_predictions.npz
+2024-11-06 17:43:01 (INFO): energy_mae: 3.39e+00, forcesx_mae: 4.48e-02, forcesy_mae: 3.11e-02, forcesz_mae: 4.03e-02, forces_mae: 3.87e-02, forces_cosine_similarity: -6.04e-02, forces_magnitude_error: 5.50e-02, energy_forces_within_threshold: 0.00e+00, loss: 3.46e+00, lr: 5.00e-04, epoch: 8.47e-01, step: 5.00e+01
+2024-11-06 17:43:03 (INFO): Evaluating on val.
+
device 0: 0%| | 0/2 [00:00, ?it/s]
device 0: 50%|█████ | 1/2 [00:03<00:03, 3.73s/it]
device 0: 100%|██████████| 2/2 [00:06<00:00, 3.34s/it]
device 0: 100%|██████████| 2/2 [00:06<00:00, 3.47s/it]
+2024-11-06 17:43:10 (INFO): energy_mae: 10.5298, forcesx_mae: 0.0418, forcesy_mae: 0.0385, forcesz_mae: 0.0297, forces_mae: 0.0367, forces_cosine_similarity: -0.1401, forces_magnitude_error: 0.0549, energy_forces_within_threshold: 0.0000, loss: 10.5924, epoch: 0.8475
+2024-11-06 17:43:31 (INFO): Total time taken: 207.8829927444458
diff --git a/_downloads/819e10305ddd6839cd7da05935b17060/mass-inference.txt b/_downloads/819e10305ddd6839cd7da05935b17060/mass-inference.txt
index 210b7ed5de..4efc5fa413 100644
--- a/_downloads/819e10305ddd6839cd7da05935b17060/mass-inference.txt
+++ b/_downloads/819e10305ddd6839cd7da05935b17060/mass-inference.txt
@@ -1,6 +1,6 @@
-2024-10-23 20:39:14 (INFO): Running in local mode without elastic launch (single gpu only)
-2024-10-23 20:39:14 (INFO): Setting env PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
-2024-10-23 20:39:14 (INFO): Project root: /home/runner/work/fairchem/fairchem/src/fairchem
+2024-11-06 17:45:54 (INFO): Running in local mode without elastic launch (single gpu only)
+2024-11-06 17:45:54 (INFO): Setting env PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
+2024-11-06 17:45:54 (INFO): Project root: /home/runner/work/fairchem/fairchem/src/fairchem
/home/runner/work/fairchem/fairchem/src/fairchem/core/models/escn/so3.py:23: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
_Jd = torch.load(os.path.join(os.path.dirname(__file__), "Jd.pt"))
/home/runner/work/fairchem/fairchem/src/fairchem/core/models/scn/spherical_harmonics.py:23: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
@@ -15,17 +15,17 @@
@torch.cuda.amp.autocast(enabled=False)
/home/runner/work/fairchem/fairchem/src/fairchem/core/models/equiformer_v2/layer_norm.py:357: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
@torch.cuda.amp.autocast(enabled=False)
-2024-10-23 20:39:15 (INFO): amp: false
+2024-11-06 17:45:55 (INFO): amp: false
cmd:
- checkpoint_dir: /home/runner/work/fairchem/fairchem/docs/core/checkpoints/2024-10-23-20-39-28
- commit: da93cfe
+ checkpoint_dir: /home/runner/work/fairchem/fairchem/docs/core/checkpoints/2024-11-06-17-46-40
+ commit: 6329e92
identifier: ''
- logs_dir: /home/runner/work/fairchem/fairchem/docs/core/logs/tensorboard/2024-10-23-20-39-28
+ logs_dir: /home/runner/work/fairchem/fairchem/docs/core/logs/tensorboard/2024-11-06-17-46-40
print_every: 10
- results_dir: /home/runner/work/fairchem/fairchem/docs/core/results/2024-10-23-20-39-28
+ results_dir: /home/runner/work/fairchem/fairchem/docs/core/results/2024-11-06-17-46-40
seed: 0
- timestamp_id: 2024-10-23-20-39-28
- version: 0.1.dev1+gda93cfe
+ timestamp_id: 2024-11-06-17-46-40
+ version: 0.1.dev1+g6329e92
dataset: {}
evaluation_metrics:
metrics:
@@ -127,24 +127,24 @@ test_dataset:
trainer: ocp
val_dataset: {}
-2024-10-23 20:39:15 (INFO): Loading model: gemnet_t
-2024-10-23 20:39:17 (INFO): Loaded GemNetT with 31671825 parameters.
-2024-10-23 20:39:17 (WARNING): log_summary for Tensorboard not supported
-2024-10-23 20:39:17 (WARNING): Could not find dataset metadata.npz files in '[PosixPath('data.db')]'
-2024-10-23 20:39:17 (WARNING): Disabled BalancedBatchSampler because num_replicas=1.
-2024-10-23 20:39:17 (WARNING): Failed to get data sizes, falling back to uniform partitioning. BalancedBatchSampler requires a dataset that has a metadata attributed with number of atoms.
-2024-10-23 20:39:17 (INFO): rank: 0: Sampler created...
-2024-10-23 20:39:17 (INFO): Created BalancedBatchSampler with sampler=, batch_size=16, drop_last=False
-2024-10-23 20:39:17 (INFO): Attemping to load user specified checkpoint at /tmp/fairchem_checkpoints/gndt_oc22_all_s2ef.pt
-2024-10-23 20:39:17 (INFO): Loading checkpoint from: /tmp/fairchem_checkpoints/gndt_oc22_all_s2ef.pt
-/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/base_trainer.py:603: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
+2024-11-06 17:45:55 (INFO): Loading model: gemnet_t
+2024-11-06 17:45:57 (INFO): Loaded GemNetT with 31671825 parameters.
+2024-11-06 17:45:57 (WARNING): log_summary for Tensorboard not supported
+2024-11-06 17:45:57 (WARNING): Could not find dataset metadata.npz files in '[PosixPath('data.db')]'
+2024-11-06 17:45:57 (WARNING): Disabled BalancedBatchSampler because num_replicas=1.
+2024-11-06 17:45:57 (WARNING): Failed to get data sizes, falling back to uniform partitioning. BalancedBatchSampler requires a dataset that has a metadata attributed with number of atoms.
+2024-11-06 17:45:57 (INFO): rank: 0: Sampler created...
+2024-11-06 17:45:57 (INFO): Created BalancedBatchSampler with sampler=, batch_size=16, drop_last=False
+2024-11-06 17:45:57 (INFO): Attemping to load user specified checkpoint at /tmp/fairchem_checkpoints/gndt_oc22_all_s2ef.pt
+2024-11-06 17:45:57 (INFO): Loading checkpoint from: /tmp/fairchem_checkpoints/gndt_oc22_all_s2ef.pt
+/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/base_trainer.py:601: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
checkpoint = torch.load(checkpoint_path, map_location=map_location)
-2024-10-23 20:39:17 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint.
-2024-10-23 20:39:17 (WARNING): Scale factor comment not found in model
-2024-10-23 20:39:17 (INFO): Predicting on test.
-
device 0: 0%| | 0/3 [00:00, ?it/s]/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/ocp_trainer.py:453: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
+2024-11-06 17:45:57 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint.
+2024-11-06 17:45:57 (WARNING): Scale factor comment not found in model
+2024-11-06 17:45:57 (INFO): Predicting on test.
+
device 0: 0%| | 0/3 [00:00, ?it/s]/home/runner/work/fairchem/fairchem/src/fairchem/core/trainers/ocp_trainer.py:461: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
with torch.cuda.amp.autocast(enabled=self.scaler is not None):
-
device 0: 33%|███████████▋ | 1/3 [00:03<00:06, 3.24s/it]
device 0: 67%|███████████████████████▎ | 2/3 [00:06<00:02, 2.99s/it]
device 0: 100%|███████████████████████████████████| 3/3 [00:08<00:00, 2.58s/it]
device 0: 100%|███████████████████████████████████| 3/3 [00:08<00:00, 2.72s/it]
-2024-10-23 20:39:26 (INFO): Writing results to /home/runner/work/fairchem/fairchem/docs/core/results/2024-10-23-20-39-28/ocp_predictions.npz
-2024-10-23 20:39:26 (INFO): Total time taken: 8.327327013015747
-Elapsed time = 15.7 seconds
+
device 0: 33%|███████████▋ | 1/3 [00:03<00:06, 3.49s/it]
device 0: 67%|███████████████████████▎ | 2/3 [00:08<00:04, 4.67s/it]
device 0: 100%|███████████████████████████████████| 3/3 [00:11<00:00, 3.51s/it]
device 0: 100%|███████████████████████████████████| 3/3 [00:11<00:00, 3.71s/it]
+2024-11-06 17:46:08 (INFO): Writing results to /home/runner/work/fairchem/fairchem/docs/core/results/2024-11-06-17-46-40/ocp_predictions.npz
+2024-11-06 17:46:08 (INFO): Total time taken: 11.273377418518066
+Elapsed time = 18.4 seconds
diff --git a/_images/3b1d3072535815659fcd41fcfa84999fc973c4417dc7847142da158b885de662.png b/_images/3b1d3072535815659fcd41fcfa84999fc973c4417dc7847142da158b885de662.png
new file mode 100644
index 0000000000..10655861ca
Binary files /dev/null and b/_images/3b1d3072535815659fcd41fcfa84999fc973c4417dc7847142da158b885de662.png differ
diff --git a/_images/4518a7b2c1fbd861b974f320f7429fc3a1ad723ad984720e16d6e1f311afb59c.png b/_images/4518a7b2c1fbd861b974f320f7429fc3a1ad723ad984720e16d6e1f311afb59c.png
new file mode 100644
index 0000000000..dd92d6e9d0
Binary files /dev/null and b/_images/4518a7b2c1fbd861b974f320f7429fc3a1ad723ad984720e16d6e1f311afb59c.png differ
diff --git a/_images/7a93540b84e08ce8cc84ea31f87b209776158e60b6b75b9d57748d373e77d03d.png b/_images/49b63d472f6e407371466dfc822b337a6bf9417900b90da5894dd488e25a7fdd.png
similarity index 70%
rename from _images/7a93540b84e08ce8cc84ea31f87b209776158e60b6b75b9d57748d373e77d03d.png
rename to _images/49b63d472f6e407371466dfc822b337a6bf9417900b90da5894dd488e25a7fdd.png
index f5aa53f616..d005173d3e 100644
Binary files a/_images/7a93540b84e08ce8cc84ea31f87b209776158e60b6b75b9d57748d373e77d03d.png and b/_images/49b63d472f6e407371466dfc822b337a6bf9417900b90da5894dd488e25a7fdd.png differ
diff --git a/_images/4bace82b10c623914f65162c41715b5dfcc194c1569698d06031d1f715ccc1e4.png b/_images/4bace82b10c623914f65162c41715b5dfcc194c1569698d06031d1f715ccc1e4.gif
similarity index 100%
rename from _images/4bace82b10c623914f65162c41715b5dfcc194c1569698d06031d1f715ccc1e4.png
rename to _images/4bace82b10c623914f65162c41715b5dfcc194c1569698d06031d1f715ccc1e4.gif
diff --git a/_images/57f11d76813d1e21dd04af0b0b64c72549156f7c76bdfb81616387ecb9619555.png b/_images/57f11d76813d1e21dd04af0b0b64c72549156f7c76bdfb81616387ecb9619555.png
deleted file mode 100644
index f3ecb89d88..0000000000
Binary files a/_images/57f11d76813d1e21dd04af0b0b64c72549156f7c76bdfb81616387ecb9619555.png and /dev/null differ
diff --git a/_images/5d4843f0cac93be7bec2a2550ce0b520b42e70a7a8417e1336688ebf00675b12.png b/_images/5d4843f0cac93be7bec2a2550ce0b520b42e70a7a8417e1336688ebf00675b12.png
new file mode 100644
index 0000000000..ce53e1e634
Binary files /dev/null and b/_images/5d4843f0cac93be7bec2a2550ce0b520b42e70a7a8417e1336688ebf00675b12.png differ
diff --git a/_images/706c9d10f0670c899918f02b9d91821d5a85fbed3bc0fd7d5ab481303c7b7f1e.png b/_images/706c9d10f0670c899918f02b9d91821d5a85fbed3bc0fd7d5ab481303c7b7f1e.png
new file mode 100644
index 0000000000..e6848aba7f
Binary files /dev/null and b/_images/706c9d10f0670c899918f02b9d91821d5a85fbed3bc0fd7d5ab481303c7b7f1e.png differ
diff --git a/_images/7cb94665edb1d98736dd09114ee2d764d582a7b6c589e95c0553b2c1827daea3.png b/_images/7cb94665edb1d98736dd09114ee2d764d582a7b6c589e95c0553b2c1827daea3.png
deleted file mode 100644
index e024d2bd3a..0000000000
Binary files a/_images/7cb94665edb1d98736dd09114ee2d764d582a7b6c589e95c0553b2c1827daea3.png and /dev/null differ
diff --git a/_images/8d8dced2b5c8d17f245a404d150be294e5f9d338c087af9e4e2d911190efd8a9.png b/_images/8d8dced2b5c8d17f245a404d150be294e5f9d338c087af9e4e2d911190efd8a9.png
new file mode 100644
index 0000000000..ce03729402
Binary files /dev/null and b/_images/8d8dced2b5c8d17f245a404d150be294e5f9d338c087af9e4e2d911190efd8a9.png differ
diff --git a/_images/928c4ab868a9a84c65df3424030232c3f6bb27a2ac447f08a888ba2228d51386.png b/_images/928c4ab868a9a84c65df3424030232c3f6bb27a2ac447f08a888ba2228d51386.png
deleted file mode 100644
index a95939a17a..0000000000
Binary files a/_images/928c4ab868a9a84c65df3424030232c3f6bb27a2ac447f08a888ba2228d51386.png and /dev/null differ
diff --git a/_images/9b352c76d25a9ed9f842185220fcb7d663b56fe2cc68da33f0cfa626f2d799d7.png b/_images/9b352c76d25a9ed9f842185220fcb7d663b56fe2cc68da33f0cfa626f2d799d7.png
deleted file mode 100644
index 2ca108c925..0000000000
Binary files a/_images/9b352c76d25a9ed9f842185220fcb7d663b56fe2cc68da33f0cfa626f2d799d7.png and /dev/null differ
diff --git a/_images/aea3db562f5c103a2b6a397166d765950f2cf0f5d9d7fc35b31b78c364d02752.png b/_images/aea3db562f5c103a2b6a397166d765950f2cf0f5d9d7fc35b31b78c364d02752.png
deleted file mode 100644
index 3bc5ac025b..0000000000
Binary files a/_images/aea3db562f5c103a2b6a397166d765950f2cf0f5d9d7fc35b31b78c364d02752.png and /dev/null differ
diff --git a/_images/b79f6ecb8d7f97377a1b900a5fb72489f5b80d0b3313ad529d9f7a7afcfb7451.png b/_images/b79f6ecb8d7f97377a1b900a5fb72489f5b80d0b3313ad529d9f7a7afcfb7451.png
deleted file mode 100644
index 4119169041..0000000000
Binary files a/_images/b79f6ecb8d7f97377a1b900a5fb72489f5b80d0b3313ad529d9f7a7afcfb7451.png and /dev/null differ
diff --git a/_images/b8462766e3305ecff8dcd979a4b724c753aebef048001a12e1f7b1bc6d61da1b.png b/_images/b8462766e3305ecff8dcd979a4b724c753aebef048001a12e1f7b1bc6d61da1b.png
deleted file mode 100644
index 1d9fe9965a..0000000000
Binary files a/_images/b8462766e3305ecff8dcd979a4b724c753aebef048001a12e1f7b1bc6d61da1b.png and /dev/null differ
diff --git a/_images/d475a1416484b908ccfd6089b54db40c8252ab89de9535b6a5d10b33c02dfa05.png b/_images/d475a1416484b908ccfd6089b54db40c8252ab89de9535b6a5d10b33c02dfa05.png
new file mode 100644
index 0000000000..85c31ec000
Binary files /dev/null and b/_images/d475a1416484b908ccfd6089b54db40c8252ab89de9535b6a5d10b33c02dfa05.png differ
diff --git a/_images/d982e7c75bb1e2e9a6a00e0c24272235031961a6cf30714b1b908a7a0e19e9f1.png b/_images/d982e7c75bb1e2e9a6a00e0c24272235031961a6cf30714b1b908a7a0e19e9f1.png
new file mode 100644
index 0000000000..77ca9a0287
Binary files /dev/null and b/_images/d982e7c75bb1e2e9a6a00e0c24272235031961a6cf30714b1b908a7a0e19e9f1.png differ
diff --git a/_images/ed3455c998151b44fffdf966a1de05463d3b5cab1d1719f196c74ffeb1530bce.png b/_images/ed3455c998151b44fffdf966a1de05463d3b5cab1d1719f196c74ffeb1530bce.png
new file mode 100644
index 0000000000..7a201a41dd
Binary files /dev/null and b/_images/ed3455c998151b44fffdf966a1de05463d3b5cab1d1719f196c74ffeb1530bce.png differ
diff --git a/_images/fe3986b7ae5456a196311a6374cc2f21870ce78f70771b7a24ed66c87f01e292.png b/_images/fe3986b7ae5456a196311a6374cc2f21870ce78f70771b7a24ed66c87f01e292.png
deleted file mode 100644
index f36cda464a..0000000000
Binary files a/_images/fe3986b7ae5456a196311a6374cc2f21870ce78f70771b7a24ed66c87f01e292.png and /dev/null differ
diff --git a/_sources/autoapi/cattsunami/core/index.rst b/_sources/autoapi/cattsunami/core/index.rst
index 13cf41b9ac..7b56cdde94 100644
--- a/_sources/autoapi/cattsunami/core/index.rst
+++ b/_sources/autoapi/cattsunami/core/index.rst
@@ -35,9 +35,6 @@ Package Contents
.. py:attribute:: batch_size
- .. py:attribute:: config
-
-
.. py:attribute:: trainer
@@ -88,15 +85,6 @@ Package Contents
.. py:attribute:: reaction_db_path
- .. py:attribute:: reaction_db
-
-
- .. py:attribute:: adsorbate_db
-
-
- .. py:attribute:: entry
-
-
.. py:method:: get_desorption_mapping(reactant)
Get mapping for desorption reaction
diff --git a/_sources/autoapi/cattsunami/core/ocpneb/index.rst b/_sources/autoapi/cattsunami/core/ocpneb/index.rst
index e022987357..abb4a5f7a8 100644
--- a/_sources/autoapi/cattsunami/core/ocpneb/index.rst
+++ b/_sources/autoapi/cattsunami/core/ocpneb/index.rst
@@ -23,9 +23,6 @@ Module Contents
.. py:attribute:: batch_size
- .. py:attribute:: config
-
-
.. py:attribute:: trainer
diff --git a/_sources/autoapi/cattsunami/core/reaction/index.rst b/_sources/autoapi/cattsunami/core/reaction/index.rst
index dd3a716866..cb65096c06 100644
--- a/_sources/autoapi/cattsunami/core/reaction/index.rst
+++ b/_sources/autoapi/cattsunami/core/reaction/index.rst
@@ -23,15 +23,6 @@ Module Contents
.. py:attribute:: reaction_db_path
- .. py:attribute:: reaction_db
-
-
- .. py:attribute:: adsorbate_db
-
-
- .. py:attribute:: entry
-
-
.. py:method:: get_desorption_mapping(reactant)
Get mapping for desorption reaction
diff --git a/_sources/autoapi/cattsunami/index.rst b/_sources/autoapi/cattsunami/index.rst
index 85aa4c4975..814a3967e6 100644
--- a/_sources/autoapi/cattsunami/index.rst
+++ b/_sources/autoapi/cattsunami/index.rst
@@ -12,8 +12,8 @@ cattsunami
-Subpackages
------------
+Submodules
+----------
.. toctree::
:maxdepth: 1
diff --git a/_sources/autoapi/core/common/data_parallel/index.rst b/_sources/autoapi/core/common/data_parallel/index.rst
index 764972c88d..7ef9c037f1 100644
--- a/_sources/autoapi/core/common/data_parallel/index.rst
+++ b/_sources/autoapi/core/common/data_parallel/index.rst
@@ -107,9 +107,6 @@ Module Contents
.. py:attribute:: on_error
- .. py:attribute:: sampler
-
-
.. py:attribute:: device
diff --git a/_sources/autoapi/core/common/index.rst b/_sources/autoapi/core/common/index.rst
index 9017d93f67..4d3fc1950f 100644
--- a/_sources/autoapi/core/common/index.rst
+++ b/_sources/autoapi/core/common/index.rst
@@ -12,15 +12,6 @@ core.common
-Subpackages
------------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/core/common/relaxation/index
-
-
Submodules
----------
@@ -35,6 +26,7 @@ Submodules
/autoapi/core/common/logger/index
/autoapi/core/common/profiler_utils/index
/autoapi/core/common/registry/index
+ /autoapi/core/common/relaxation/index
/autoapi/core/common/slurm/index
/autoapi/core/common/test_utils/index
/autoapi/core/common/transforms/index
diff --git a/_sources/autoapi/core/common/logger/index.rst b/_sources/autoapi/core/common/logger/index.rst
index fd5ee19e1a..644e6296fd 100644
--- a/_sources/autoapi/core/common/logger/index.rst
+++ b/_sources/autoapi/core/common/logger/index.rst
@@ -81,15 +81,6 @@ Module Contents
tensorboard, etc.
- .. py:attribute:: project
-
-
- .. py:attribute:: entity
-
-
- .. py:attribute:: group
-
-
.. py:method:: watch(model, log_freq: int = 1000) -> None
Monitor parameters and gradients.
diff --git a/_sources/autoapi/core/common/registry/index.rst b/_sources/autoapi/core/common/registry/index.rst
index f9153685ce..359a0f78b6 100644
--- a/_sources/autoapi/core/common/registry/index.rst
+++ b/_sources/autoapi/core/common/registry/index.rst
@@ -107,6 +107,26 @@ Module Contents
+ .. py:method:: register_loss(name)
+ :classmethod:
+
+
+ Register a loss to registry with key 'name'
+
+ :param name: Key with which the loss will be registered.
+
+ Usage::
+
+ from fairchem.core.common.registry import registry
+ from torch import nn
+
+ @registry.register_loss("mae")
+ class MAELoss(nn.Module):
+ ...
+
+
+
+
.. py:method:: register_model(name: str)
:classmethod:
@@ -198,6 +218,11 @@ Module Contents
+ .. py:method:: get_loss_class(name)
+ :classmethod:
+
+
+
.. py:method:: get_model_class(name: str)
:classmethod:
diff --git a/_sources/autoapi/core/common/relaxation/ase_utils/index.rst b/_sources/autoapi/core/common/relaxation/ase_utils/index.rst
index a514269f97..66c7451869 100644
--- a/_sources/autoapi/core/common/relaxation/ase_utils/index.rst
+++ b/_sources/autoapi/core/common/relaxation/ase_utils/index.rst
@@ -63,20 +63,12 @@ Module Contents
Properties calculator can handle (energy, forces, ...)
- .. py:attribute:: checkpoint
- :value: None
-
-
-
.. py:attribute:: config
.. py:attribute:: trainer
- .. py:attribute:: seed
-
-
.. py:attribute:: a2g
diff --git a/_sources/autoapi/core/common/relaxation/index.rst b/_sources/autoapi/core/common/relaxation/index.rst
index 92c93dc8c4..062af74e4f 100644
--- a/_sources/autoapi/core/common/relaxation/index.rst
+++ b/_sources/autoapi/core/common/relaxation/index.rst
@@ -4,15 +4,6 @@ core.common.relaxation
.. py:module:: core.common.relaxation
-Subpackages
------------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/core/common/relaxation/optimizers/index
-
-
Submodules
----------
@@ -21,5 +12,6 @@ Submodules
/autoapi/core/common/relaxation/ase_utils/index
/autoapi/core/common/relaxation/ml_relaxation/index
+ /autoapi/core/common/relaxation/optimizers/index
diff --git a/_sources/autoapi/core/common/utils/index.rst b/_sources/autoapi/core/common/utils/index.rst
index 4b498ad16e..68814d04f4 100644
--- a/_sources/autoapi/core/common/utils/index.rst
+++ b/_sources/autoapi/core/common/utils/index.rst
@@ -61,6 +61,7 @@ Functions
core.common.utils.get_max_neighbors_mask
core.common.utils.get_pruned_edge_idx
core.common.utils.merge_dicts
+ core.common.utils.debug_log_entry_exit
core.common.utils.setup_logging
core.common.utils.compute_neighbors
core.common.utils.check_traj_files
@@ -75,7 +76,6 @@ Functions
core.common.utils.cg_change_mat
core.common.utils.irreps_sum
core.common.utils.update_config
- core.common.utils.get_loss_module
core.common.utils.load_model_and_weights_from_checkpoint
core.common.utils.get_timestamp_uid
@@ -257,6 +257,8 @@ Module Contents
+.. py:function:: debug_log_entry_exit(func)
+
.. py:function:: setup_logging() -> None
.. py:function:: compute_neighbors(data, edge_index)
@@ -294,8 +296,6 @@ Module Contents
are now. Update old configs to fit the new expected structure.
-.. py:function:: get_loss_module(loss_name)
-
.. py:function:: load_model_and_weights_from_checkpoint(checkpoint_path: str) -> torch.nn.Module
.. py:function:: get_timestamp_uid() -> str
diff --git a/_sources/autoapi/core/datasets/ase_datasets/index.rst b/_sources/autoapi/core/datasets/ase_datasets/index.rst
index 52a7112a0f..d269cc4d00 100644
--- a/_sources/autoapi/core/datasets/ase_datasets/index.rst
+++ b/_sources/autoapi/core/datasets/ase_datasets/index.rst
@@ -70,9 +70,6 @@ Module Contents
Identifiers need not be any particular type.
- .. py:attribute:: a2g_args
-
-
.. py:attribute:: a2g
diff --git a/_sources/autoapi/core/datasets/index.rst b/_sources/autoapi/core/datasets/index.rst
index 1ee6ab1c7e..b8f2ee82de 100644
--- a/_sources/autoapi/core/datasets/index.rst
+++ b/_sources/autoapi/core/datasets/index.rst
@@ -4,15 +4,6 @@ core.datasets
.. py:module:: core.datasets
-Subpackages
------------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/core/datasets/embeddings/index
-
-
Submodules
----------
@@ -22,6 +13,7 @@ Submodules
/autoapi/core/datasets/_utils/index
/autoapi/core/datasets/ase_datasets/index
/autoapi/core/datasets/base_dataset/index
+ /autoapi/core/datasets/embeddings/index
/autoapi/core/datasets/lmdb_database/index
/autoapi/core/datasets/lmdb_dataset/index
/autoapi/core/datasets/oc22_lmdb_dataset/index
@@ -347,10 +339,12 @@ Package Contents
.. py:property:: metadata
+
Load the metadata from the DB if present
.. py:property:: _nextid
+
Get the id of the next row to be written
diff --git a/_sources/autoapi/core/datasets/lmdb_database/index.rst b/_sources/autoapi/core/datasets/lmdb_database/index.rst
index 1c5f256ca1..3dfd3ffa85 100644
--- a/_sources/autoapi/core/datasets/lmdb_database/index.rst
+++ b/_sources/autoapi/core/datasets/lmdb_database/index.rst
@@ -96,10 +96,12 @@ Module Contents
.. py:property:: metadata
+
Load the metadata from the DB if present
.. py:property:: _nextid
+
Get the id of the next row to be written
diff --git a/_sources/autoapi/core/index.rst b/_sources/autoapi/core/index.rst
index 6dd99ba237..c4ea9cb521 100644
--- a/_sources/autoapi/core/index.rst
+++ b/_sources/autoapi/core/index.rst
@@ -12,12 +12,14 @@ core
-Subpackages
------------
+Submodules
+----------
.. toctree::
:maxdepth: 1
+ /autoapi/core/_cli/index
+ /autoapi/core/_cli_hydra/index
/autoapi/core/common/index
/autoapi/core/datasets/index
/autoapi/core/models/index
@@ -28,13 +30,3 @@ Subpackages
/autoapi/core/trainers/index
-Submodules
-----------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/core/_cli/index
- /autoapi/core/_cli_hydra/index
-
-
diff --git a/_sources/autoapi/core/models/base/index.rst b/_sources/autoapi/core/models/base/index.rst
index 976cbccf70..772f7998ec 100644
--- a/_sources/autoapi/core/models/base/index.rst
+++ b/_sources/autoapi/core/models/base/index.rst
@@ -126,7 +126,7 @@ Module Contents
-.. py:class:: HydraModel(backbone: dict | None = None, heads: dict | None = None, finetune_config: dict | None = None, otf_graph: bool = True, pass_through_head_outputs: bool = False)
+.. py:class:: HydraModel(backbone: dict | None = None, heads: dict | None = None, finetune_config: dict | None = None, otf_graph: bool = True, pass_through_head_outputs: bool = False, freeze_backbone: bool = False)
Bases: :py:obj:`torch.nn.Module`, :py:obj:`GraphModelMixin`
@@ -174,11 +174,6 @@ Module Contents
.. py:attribute:: pass_through_head_outputs
- .. py:attribute:: starting_model
- :value: None
-
-
-
.. py:method:: forward(data: torch_geometric.data.Batch)
diff --git a/_sources/autoapi/core/models/dimenet_plus_plus/index.rst b/_sources/autoapi/core/models/dimenet_plus_plus/index.rst
index f3d560ede3..42659bd76a 100644
--- a/_sources/autoapi/core/models/dimenet_plus_plus/index.rst
+++ b/_sources/autoapi/core/models/dimenet_plus_plus/index.rst
@@ -247,9 +247,6 @@ Module Contents
- .. py:attribute:: act
-
-
.. py:attribute:: cutoff
diff --git a/_sources/autoapi/core/models/equiformer_v2/activation/index.rst b/_sources/autoapi/core/models/equiformer_v2/activation/index.rst
index 20404a62de..b19335a178 100644
--- a/_sources/autoapi/core/models/equiformer_v2/activation/index.rst
+++ b/_sources/autoapi/core/models/equiformer_v2/activation/index.rst
@@ -387,19 +387,6 @@ Module Contents
.. py:attribute:: num_channels
- .. py:attribute:: num_components
- :value: 0
-
-
-
- .. py:attribute:: expand_index
-
-
- .. py:attribute:: start_idx
- :value: 0
-
-
-
.. py:attribute:: scalar_act
diff --git a/_sources/autoapi/core/models/equiformer_v2/index.rst b/_sources/autoapi/core/models/equiformer_v2/index.rst
index 452c7e846b..77efbb2190 100644
--- a/_sources/autoapi/core/models/equiformer_v2/index.rst
+++ b/_sources/autoapi/core/models/equiformer_v2/index.rst
@@ -4,16 +4,6 @@ core.models.equiformer_v2
.. py:module:: core.models.equiformer_v2
-Subpackages
------------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/core/models/equiformer_v2/prediction_heads/index
- /autoapi/core/models/equiformer_v2/trainers/index
-
-
Submodules
----------
@@ -30,9 +20,11 @@ Submodules
/autoapi/core/models/equiformer_v2/input_block/index
/autoapi/core/models/equiformer_v2/layer_norm/index
/autoapi/core/models/equiformer_v2/module_list/index
+ /autoapi/core/models/equiformer_v2/prediction_heads/index
/autoapi/core/models/equiformer_v2/radial_function/index
/autoapi/core/models/equiformer_v2/so2_ops/index
/autoapi/core/models/equiformer_v2/so3/index
+ /autoapi/core/models/equiformer_v2/trainers/index
/autoapi/core/models/equiformer_v2/transformer_block/index
/autoapi/core/models/equiformer_v2/wigner/index
diff --git a/_sources/autoapi/core/models/equiformer_v2/layer_norm/index.rst b/_sources/autoapi/core/models/equiformer_v2/layer_norm/index.rst
index 572d3af5dc..8bb8fdbd7d 100644
--- a/_sources/autoapi/core/models/equiformer_v2/layer_norm/index.rst
+++ b/_sources/autoapi/core/models/equiformer_v2/layer_norm/index.rst
@@ -204,9 +204,6 @@ Module Contents
.. py:attribute:: normalization
- .. py:attribute:: expand_index
-
-
.. py:method:: __repr__() -> str
@@ -237,9 +234,6 @@ Module Contents
.. py:attribute:: affine_weight
- .. py:attribute:: expand_index
-
-
.. py:method:: __repr__() -> str
diff --git a/_sources/autoapi/core/models/equiformer_v2/radial_function/index.rst b/_sources/autoapi/core/models/equiformer_v2/radial_function/index.rst
index adc0a3e233..2306c93931 100644
--- a/_sources/autoapi/core/models/equiformer_v2/radial_function/index.rst
+++ b/_sources/autoapi/core/models/equiformer_v2/radial_function/index.rst
@@ -23,36 +23,6 @@ Module Contents
Contruct a radial function (linear layers + layer normalization + SiLU) given a list of channels
- .. py:attribute:: modules
- :value: []
-
-
- Return an iterator over all modules in the network.
-
- :Yields: *Module* -- a module in the network
-
- .. note::
-
- Duplicate modules are returned only once. In the following
- example, ``l`` will be returned only once.
-
- Example::
-
- >>> l = nn.Linear(2, 2)
- >>> net = nn.Sequential(l, l)
- >>> for idx, m in enumerate(net.modules()):
- ... print(idx, '->', m)
-
- 0 -> Sequential(
- (0): Linear(in_features=2, out_features=2, bias=True)
- (1): Linear(in_features=2, out_features=2, bias=True)
- )
- 1 -> Linear(in_features=2, out_features=2, bias=True)
-
-
- .. py:attribute:: input_channels
-
-
.. py:attribute:: net
diff --git a/_sources/autoapi/core/models/equiformer_v2/so2_ops/index.rst b/_sources/autoapi/core/models/equiformer_v2/so2_ops/index.rst
index 743a7c13ef..5ca9a442b5 100644
--- a/_sources/autoapi/core/models/equiformer_v2/so2_ops/index.rst
+++ b/_sources/autoapi/core/models/equiformer_v2/so2_ops/index.rst
@@ -53,11 +53,6 @@ Module Contents
:type: int
- .. py:attribute:: num_channels
- :value: 0
-
-
-
.. py:attribute:: fc
@@ -113,19 +108,6 @@ Module Contents
.. py:attribute:: extra_m0_output_channels
- .. py:attribute:: num_channels_rad
- :value: 0
-
-
-
- .. py:attribute:: num_channels_m0
- :value: 0
-
-
-
- .. py:attribute:: m0_output_channels
-
-
.. py:attribute:: fc_m0
@@ -184,16 +166,6 @@ Module Contents
.. py:attribute:: num_resolutions
- .. py:attribute:: num_channels_rad
- :value: 0
-
-
-
- .. py:attribute:: num_channels_m0
- :value: 0
-
-
-
.. py:attribute:: fc_m0
diff --git a/_sources/autoapi/core/models/equiformer_v2/so3/index.rst b/_sources/autoapi/core/models/equiformer_v2/so3/index.rst
index aed875c146..4b9f12b765 100644
--- a/_sources/autoapi/core/models/equiformer_v2/so3/index.rst
+++ b/_sources/autoapi/core/models/equiformer_v2/so3/index.rst
@@ -60,32 +60,6 @@ Module Contents
- .. py:attribute:: l_harmonic
-
-
- .. py:attribute:: m_harmonic
-
-
- .. py:attribute:: m_complex
-
-
- .. py:attribute:: res_size
-
-
- .. py:attribute:: offset
- :value: 0
-
-
-
- .. py:attribute:: num_coefficients
-
-
- .. py:attribute:: to_m
-
-
- .. py:attribute:: m_size
-
-
.. py:attribute:: mask_indices_cache
:value: None
@@ -142,9 +116,6 @@ Module Contents
- .. py:attribute:: embedding
-
-
.. py:method:: clone() -> SO3_Embedding
@@ -237,27 +208,16 @@ Module Contents
.. py:attribute:: mapping
- .. py:attribute:: device
- :value: 'cpu'
-
-
-
- .. py:attribute:: to_grid
-
-
- .. py:attribute:: to_grid_mat
-
+ .. py:method:: get_to_grid_mat(device)
- .. py:attribute:: from_grid
+ .. py:method:: get_from_grid_mat(device)
- .. py:attribute:: from_grid_mat
+ .. py:method:: to_grid(embedding, lmax: int, mmax: int)
- .. py:method:: get_to_grid_mat(device)
-
- .. py:method:: get_from_grid_mat(device)
+ .. py:method:: from_grid(grid, lmax: int, mmax: int)
.. py:class:: SO3_Linear(in_features: int, out_features: int, lmax: int, bias: bool = True)
@@ -364,15 +324,9 @@ Module Contents
.. py:attribute:: weight
- .. py:attribute:: bound
-
-
.. py:attribute:: bias
- .. py:attribute:: expand_index
-
-
.. py:method:: forward(input_embedding)
diff --git a/_sources/autoapi/core/models/equiformer_v2/transformer_block/index.rst b/_sources/autoapi/core/models/equiformer_v2/transformer_block/index.rst
index 8ea081b698..960721d8e4 100644
--- a/_sources/autoapi/core/models/equiformer_v2/transformer_block/index.rst
+++ b/_sources/autoapi/core/models/equiformer_v2/transformer_block/index.rst
@@ -127,11 +127,6 @@ Module Contents
.. py:attribute:: use_sep_s2_act
- .. py:attribute:: extra_m0_output_channels
- :value: None
-
-
-
.. py:attribute:: so2_conv_1
@@ -279,9 +274,6 @@ Module Contents
:type proj_drop: float
- .. py:attribute:: max_lmax
-
-
.. py:attribute:: norm_1
diff --git a/_sources/autoapi/core/models/escn/escn/index.rst b/_sources/autoapi/core/models/escn/escn/index.rst
index 880e6b2e03..8ea21b9172 100644
--- a/_sources/autoapi/core/models/escn/escn/index.rst
+++ b/_sources/autoapi/core/models/escn/escn/index.rst
@@ -182,9 +182,6 @@ Module Contents
.. py:attribute:: sphharm_weights
- :type: list[torch.nn.Parameter]
- :value: []
-
.. py:method:: forward(data)
@@ -294,12 +291,10 @@ Module Contents
:vartype training: bool
- .. py:attribute:: energy_block
- :value: None
-
+ .. py:attribute:: reduce
- .. py:attribute:: reduce
+ .. py:attribute:: energy_block
.. py:method:: forward(data: torch_geometric.data.batch.Batch, emb: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]
@@ -354,8 +349,6 @@ Module Contents
.. py:attribute:: force_block
- :value: None
-
.. py:method:: forward(data: torch_geometric.data.batch.Batch, emb: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]
@@ -542,11 +535,6 @@ Module Contents
.. py:attribute:: act
- .. py:attribute:: num_channels_m0
- :value: 0
-
-
-
.. py:attribute:: fc1_dist0
@@ -605,11 +593,6 @@ Module Contents
.. py:attribute:: act
- .. py:attribute:: num_channels
- :value: 0
-
-
-
.. py:attribute:: fc1_dist
diff --git a/_sources/autoapi/core/models/escn/escn_exportable/index.rst b/_sources/autoapi/core/models/escn/escn_exportable/index.rst
index e8b5b20315..ec822c39ff 100644
--- a/_sources/autoapi/core/models/escn/escn_exportable/index.rst
+++ b/_sources/autoapi/core/models/escn/escn_exportable/index.rst
@@ -157,9 +157,6 @@ Module Contents
.. py:attribute:: sph_feature_size
- .. py:attribute:: Jd_list
-
-
.. py:method:: forward_trainable(data: torch_geometric.data.batch.Batch) -> dict[str, torch.Tensor]
@@ -356,9 +353,6 @@ Module Contents
.. py:attribute:: mappingReduced
- .. py:attribute:: num_channels_m0
-
-
.. py:attribute:: fc1_dist0
@@ -415,14 +409,6 @@ Module Contents
.. py:attribute:: act
- .. py:attribute:: num_coefficents
- :value: 0
-
-
-
- .. py:attribute:: num_channels
-
-
.. py:attribute:: fc1_dist
diff --git a/_sources/autoapi/core/models/escn/index.rst b/_sources/autoapi/core/models/escn/index.rst
index 85a9451761..e1d0282bed 100644
--- a/_sources/autoapi/core/models/escn/index.rst
+++ b/_sources/autoapi/core/models/escn/index.rst
@@ -176,9 +176,6 @@ Package Contents
.. py:attribute:: sphharm_weights
- :type: list[torch.nn.Parameter]
- :value: []
-
.. py:method:: forward(data)
diff --git a/_sources/autoapi/core/models/escn/so3/index.rst b/_sources/autoapi/core/models/escn/so3/index.rst
index e9b5697c1d..837369a0db 100644
--- a/_sources/autoapi/core/models/escn/so3/index.rst
+++ b/_sources/autoapi/core/models/escn/so3/index.rst
@@ -69,14 +69,6 @@ Module Contents
.. py:attribute:: res_size
- .. py:attribute:: offset
- :value: 0
-
-
-
- .. py:attribute:: num_coefficients
-
-
.. py:attribute:: to_m
@@ -122,9 +114,6 @@ Module Contents
- .. py:attribute:: embedding
-
-
.. py:method:: clone() -> SO3_Embedding
diff --git a/_sources/autoapi/core/models/escn/so3_exportable/index.rst b/_sources/autoapi/core/models/escn/so3_exportable/index.rst
index 0aa96050fd..ded96a1171 100644
--- a/_sources/autoapi/core/models/escn/so3_exportable/index.rst
+++ b/_sources/autoapi/core/models/escn/so3_exportable/index.rst
@@ -54,29 +54,9 @@ Module Contents
.. py:attribute:: num_resolutions
- .. py:attribute:: l_harmonic
-
-
- .. py:attribute:: m_harmonic
-
-
- .. py:attribute:: m_complex
-
-
.. py:attribute:: res_size
- .. py:attribute:: offset
- :value: 0
-
-
-
- .. py:attribute:: num_coefficients
-
-
- .. py:attribute:: to_m
-
-
.. py:attribute:: m_size
@@ -133,26 +113,15 @@ Module Contents
.. py:attribute:: mapping
- .. py:attribute:: device
- :value: 'cpu'
-
-
-
- .. py:attribute:: to_grid
-
-
- .. py:attribute:: to_grid_mat
-
-
- .. py:attribute:: from_grid
+ .. py:method:: get_to_grid_mat(device=None)
- .. py:attribute:: from_grid_mat
+ .. py:method:: get_from_grid_mat(device=None)
- .. py:method:: get_to_grid_mat(device=None)
+ .. py:method:: to_grid(embedding, lmax: int, mmax: int)
- .. py:method:: get_from_grid_mat(device=None)
+ .. py:method:: from_grid(grid, lmax: int, mmax: int)
diff --git a/_sources/autoapi/core/models/gemnet/gemnet/index.rst b/_sources/autoapi/core/models/gemnet/gemnet/index.rst
index cdab48634a..cec423ef83 100644
--- a/_sources/autoapi/core/models/gemnet/gemnet/index.rst
+++ b/_sources/autoapi/core/models/gemnet/gemnet/index.rst
@@ -112,9 +112,6 @@ Module Contents
.. py:attribute:: radial_basis
- .. py:attribute:: radial_basis_cbf3
-
-
.. py:attribute:: cbf_basis3
@@ -137,16 +134,9 @@ Module Contents
.. py:attribute:: out_blocks
- :value: []
-
.. py:attribute:: int_blocks
- :value: []
-
-
-
- .. py:attribute:: interaction_block
.. py:attribute:: shared_parameters
diff --git a/_sources/autoapi/core/models/gemnet/index.rst b/_sources/autoapi/core/models/gemnet/index.rst
index 0054b377af..df48d35674 100644
--- a/_sources/autoapi/core/models/gemnet/index.rst
+++ b/_sources/autoapi/core/models/gemnet/index.rst
@@ -4,15 +4,6 @@ core.models.gemnet
.. py:module:: core.models.gemnet
-Subpackages
------------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/core/models/gemnet/layers/index
-
-
Submodules
----------
@@ -21,6 +12,7 @@ Submodules
/autoapi/core/models/gemnet/gemnet/index
/autoapi/core/models/gemnet/initializers/index
+ /autoapi/core/models/gemnet/layers/index
/autoapi/core/models/gemnet/utils/index
@@ -121,9 +113,6 @@ Package Contents
.. py:attribute:: radial_basis
- .. py:attribute:: radial_basis_cbf3
-
-
.. py:attribute:: cbf_basis3
@@ -146,16 +135,9 @@ Package Contents
.. py:attribute:: out_blocks
- :value: []
-
.. py:attribute:: int_blocks
- :value: []
-
-
-
- .. py:attribute:: interaction_block
.. py:attribute:: shared_parameters
diff --git a/_sources/autoapi/core/models/gemnet/layers/embedding_block/index.rst b/_sources/autoapi/core/models/gemnet/layers/embedding_block/index.rst
index f5a3f2ade9..033b991326 100644
--- a/_sources/autoapi/core/models/gemnet/layers/embedding_block/index.rst
+++ b/_sources/autoapi/core/models/gemnet/layers/embedding_block/index.rst
@@ -61,9 +61,6 @@ Module Contents
:type activation: str
- .. py:attribute:: in_features
-
-
.. py:attribute:: dense
diff --git a/_sources/autoapi/core/models/gemnet/layers/interaction_block/index.rst b/_sources/autoapi/core/models/gemnet/layers/interaction_block/index.rst
index 6f670adcf8..b111f243a9 100644
--- a/_sources/autoapi/core/models/gemnet/layers/interaction_block/index.rst
+++ b/_sources/autoapi/core/models/gemnet/layers/interaction_block/index.rst
@@ -58,9 +58,6 @@ Module Contents
.. py:attribute:: name
- .. py:attribute:: block_nr
-
-
.. py:attribute:: dense_ca
diff --git a/_sources/autoapi/core/models/gemnet/layers/radial_basis/index.rst b/_sources/autoapi/core/models/gemnet/layers/radial_basis/index.rst
index 682482ba57..163e8e92cd 100644
--- a/_sources/autoapi/core/models/gemnet/layers/radial_basis/index.rst
+++ b/_sources/autoapi/core/models/gemnet/layers/radial_basis/index.rst
@@ -111,21 +111,12 @@ Module Contents
:type pregamma_initial: float
- .. py:attribute:: prefactor
-
-
.. py:attribute:: pregamma
.. py:attribute:: softplus
- .. py:attribute:: exp1
-
-
- .. py:attribute:: exp2
-
-
.. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor
@@ -147,22 +138,10 @@ Module Contents
.. py:attribute:: inv_cutoff
- .. py:attribute:: env_name
-
-
- .. py:attribute:: env_hparams
-
-
.. py:attribute:: envelope
:type: PolynomialEnvelope | ExponentialEnvelope
- .. py:attribute:: rbf_name
-
-
- .. py:attribute:: rbf_hparams
-
-
.. py:method:: forward(d)
diff --git a/_sources/autoapi/core/models/gemnet/layers/spherical_basis/index.rst b/_sources/autoapi/core/models/gemnet/layers/spherical_basis/index.rst
index 8b122ad337..f08bc0403e 100644
--- a/_sources/autoapi/core/models/gemnet/layers/spherical_basis/index.rst
+++ b/_sources/autoapi/core/models/gemnet/layers/spherical_basis/index.rst
@@ -46,12 +46,6 @@ Module Contents
.. py:attribute:: efficient
- .. py:attribute:: cbf_name
-
-
- .. py:attribute:: cbf_hparams
-
-
.. py:method:: forward(D_ca, cosφ_cab, id3_ca)
diff --git a/_sources/autoapi/core/models/gemnet_gp/gemnet/index.rst b/_sources/autoapi/core/models/gemnet_gp/gemnet/index.rst
index 03a10dbd9d..837870cba3 100644
--- a/_sources/autoapi/core/models/gemnet_gp/gemnet/index.rst
+++ b/_sources/autoapi/core/models/gemnet_gp/gemnet/index.rst
@@ -115,9 +115,6 @@ Module Contents
.. py:attribute:: radial_basis
- .. py:attribute:: radial_basis_cbf3
-
-
.. py:attribute:: cbf_basis3
@@ -140,16 +137,9 @@ Module Contents
.. py:attribute:: out_blocks
- :value: []
-
.. py:attribute:: int_blocks
- :value: []
-
-
-
- .. py:attribute:: interaction_block
.. py:method:: get_triplets(edge_index, num_atoms)
diff --git a/_sources/autoapi/core/models/gemnet_gp/index.rst b/_sources/autoapi/core/models/gemnet_gp/index.rst
index 5c816a7e88..d09ac53f38 100644
--- a/_sources/autoapi/core/models/gemnet_gp/index.rst
+++ b/_sources/autoapi/core/models/gemnet_gp/index.rst
@@ -4,15 +4,6 @@ core.models.gemnet_gp
.. py:module:: core.models.gemnet_gp
-Subpackages
------------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/core/models/gemnet_gp/layers/index
-
-
Submodules
----------
@@ -21,6 +12,7 @@ Submodules
/autoapi/core/models/gemnet_gp/gemnet/index
/autoapi/core/models/gemnet_gp/initializers/index
+ /autoapi/core/models/gemnet_gp/layers/index
/autoapi/core/models/gemnet_gp/utils/index
@@ -127,9 +119,6 @@ Package Contents
.. py:attribute:: radial_basis
- .. py:attribute:: radial_basis_cbf3
-
-
.. py:attribute:: cbf_basis3
@@ -152,16 +141,9 @@ Package Contents
.. py:attribute:: out_blocks
- :value: []
-
.. py:attribute:: int_blocks
- :value: []
-
-
-
- .. py:attribute:: interaction_block
.. py:method:: get_triplets(edge_index, num_atoms)
diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/embedding_block/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/embedding_block/index.rst
index 639a50447f..5d7a19ee37 100644
--- a/_sources/autoapi/core/models/gemnet_gp/layers/embedding_block/index.rst
+++ b/_sources/autoapi/core/models/gemnet_gp/layers/embedding_block/index.rst
@@ -61,9 +61,6 @@ Module Contents
:type activation: str
- .. py:attribute:: in_features
-
-
.. py:attribute:: dense
diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/interaction_block/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/interaction_block/index.rst
index 9158a1c8d2..a8c82d11a2 100644
--- a/_sources/autoapi/core/models/gemnet_gp/layers/interaction_block/index.rst
+++ b/_sources/autoapi/core/models/gemnet_gp/layers/interaction_block/index.rst
@@ -58,9 +58,6 @@ Module Contents
.. py:attribute:: name
- .. py:attribute:: block_nr
-
-
.. py:attribute:: dense_ca
diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/radial_basis/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/radial_basis/index.rst
index 2df2df7d82..c9d6c50200 100644
--- a/_sources/autoapi/core/models/gemnet_gp/layers/radial_basis/index.rst
+++ b/_sources/autoapi/core/models/gemnet_gp/layers/radial_basis/index.rst
@@ -110,21 +110,12 @@ Module Contents
:type pregamma_initial: float
- .. py:attribute:: prefactor
-
-
.. py:attribute:: pregamma
.. py:attribute:: softplus
- .. py:attribute:: exp1
-
-
- .. py:attribute:: exp2
-
-
.. py:method:: forward(d_scaled) -> torch.Tensor
@@ -146,18 +137,6 @@ Module Contents
.. py:attribute:: inv_cutoff
- .. py:attribute:: env_name
-
-
- .. py:attribute:: env_hparams
-
-
- .. py:attribute:: rbf_name
-
-
- .. py:attribute:: rbf_hparams
-
-
.. py:method:: forward(d)
diff --git a/_sources/autoapi/core/models/gemnet_gp/layers/spherical_basis/index.rst b/_sources/autoapi/core/models/gemnet_gp/layers/spherical_basis/index.rst
index a10be21bd7..bddfa5a0b8 100644
--- a/_sources/autoapi/core/models/gemnet_gp/layers/spherical_basis/index.rst
+++ b/_sources/autoapi/core/models/gemnet_gp/layers/spherical_basis/index.rst
@@ -46,12 +46,6 @@ Module Contents
.. py:attribute:: efficient
- .. py:attribute:: cbf_name
-
-
- .. py:attribute:: cbf_hparams
-
-
.. py:method:: forward(D_ca, cosφ_cab, id3_ca)
diff --git a/_sources/autoapi/core/models/gemnet_oc/gemnet_oc/index.rst b/_sources/autoapi/core/models/gemnet_oc/gemnet_oc/index.rst
index 8cd7f902f2..05853b8813 100644
--- a/_sources/autoapi/core/models/gemnet_oc/gemnet_oc/index.rst
+++ b/_sources/autoapi/core/models/gemnet_oc/gemnet_oc/index.rst
@@ -206,13 +206,9 @@ Module Contents
.. py:attribute:: int_blocks
- :value: []
-
.. py:attribute:: out_blocks
- :value: []
-
.. py:attribute:: out_mlp_E
@@ -221,9 +217,6 @@ Module Contents
.. py:attribute:: out_energy
- .. py:attribute:: out_initializer
-
-
.. py:method:: set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint)
@@ -513,16 +506,9 @@ Module Contents
.. py:attribute:: out_mlp_E
- :value: None
-
.. py:attribute:: out_energy
- :value: None
-
-
-
- .. py:attribute:: out_initializer
.. py:method:: forward(data: torch_geometric.data.batch.Batch, emb: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]
@@ -582,9 +568,6 @@ Module Contents
.. py:attribute:: forces_coupled
- .. py:attribute:: emb_size_edge
-
-
.. py:method:: forward(data: torch_geometric.data.batch.Batch, emb: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]
Head forward.
diff --git a/_sources/autoapi/core/models/gemnet_oc/index.rst b/_sources/autoapi/core/models/gemnet_oc/index.rst
index f3cdaeea82..624782968a 100644
--- a/_sources/autoapi/core/models/gemnet_oc/index.rst
+++ b/_sources/autoapi/core/models/gemnet_oc/index.rst
@@ -4,15 +4,6 @@ core.models.gemnet_oc
.. py:module:: core.models.gemnet_oc
-Subpackages
------------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/core/models/gemnet_oc/layers/index
-
-
Submodules
----------
@@ -22,6 +13,7 @@ Submodules
/autoapi/core/models/gemnet_oc/gemnet_oc/index
/autoapi/core/models/gemnet_oc/initializers/index
/autoapi/core/models/gemnet_oc/interaction_indices/index
+ /autoapi/core/models/gemnet_oc/layers/index
/autoapi/core/models/gemnet_oc/utils/index
@@ -217,13 +209,9 @@ Package Contents
.. py:attribute:: int_blocks
- :value: []
-
.. py:attribute:: out_blocks
- :value: []
-
.. py:attribute:: out_mlp_E
@@ -232,9 +220,6 @@ Package Contents
.. py:attribute:: out_energy
- .. py:attribute:: out_initializer
-
-
.. py:method:: set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint)
diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/embedding_block/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/embedding_block/index.rst
index 6f89ffa62d..71f7a739ce 100644
--- a/_sources/autoapi/core/models/gemnet_oc/layers/embedding_block/index.rst
+++ b/_sources/autoapi/core/models/gemnet_oc/layers/embedding_block/index.rst
@@ -65,9 +65,6 @@ Module Contents
:type activation: str
- .. py:attribute:: in_features
-
-
.. py:attribute:: dense
diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/interaction_block/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/interaction_block/index.rst
index 3a19e9c94a..df5084506e 100644
--- a/_sources/autoapi/core/models/gemnet_oc/layers/interaction_block/index.rst
+++ b/_sources/autoapi/core/models/gemnet_oc/layers/interaction_block/index.rst
@@ -107,15 +107,9 @@ Module Contents
.. py:attribute:: inv_sqrt_2
- .. py:attribute:: num_eint
-
-
.. py:attribute:: inv_sqrt_num_eint
- .. py:attribute:: num_aint
-
-
.. py:attribute:: inv_sqrt_num_aint
diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/radial_basis/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/radial_basis/index.rst
index b023020b47..e3baea5b3d 100644
--- a/_sources/autoapi/core/models/gemnet_oc/layers/radial_basis/index.rst
+++ b/_sources/autoapi/core/models/gemnet_oc/layers/radial_basis/index.rst
@@ -107,9 +107,6 @@ Module Contents
:vartype training: bool
- .. py:attribute:: offset
-
-
.. py:attribute:: coeff
@@ -156,21 +153,12 @@ Module Contents
:type pregamma_initial: float
- .. py:attribute:: prefactor
-
-
.. py:attribute:: pregamma
.. py:attribute:: softplus
- .. py:attribute:: exp1
-
-
- .. py:attribute:: exp2
-
-
.. py:method:: forward(d_scaled: torch.Tensor) -> torch.Tensor
@@ -197,18 +185,6 @@ Module Contents
.. py:attribute:: scale_basis
- .. py:attribute:: env_name
-
-
- .. py:attribute:: env_hparams
-
-
- .. py:attribute:: rbf_name
-
-
- .. py:attribute:: rbf_hparams
-
-
.. py:method:: forward(d: torch.Tensor) -> torch.Tensor
diff --git a/_sources/autoapi/core/models/gemnet_oc/layers/spherical_basis/index.rst b/_sources/autoapi/core/models/gemnet_oc/layers/spherical_basis/index.rst
index 5cfd361a56..cc404d98ea 100644
--- a/_sources/autoapi/core/models/gemnet_oc/layers/spherical_basis/index.rst
+++ b/_sources/autoapi/core/models/gemnet_oc/layers/spherical_basis/index.rst
@@ -46,12 +46,6 @@ Module Contents
.. py:attribute:: scale_basis
- .. py:attribute:: cbf_name
-
-
- .. py:attribute:: cbf_hparams
-
-
.. py:method:: forward(D_ca, cosφ_cab)
@@ -81,12 +75,6 @@ Module Contents
.. py:attribute:: scale_basis
- .. py:attribute:: sbf_name
-
-
- .. py:attribute:: sbf_hparams
-
-
.. py:method:: forward(D_ca, cosφ_cab, θ_cabd)
diff --git a/_sources/autoapi/core/models/index.rst b/_sources/autoapi/core/models/index.rst
index 70d41039c5..04db101df3 100644
--- a/_sources/autoapi/core/models/index.rst
+++ b/_sources/autoapi/core/models/index.rst
@@ -4,34 +4,26 @@ core.models
.. py:module:: core.models
-Subpackages
------------
+Submodules
+----------
.. toctree::
:maxdepth: 1
+ /autoapi/core/models/base/index
+ /autoapi/core/models/dimenet_plus_plus/index
/autoapi/core/models/equiformer_v2/index
/autoapi/core/models/escn/index
/autoapi/core/models/gemnet/index
/autoapi/core/models/gemnet_gp/index
/autoapi/core/models/gemnet_oc/index
+ /autoapi/core/models/model_registry/index
/autoapi/core/models/painn/index
+ /autoapi/core/models/schnet/index
/autoapi/core/models/scn/index
/autoapi/core/models/utils/index
-Submodules
-----------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/core/models/base/index
- /autoapi/core/models/dimenet_plus_plus/index
- /autoapi/core/models/model_registry/index
- /autoapi/core/models/schnet/index
-
-
Attributes
----------
diff --git a/_sources/autoapi/core/models/painn/painn/index.rst b/_sources/autoapi/core/models/painn/painn/index.rst
index 3ffc240296..62169a7071 100644
--- a/_sources/autoapi/core/models/painn/painn/index.rst
+++ b/_sources/autoapi/core/models/painn/painn/index.rst
@@ -485,8 +485,6 @@ Module Contents
.. py:attribute:: out_energy
- :value: None
-
.. py:method:: forward(data: torch_geometric.data.batch.Batch, emb: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]
diff --git a/_sources/autoapi/core/models/scn/smearing/index.rst b/_sources/autoapi/core/models/scn/smearing/index.rst
index 3e90689321..f5c94b8732 100644
--- a/_sources/autoapi/core/models/scn/smearing/index.rst
+++ b/_sources/autoapi/core/models/scn/smearing/index.rst
@@ -66,9 +66,6 @@ Module Contents
.. py:attribute:: num_output
- .. py:attribute:: offset
-
-
.. py:attribute:: coeff
@@ -115,9 +112,6 @@ Module Contents
.. py:attribute:: num_output
- .. py:attribute:: offset
-
-
.. py:attribute:: coeff
@@ -164,9 +158,6 @@ Module Contents
.. py:attribute:: num_output
- .. py:attribute:: offset
-
-
.. py:attribute:: coeff
diff --git a/_sources/autoapi/core/models/utils/basis/index.rst b/_sources/autoapi/core/models/utils/basis/index.rst
index db63d8a361..1bd0bd843b 100644
--- a/_sources/autoapi/core/models/utils/basis/index.rst
+++ b/_sources/autoapi/core/models/utils/basis/index.rst
@@ -165,9 +165,6 @@ Module Contents
.. py:attribute:: use_cosine
- .. py:attribute:: freq
-
-
.. py:attribute:: freq_filter
@@ -214,13 +211,13 @@ Module Contents
.. py:attribute:: num_freqs
- .. py:attribute:: offset
-
-
.. py:attribute:: coeff
:type: float
+ .. py:attribute:: offset
+
+
.. py:method:: forward(x: torch.Tensor) -> torch.Tensor
@@ -271,9 +268,6 @@ Module Contents
.. py:attribute:: use_cosine
- .. py:attribute:: freq
-
-
.. py:attribute:: freq_filter
@@ -378,18 +372,6 @@ Module Contents
.. py:attribute:: max_n
- .. py:attribute:: m_list
- :type: list[int]
- :value: []
-
-
-
- .. py:attribute:: n_list
- :type: list[int]
- :value: []
-
-
-
.. py:attribute:: out_dim
diff --git a/_sources/autoapi/core/modules/evaluator/index.rst b/_sources/autoapi/core/modules/evaluator/index.rst
index 68e8a9d68e..269d19819b 100644
--- a/_sources/autoapi/core/modules/evaluator/index.rst
+++ b/_sources/autoapi/core/modules/evaluator/index.rst
@@ -17,7 +17,7 @@ Attributes
.. autoapisummary::
- core.modules.evaluator.NONE
+ core.modules.evaluator.NONE_SLICE
Classes
@@ -33,6 +33,13 @@ Functions
.. autoapisummary::
+ core.modules.evaluator.metrics_dict
+ core.modules.evaluator.cosine_similarity
+ core.modules.evaluator.mae
+ core.modules.evaluator.mse
+ core.modules.evaluator.per_atom_mae
+ core.modules.evaluator.per_atom_mse
+ core.modules.evaluator.magnitude_error
core.modules.evaluator.forcesx_mae
core.modules.evaluator.forcesx_mse
core.modules.evaluator.forcesy_mae
@@ -43,16 +50,13 @@ Functions
core.modules.evaluator.energy_within_threshold
core.modules.evaluator.average_distance_within_threshold
core.modules.evaluator.min_diff
- core.modules.evaluator.cosine_similarity
- core.modules.evaluator.mae
- core.modules.evaluator.mse
- core.modules.evaluator.magnitude_error
+ core.modules.evaluator.rmse
Module Contents
---------------
-.. py:data:: NONE
+.. py:data:: NONE_SLICE
.. py:class:: Evaluator(task: str | None = None, eval_metrics: dict | None = None)
@@ -70,15 +74,32 @@ Module Contents
.. py:attribute:: target_metrics
- .. py:method:: eval(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], prev_metrics=None)
+ .. py:method:: eval(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], prev_metrics: dict | None = None)
.. py:method:: update(key, stat, metrics)
-.. py:function:: forcesx_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE)
+.. py:function:: metrics_dict(metric_fun: Callable) -> Callable
+
+ Wrap up the return of a metrics function
+
-.. py:function:: forcesx_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE)
+.. py:function:: cosine_similarity(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE_SLICE)
+
+.. py:function:: mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE_SLICE) -> torch.Tensor
+
+.. py:function:: mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE_SLICE) -> torch.Tensor
+
+.. py:function:: per_atom_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE_SLICE) -> torch.Tensor
+
+.. py:function:: per_atom_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE_SLICE) -> torch.Tensor
+
+.. py:function:: magnitude_error(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE_SLICE, p: int = 2) -> torch.Tensor
+
+.. py:function:: forcesx_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE_SLICE)
+
+.. py:function:: forcesx_mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE_SLICE)
.. py:function:: forcesy_mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None)
@@ -96,11 +117,5 @@ Module Contents
.. py:function:: min_diff(pred_pos: torch.Tensor, dft_pos: torch.Tensor, cell: torch.Tensor, pbc: torch.Tensor)
-.. py:function:: cosine_similarity(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE)
-
-.. py:function:: mae(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) -> dict[str, float | int]
-
-.. py:function:: mse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE) -> dict[str, float | int]
-
-.. py:function:: magnitude_error(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = NONE, p: int = 2) -> dict[str, float | int]
+.. py:function:: rmse(prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor], key: collections.abc.Hashable = None) -> dict[str, float | int]
diff --git a/_sources/autoapi/core/modules/exponential_moving_average/index.rst b/_sources/autoapi/core/modules/exponential_moving_average/index.rst
index a7a1855f2f..21b5a24487 100644
--- a/_sources/autoapi/core/modules/exponential_moving_average/index.rst
+++ b/_sources/autoapi/core/modules/exponential_moving_average/index.rst
@@ -39,9 +39,6 @@ Module Contents
:type: int | None
- .. py:attribute:: parameters
-
-
.. py:attribute:: shadow_params
diff --git a/_sources/autoapi/core/modules/index.rst b/_sources/autoapi/core/modules/index.rst
index 0a2b0c0394..f10e116637 100644
--- a/_sources/autoapi/core/modules/index.rst
+++ b/_sources/autoapi/core/modules/index.rst
@@ -12,16 +12,6 @@ core.modules
-Subpackages
------------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/core/modules/normalization/index
- /autoapi/core/modules/scaling/index
-
-
Submodules
----------
@@ -31,6 +21,8 @@ Submodules
/autoapi/core/modules/evaluator/index
/autoapi/core/modules/exponential_moving_average/index
/autoapi/core/modules/loss/index
+ /autoapi/core/modules/normalization/index
+ /autoapi/core/modules/scaling/index
/autoapi/core/modules/scheduler/index
/autoapi/core/modules/transforms/index
diff --git a/_sources/autoapi/core/modules/loss/index.rst b/_sources/autoapi/core/modules/loss/index.rst
index fd75d0d7c0..95147b6aab 100644
--- a/_sources/autoapi/core/modules/loss/index.rst
+++ b/_sources/autoapi/core/modules/loss/index.rst
@@ -9,15 +9,17 @@ Classes
.. autoapisummary::
- core.modules.loss.L2MAELoss
- core.modules.loss.AtomwiseL2Loss
+ core.modules.loss.MAELoss
+ core.modules.loss.MSELoss
+ core.modules.loss.PerAtomMAELoss
+ core.modules.loss.L2NormLoss
core.modules.loss.DDPLoss
Module Contents
---------------
-.. py:class:: L2MAELoss(reduction: str = 'mean')
+.. py:class:: MAELoss
Bases: :py:obj:`torch.nn.Module`
@@ -54,13 +56,13 @@ Module Contents
:vartype training: bool
- .. py:attribute:: reduction
+ .. py:attribute:: loss
- .. py:method:: forward(input: torch.Tensor, target: torch.Tensor)
+ .. py:method:: forward(pred: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor) -> torch.Tensor
-.. py:class:: AtomwiseL2Loss(reduction: str = 'mean')
+.. py:class:: MSELoss
Bases: :py:obj:`torch.nn.Module`
@@ -97,58 +99,103 @@ Module Contents
:vartype training: bool
- .. py:attribute:: reduction
+ .. py:attribute:: loss
- .. py:method:: forward(input: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor)
+ .. py:method:: forward(pred: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor) -> torch.Tensor
-.. py:class:: DDPLoss(loss_fn, loss_name: str = 'mae', reduction: str = 'mean')
+.. py:class:: PerAtomMAELoss
Bases: :py:obj:`torch.nn.Module`
- Base class for all neural network modules.
+ Simply divide a loss by the number of atoms/nodes in the graph.
+   Currently this loss is intended to be used with scalar values, not vectors or higher-order tensors.
- Your models should also subclass this class.
- Modules can also contain other Modules, allowing to nest them in
- a tree structure. You can assign the submodules as regular attributes::
+ .. py:attribute:: loss
- import torch.nn as nn
- import torch.nn.functional as F
- class Model(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv1 = nn.Conv2d(1, 20, 5)
- self.conv2 = nn.Conv2d(20, 20, 5)
+ .. py:method:: forward(pred: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor) -> torch.Tensor
- def forward(self, x):
- x = F.relu(self.conv1(x))
- return F.relu(self.conv2(x))
- Submodules assigned in this way will be registered, and will have their
- parameters converted too when you call :meth:`to`, etc.
+.. py:class:: L2NormLoss
- .. note::
- As per the example above, an ``__init__()`` call to the parent class
- must be made before assignment on the child.
+ Bases: :py:obj:`torch.nn.Module`
- :ivar training: Boolean represents whether this module is in training or
- evaluation mode.
- :vartype training: bool
+   Currently this loss is intended to be used with vectors.
+
+
+ .. py:method:: forward(pred: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor) -> torch.Tensor
- .. py:attribute:: loss_fn
+.. py:class:: DDPLoss(loss_name, reduction: Literal['mean', 'sum'])
- .. py:attribute:: loss_name
+ Bases: :py:obj:`torch.nn.Module`
+
+
+   This class is a wrapper around a loss function that does a few things,
+   like handling NaNs, and importantly ensures the reduction is done
+ correctly for DDP. The main issue is that DDP averages gradients
+ over replicas — this only works out of the box if the dimension
+ you are averaging over is completely consistent across all replicas.
+ In our case, that is not true for the number of atoms per batch and
+ there are edge cases when the batch size differs between replicas
+ e.g. if the dataset size is not divisible by the batch_size.
+
+ Scalars are relatively straightforward to handle, but vectors and higher tensors
+ are a bit trickier. Below are two examples of forces.
+
+ Forces input: [Nx3] target: [Nx3]
+ Forces are a vector of length 3 (x,y,z) for each atom.
+ Number of atoms per batch (N) is different for each DDP replica.
+
+ MSE example:
+ #### Local loss computation ####
+ local_loss = MSELoss(input, target) -> [Nx3]
+ num_samples = local_loss.numel() -> [Nx3]
+ local_loss = sum(local_loss [Nx3]) -> [1] sum reduces the loss to a scalar
+ global_samples = all_reduce(num_samples) -> [N0x3 + N1x3 + N2x3 + ...] = [1] where N0 is the number of atoms on replica 0
+ local_loss = local_loss * world_size / global_samples -> [1]
+ #### Global loss computation ####
+ global_loss = sum(local_loss / world_size) -> [1]
+ == sum(local_loss / global_samples) # this is the desired corrected mean
+
+ Norm example:
+ #### Local loss computation ####
+ local_loss = L2MAELoss(input, target) -> [N]
+ num_samples = local_loss.numel() -> [N]
+ local_loss = sum(local_loss [N]) -> [1] sum reduces the loss to a scalar
+ global_samples = all_reduce(num_samples) -> [N0 + N1 + N2 + ...] = [1] where N0 is the number of atoms on replica 0
+ local_loss = local_loss * world_size / global_samples -> [1]
+ #### Global loss computation ####
+ global_loss = sum(local_loss / world_size) -> [1]
+ == sum(local_loss / global_samples) # this is the desired corrected mean
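+
+   A minimal sketch of the corrected-mean bookkeeping (an illustrative, self-contained
+   example with hard-coded per-replica losses, not the actual fairchem implementation):
+
+   .. code-block:: python
+
+      import torch
+
+      world_size = 2
+      # per-sample losses on each replica; note that the sample counts differ
+      per_replica_losses = [torch.tensor([1.0, 2.0, 3.0]), torch.tensor([4.0, 5.0])]
+
+      # all_reduce(num_samples): total number of samples across replicas
+      global_samples = sum(loss.numel() for loss in per_replica_losses)
+
+      # each replica rescales its summed loss by world_size / global_samples ...
+      local_losses = [loss.sum() * world_size / global_samples for loss in per_replica_losses]
+      # ... so DDP's divide-by-world_size gradient averaging recovers the true mean
+      ddp_mean = sum(local_losses) / world_size
+      assert torch.allclose(ddp_mean, torch.cat(per_replica_losses).mean())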
+
+
+ .. py:attribute:: loss_fn
.. py:attribute:: reduction
- .. py:method:: forward(input: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor | None = None, batch_size: int | None = None)
+ .. py:attribute:: reduction_map
+
+
+ .. py:method:: sum(input, loss, natoms)
+
+
+ .. py:method:: _ddp_mean(num_samples, loss)
+
+
+ .. py:method:: mean(input, loss, natoms)
+
+
+ .. py:method:: _reduction(input, loss, natoms)
+
+
+ .. py:method:: forward(input: torch.Tensor, target: torch.Tensor, natoms: torch.Tensor)
diff --git a/_sources/autoapi/core/scripts/index.rst b/_sources/autoapi/core/scripts/index.rst
index ac0b98f56f..b68eabd491 100644
--- a/_sources/autoapi/core/scripts/index.rst
+++ b/_sources/autoapi/core/scripts/index.rst
@@ -12,15 +12,6 @@ core.scripts
-Subpackages
------------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/core/scripts/hpo/index
-
-
Submodules
----------
@@ -34,6 +25,7 @@ Submodules
/autoapi/core/scripts/fit_normalizers/index
/autoapi/core/scripts/fit_references/index
/autoapi/core/scripts/gif_maker_parallelized/index
+ /autoapi/core/scripts/hpo/index
/autoapi/core/scripts/make_challenge_submission_file/index
/autoapi/core/scripts/make_lmdb_sizes/index
/autoapi/core/scripts/make_submission_file/index
diff --git a/_sources/autoapi/core/trainers/base_trainer/index.rst b/_sources/autoapi/core/trainers/base_trainer/index.rst
index 847a3d6ad5..07ef372881 100644
--- a/_sources/autoapi/core/trainers/base_trainer/index.rst
+++ b/_sources/autoapi/core/trainers/base_trainer/index.rst
@@ -55,12 +55,6 @@ Module Contents
:type: str
- .. py:attribute:: commit_hash
-
-
- .. py:attribute:: logger_name
-
-
.. py:attribute:: config
diff --git a/_sources/autoapi/core/trainers/index.rst b/_sources/autoapi/core/trainers/index.rst
index 57508ef76d..3c83608998 100644
--- a/_sources/autoapi/core/trainers/index.rst
+++ b/_sources/autoapi/core/trainers/index.rst
@@ -58,12 +58,6 @@ Package Contents
:type: str
- .. py:attribute:: commit_hash
-
-
- .. py:attribute:: logger_name
-
-
.. py:attribute:: config
diff --git a/_sources/autoapi/data/index.rst b/_sources/autoapi/data/index.rst
index cabb6cbf35..bc555f6734 100644
--- a/_sources/autoapi/data/index.rst
+++ b/_sources/autoapi/data/index.rst
@@ -4,8 +4,8 @@ data
.. py:module:: data
-Subpackages
------------
+Submodules
+----------
.. toctree::
:maxdepth: 1
diff --git a/_sources/autoapi/data/oc/core/index.rst b/_sources/autoapi/data/oc/core/index.rst
index fac4008c55..f203b50f67 100644
--- a/_sources/autoapi/data/oc/core/index.rst
+++ b/_sources/autoapi/data/oc/core/index.rst
@@ -676,6 +676,7 @@ Package Contents
.. py:property:: molecules_per_volume
+
Convert the solvent density in g/cm3 to the number of molecules per
angstrom cubed of volume.
diff --git a/_sources/autoapi/data/oc/core/solvent/index.rst b/_sources/autoapi/data/oc/core/solvent/index.rst
index 1bde27e8e0..22b870bbd6 100644
--- a/_sources/autoapi/data/oc/core/solvent/index.rst
+++ b/_sources/autoapi/data/oc/core/solvent/index.rst
@@ -59,6 +59,7 @@ Module Contents
.. py:property:: molecules_per_volume
+
Convert the solvent density in g/cm3 to the number of molecules per
angstrom cubed of volume.
diff --git a/_sources/autoapi/data/oc/databases/index.rst b/_sources/autoapi/data/oc/databases/index.rst
index 86750a8fef..759b492636 100644
--- a/_sources/autoapi/data/oc/databases/index.rst
+++ b/_sources/autoapi/data/oc/databases/index.rst
@@ -4,21 +4,13 @@ data.oc.databases
.. py:module:: data.oc.databases
-Subpackages
------------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/data/oc/databases/pkls/index
-
-
Submodules
----------
.. toctree::
:maxdepth: 1
+ /autoapi/data/oc/databases/pkls/index
/autoapi/data/oc/databases/update/index
diff --git a/_sources/autoapi/data/oc/index.rst b/_sources/autoapi/data/oc/index.rst
index 138690ea35..b9d2c9f174 100644
--- a/_sources/autoapi/data/oc/index.rst
+++ b/_sources/autoapi/data/oc/index.rst
@@ -12,24 +12,16 @@ data.oc
-Subpackages
------------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/data/oc/core/index
- /autoapi/data/oc/databases/index
- /autoapi/data/oc/utils/index
-
-
Submodules
----------
.. toctree::
:maxdepth: 1
+ /autoapi/data/oc/core/index
+ /autoapi/data/oc/databases/index
/autoapi/data/oc/structure_generator/index
+ /autoapi/data/oc/utils/index
Attributes
diff --git a/_sources/autoapi/data/oc/utils/geometry/index.rst b/_sources/autoapi/data/oc/utils/geometry/index.rst
index ea8797bd8f..0d007fa69b 100644
--- a/_sources/autoapi/data/oc/utils/geometry/index.rst
+++ b/_sources/autoapi/data/oc/utils/geometry/index.rst
@@ -112,15 +112,6 @@ Module Contents
- .. py:attribute:: a
-
-
- .. py:attribute:: b
-
-
- .. py:attribute:: c
-
-
.. py:attribute:: ur_corner
@@ -147,9 +138,6 @@ Module Contents
Box geometry for orthorhombic cells.
- .. py:attribute:: props
-
-
.. py:attribute:: params
diff --git a/_sources/autoapi/ocpapi/client/client/index.rst b/_sources/autoapi/ocpapi/client/client/index.rst
index 34f0856aca..2a52140038 100644
--- a/_sources/autoapi/ocpapi/client/client/index.rst
+++ b/_sources/autoapi/ocpapi/client/client/index.rst
@@ -75,6 +75,7 @@ Module Contents
.. py:property:: host
:type: str
+
The host being called by this client.
diff --git a/_sources/autoapi/ocpapi/client/index.rst b/_sources/autoapi/ocpapi/client/index.rst
index 04c353b5de..70265b1fde 100644
--- a/_sources/autoapi/ocpapi/client/index.rst
+++ b/_sources/autoapi/ocpapi/client/index.rst
@@ -73,6 +73,7 @@ Package Contents
.. py:property:: host
:type: str
+
The host being called by this client.
diff --git a/_sources/autoapi/ocpapi/index.rst b/_sources/autoapi/ocpapi/index.rst
index a58201701a..998aa8922e 100644
--- a/_sources/autoapi/ocpapi/index.rst
+++ b/_sources/autoapi/ocpapi/index.rst
@@ -12,23 +12,15 @@ ocpapi
-Subpackages
------------
-
-.. toctree::
- :maxdepth: 1
-
- /autoapi/ocpapi/client/index
- /autoapi/ocpapi/workflows/index
-
-
Submodules
----------
.. toctree::
:maxdepth: 1
+ /autoapi/ocpapi/client/index
/autoapi/ocpapi/version/index
+ /autoapi/ocpapi/workflows/index
Attributes
@@ -115,6 +107,7 @@ Package Contents
.. py:property:: host
:type: str
+
The host being called by this client.
diff --git a/_sources/core/datasets/oc20.md b/_sources/core/datasets/oc20.md
index dc0aae4ef7..8ff3606f8c 100644
--- a/_sources/core/datasets/oc20.md
+++ b/_sources/core/datasets/oc20.md
@@ -134,9 +134,115 @@ Each tarball has README file containing details about file formats, number of st
#### Per-adsorbate trajectories (optional download)
-Adsorbate+catalyst trajectories on a per adsorbate basis are provided [here](./DATASET_PER_ADSORBATE.md) to avoid having to download all systems. Note - a few adsorbates are intentionally left out for the test splits.
+Download links are in the table below:
+
+|Adsorbate symbol |Downloadable path |size |MD5 checksum |
+|--- |--- |--- |--- |
+|*O |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/0.tar |1006M |d4151542856b4b6405f276808f75358a |
+|*H |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/1.tar |850M |3697f04faf04251a23da8b88a78209f7 |
+|*OH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/2.tar |1.6G |a21081f3f55eb0c98a91021bbe3dac44 |
+|*OH2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/3.tar |1.8G |b12b706854f5d899e02a9ae6578b5d45 |
+|*C |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/4.tar |1.1G |e4fe9890764fcf59e01e3ceab089b978 |
+|*CH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/6.tar |1.4G |ec9aa2c4c4bd4419359438ba7fbb881d |
+|*CHO |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/7.tar |1.4G |d32200f74ad5c3bfd42e8835f36d57ab |
+|*COH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/8.tar |1.6G |5418a1b331f6c7689a5405cca4cc8d15 |
+|*CH2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/9.tar |1.6G |8ee1066149c305d7c17c219b369c5a73 |
+|*CH2*O |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/10.tar |1.7G |960c2450814024b66f3c79121179ac60 |
+|*CHOH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/11.tar |1.8G |60ac9f965f9589a3389483e3d1e58144 |
+|*CH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/12.tar |1.7G |7e123e6f4fb10d6897be3f47721dfd4a |
+|*OCH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/13.tar |1.8G |0823047bbbe05fa0e63f9d83ec601487 |
+|*CH2OH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/14.tar |1.9G |9ac71e198d75b1427182cd34abb73e4d |
+|*CH4 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/15.tar |1.9G |a405ce403018bf8afbd4425d5c0b34d5 |
+|*OHCH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/16.tar |2.1G |d3c829f1952db6e4f428273ee05f59b1 |
+|*C*C |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/17.tar |1.5G |d687a151345305897b9245af4b0f9967 |
+|*CCO |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/18.tar |1.7G |214ca96e620c5ec6e8a6ff8144a22a04 |
+|*CCH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/19.tar |1.6G |da2268545e80ca1664026449dd2fdd24 |
+|*CHCO |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/20.tar |1.7G |386c99407fe63080d26cda525dfdd8cd |
+|*CCHO |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/21.tar |1.8G |918b20960438494ab160a9dbd9668157 |
+|*COCHO |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/22.tar |1.8G |84424aa2ad30301e23ece1438ea39923 |
+|*CCHOH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/23.tar |2.0G |3cc90425ec042a70085ba7eb2916a79a |
+|*CCH2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/24.tar |1.8G |9dbcf7566e40965dd7f8a186a75a718e |
+|*CH*CH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/25.tar |1.7G |a193b4c72f915ba0b21a41790696b23c |
+|CH2*CO |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/26.tar |1.8G |de83cf50247f5556fa4f9f64beff1eeb |
+|*CHCHO |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/27.tar |1.9G |1d140aaa2e7b287124ab38911a711d70 |
+|*CH*COH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/28.tar |1.3G |682d8a6b05ca5948b34dc5e5f6bbcd61 |
+|*COCH2O |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/29.tar |1.9G |c8742faa8ca40e8edb4110069817fa70 |
+|*CHO*CHO |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/30.tar |2.0G |8cfbb67beb312b98c40fcb891dfa480a |
+|*COHCHO |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/31.tar |1.9G |6ffa903a62d8ec3319ecec6a03b06276 |
+|*COHCOH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/32.tar |2.0G |caca0058b641bfdc9f8de4527e60feb7 |
+|*CCH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/33.tar |1.8G |906543aaefc171edab388ff4f0fe8a20 |
+|*CHCH2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/34.tar |1.8G |4dfab479495f76179749c1956046fbd8 |
+|*COCH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/35.tar |1.9G |29d1b992715054e920e8bb2afe97b393 |
+|*CHCHOH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/38.tar |2.0G |9e5912df6f7b11706d1046cdb9e3087e |
+|*CCH2OH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/39.tar |2.1G |7bcae43cee451306e34ec416588a7f09 |
+|*CHOCHOH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/40.tar |2.0G |f98866d08fe3451ae7ebc47bb51599aa |
+|*COCH2OH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/41.tar |1.4G |bfaf689e5827fcf26c51e567bb8dd1be |
+|*COHCHOH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/42.tar |2.0G |236fe4e950aa2fbdde94ef2821fb48d2 |
+|*OCHCH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/44.tar |2.1G |66acc5460a999625c3364f0f3bcca871 |
+|*COHCH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/45.tar |2.1G |bb4a01956736399c8cee5e219f8c1229 |
+|*CHOHCH2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/46.tar |2.1G |e836de4ec146b1b611533f1ef682cace |
+|*CHCH2OH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/47.tar |2.0G |66df44121806debef6dc038df7115d1d |
+|*OCH2CHOH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/48.tar |2.2G |ff6981fdbcd2e65d351505c15d218d76 |
+|*CHOCH2OH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/49.tar |2.1G |448f7d352ab6e32f754e24de64ca302a |
+|*COHCH2OH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/50.tar |2.1G |8bff6bf3e10cc84acc4a283a375fcc23 |
+|*CHOHCHOH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/51.tar |2.0G |9c9e4d617d306751760a80f1453e71f1 |
+|*CH2CH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/52.tar |2.0G |ec1e964d2ee6f468fa5773743e3994a4 |
+|*OCH2CH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/53.tar |2.1G |d297b27b02822f9b6af80bdb64aee819 |
+|*CHOHCH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/54.tar |2.1G |368de083dafdc3bbdb560d35e2a102c0 |
+|*CH2CH2OH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/55.tar |2.1G |3c1aaf790659f7ff89bf1eed8b396b63 |
+|*CHOHCH2OH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/56.tar |2.2G |2d71adb9e305e6f3bca49e5df9b5a86a |
+|*OHCH2CH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/57.tar |2.3G |cf51128f8522b7b66fc68d79980d6def |
+|*NH2N(CH3)2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/58.tar |1.6G |36ba974d80c20ff636431f7c0ad225da |
+|*ONN(CH3)2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/59.tar |2.3G |fdc4cd19977496909d61be4aee61c4f1 |
+|*OHNNCH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/60.tar |2.1G |50a6ff098f9ba7adbba9ac115726cc5a |
+|*ONH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/62.tar |1.8G |47573199c545afe46c554ff756c3e38f |
+|*NHNH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/63.tar |1.7G |dd456b7e19ef592d9f0308d911b91d7c |
+|*N*NH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/65.tar |1.6G |c05289fd56d64c74306ebf57f1061318 |
+|*NO2NO2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/67.tar |2.1G |4822a06f6c5f41bdefd3cbbd8856c11f |
+|*N*NO |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/68.tar |1.6G |2a27de122d32917cc5b6ac0a21c63c1c |
+|*N2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/69.tar |1.5G |cc668fecf679b6edaac8fd8fb9cdd404 |
+|*ONNH2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/70.tar |2.1G |dff880f1a5baa7f67b52fd3ed745443d |
+|*NH2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/71.tar |1.6G |c7f383b50faa6244e265c9611466cb8f |
+|*NH3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/72.tar |1.9G |2b355741f9300445703270e0e4b8c01c |
+|*NONH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/73.tar |1.8G |48877a0c6f2994baac82cb722711aaa2 |
+|*NH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/74.tar |1.4G |7979b9e7ab557d6979b33e352486f0ef |
+|*NO2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/75.tar |1.7G |9f352fbc32bb2b8caf4788aba28b2eb7 |
+|*NO |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/76.tar |1.4G |482ee306a5ae2eee78cac40d10059ebc |
+|*N |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/77.tar |1.1G |bfb6e03d4a687987ff68976f0793cc46 |
+|*NO3 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/78.tar |1.8G |700834326e789a6e38bf3922d9fcb792 |
+|*OHNH2 |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/79.tar |2.1G |fa24472e0c02c34d91f3ffe6b77bfb11 |
+|*ONOH |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/80.tar |1.4G |4ddcccd62a834a76fe6167461f512529 |
+|*CN |https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/81.tar |1.5G |bc7c55330ece006d09496a5ff01d5d50 |
+Note - A few adsorbates are intentionally left out for the test splits.
+
+Downloading and extracting any of the above will result in a folder:
+
+`/`
+
+* `system.txt` - Text file containing information about the different adsorbate+catalyst systems. In total there are N systems. More details are described below.
+* `/`
+ * This contains N compressed trajectory files of the format `.extxyz.xz`.
+  * Files are named `<system_id>.extxyz.xz` (where `system_id` is defined below).
+
+
+where the adsorbate index in the download path can be 0 to 81. N depends on which adsorbate index is chosen.
+
+
+
+The file `system.txt` has information in the following format:
+`system_id,reference_energy`
+
+where:
+
+* `system_id `- Internal random ID corresponding to an adsorbate+catalyst system.
+* `reference_energy` - Energy used to reference system energies to bare catalyst+gas reference energies. Used for adsorption energy calculations.
+
+
+The `.extxyz.xz` files are LZMA-compressed `.extxyz` trajectory files. Each one corresponds to the relaxation trajectory of a different adsorbate+catalyst system. Information about the `.extxyz` trajectory file format may be found at https://wiki.fysik.dtu.dk/ase/dev/ase/io/formatoptions.html#extxyz.
+
+To uncompress the files, `uncompress.py` provides a multi-core implementation that can be used; a single file can also be handled directly with Python's `lzma` module, as in the sketch below.
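+
+For illustration, a minimal sketch (the extraction path is hypothetical) that decompresses one
+trajectory with Python's built-in `lzma` module, reads it with ASE, and looks up the system's
+reference energy from `system.txt`:
+
+```python
+import lzma
+from pathlib import Path
+
+import ase.io
+
+root = Path("/path/to/extracted/adsorbate_tar")  # hypothetical extraction directory
+
+# system.txt maps system_id -> reference_energy (one comma-separated pair per line)
+ref_energies = {}
+for line in (root / "system.txt").read_text().splitlines():
+    system_id, reference_energy = line.split(",")
+    ref_energies[system_id] = float(reference_energy)
+
+# decompress a single .extxyz.xz trajectory and read all of its frames
+traj_xz = next(root.glob("*/*.extxyz.xz"))  # first trajectory in the subfolder
+traj_extxyz = traj_xz.with_suffix("")       # strip the .xz suffix
+traj_extxyz.write_bytes(lzma.open(traj_xz, "rb").read())
+frames = ase.io.read(traj_extxyz, index=":")
+
+system_id = traj_xz.name.removesuffix(".extxyz.xz")
+print(system_id, ref_energies[system_id], len(frames))
+```
+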
### Catalyst system trajectories (optional download)
diff --git a/_sources/core/datasets/omat24.md b/_sources/core/datasets/omat24.md
new file mode 100644
index 0000000000..53217adc7c
--- /dev/null
+++ b/_sources/core/datasets/omat24.md
@@ -0,0 +1,121 @@
+# Open Materials 2024 (OMat24)
+
+## Overview
+The OMat24 dataset contains a mix of single point calculations of non-equilibrium structures and
+structural relaxations. The dataset contains structures labeled with total energy (eV), forces (eV/A)
+and stress (eV/A^3). The dataset is provided in ASE DB compatible lmdb files.
+
+The OMat24 train and val splits are fully compatible with the Matbench-Discovery benchmark test set.
+ 1. The splits do not contain any structure that has a protostructure label present in the initial or relaxed
+ structures of the WBM dataset.
+ 2. The splits do not include any structure that was generated starting from an Alexandria relaxed structure with a
+ protostructure label present in the initial or relaxed structures of the WBM dataset.
+
+## Subdatasets
+OMat24 is made up of 11 subdatasets based on how the structures were generated. The subdatasets included are:
+1. rattled-1000-subsampled & rattled-1000
+2. rattled-500-subsampled & rattled-500
+3. rattled-300-subsampled & rattled-300
+4. aimd-from-PBE-1000-npt
+5. aimd-from-PBE-1000-nvt
+6. aimd-from-PBE-3000-npt
+7. aimd-from-PBE-3000-nvt
+8. rattled-relax
+
+**Note** There are two subdatasets for each rattled-<T> dataset. Both subdatasets in each pair were generated with the
+same procedure as described in our manuscript.
+
+## File contents and downloads
+
+### OMat24 train split
+| Sub-dataset | No. structures | File size | Download |
+|:------------------------:|:--------------:|:---------:|:--------------------------------------------------------------------------------------------------------------------------------------------:|
+| rattled-1000 | 122,937 | 21 GB | [rattled-1000.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-1000.tar.gz) |
+| rattled-1000-subsampled | 41,786 | 7.1 GB |[rattled-1000-subsampled.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-1000-subsampled.tar.gz) |
+| rattled-500 | 75,167 | 13 GB | [rattled-500.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-500.tar.gz) |
+| rattled-500-subsampled | 43,068 | 7.3 GB | [rattled-500-subsampled.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-500-subsampled.tar.gz) |
+| rattled-300 | 68,593 | 12 GB | [rattled-300.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-300.tar.gz) |
+| rattled-300-subsampled | 37,393 | 6.4 GB | [rattled-300-subsampled.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-300-subsampled.tar.gz) |
+| aimd-from-PBE-1000-npt | 223,574 | 26 GB | [aimd-from-PBE-1000-npt.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/aimd-from-PBE-1000-npt.tar.gz) |
+| aimd-from-PBE-1000-nvt | 215,589 | 24 GB | [aimd-from-PBE-1000-nvt.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/aimd-from-PBE-1000-nvt.tar.gz) |
+| aimd-from-PBE-3000-npt | 65,244 | 25 GB | [aimd-from-PBE-3000-npt.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/aimd-from-PBE-3000-npt.tar.gz) |
+| aimd-from-PBE-3000-nvt | 84,063 | 32 GB | [aimd-from-PBE-3000-nvt.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/aimd-from-PBE-3000-nvt.tar.gz) |
+| rattled-relax | 99,968 | 12 GB | [rattled-relax.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-relax.tar.gz) |
+| Total | 1,077,382 | 185.8 GB |
+
+### OMat24 val split (a 1M subset of the full 5M val split, used for training the eqV2 models)
+
+| Sub-dataset             | No. structures | File Size | Download |
+|:-----------------------:|:---------:|:---------:|----------------------------------------------------------------------------------------------------------------------------------------------:|
+| rattled-1000 | 122,937 | 229 MB | [rattled-1000.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-1000.tar.gz) |
+| rattled-1000-subsampled | 41,786 | 80 MB | [rattled-1000-subsampled.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-1000-subsampled.tar.gz) |
+| rattled-500 | 75,167 | 142 MB | [rattled-500.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-500.tar.gz) |
+| rattled-500-subsampled | 43,068 | 82 MB | [rattled-500-subsampled.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-500-subsampled.tar.gz) |
+| rattled-300 | 68,593 | 128 MB | [rattled-300.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-300.tar.gz) |
+| rattled-300-subsampled | 37,393 | 72 MB | [rattled-300-subsampled.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-300-subsampled.tar.gz) |
+| aimd-from-PBE-1000-npt | 223,574 | 274 MB | [aimd-from-PBE-1000-npt.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/aimd-from-PBE-1000-npt.tar.gz) |
+| aimd-from-PBE-1000-nvt | 215,589 | 254 MB | [aimd-from-PBE-1000-nvt.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/aimd-from-PBE-1000-nvt.tar.gz) |
+| aimd-from-PBE-3000-npt | 65,244 | 296 MB | [aimd-from-PBE-3000-npt.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/aimd-from-PBE-3000-npt.tar.gz) |
+| aimd-from-PBE-3000-nvt | 84,063 | 382 MB | [aimd-from-PBE-3000-nvt.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/aimd-from-PBE-3000-nvt.tar.gz) |
+| rattled-relax | 99,968 | 124 MB | [rattled-relax.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/omat/val/rattled-relax.tar.gz) |
+| Total | 1,077,382 | 2.1 GB |
+
+
+### sAlex Dataset
+We also provide the sAlex dataset used for fine-tuning our OMat models. sAlex is a subsampled, Matbench-Discovery-compliant version of the original [Alexandria dataset](https://alexandria.icams.rub.de/).
+sAlex was created by removing structures matched in WBM and only sampling structures along a trajectory with an energy difference greater than 10 meV/atom. For full details,
+please see the manuscript.
+
+| Dataset | Split | No. Structures | File Size | Download |
+|:-------:|:-----:|:--------------:|:---------:|-------------------------------------------------------------------------------------------------------:|
+| sAlex | train | 10,447,765 | 7.6 GB | [train.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/sAlex/train.tar.gz) |
+| sAlex | val | 553,218 | 408 MB | [val.tar.gz](https://dl.fbaipublicfiles.com/opencatalystproject/data/omat/241018/sAlex/val.tar.gz) |
+
+
+## Getting ASE atoms objects
+Dataset files are written as `AseLMDBDatabase` objects, which are an implementation of an [ASE Database](https://wiki.fysik.dtu.dk/ase/ase/db/db.html)
+in LMDB format. A single `*.aselmdb` file can be read and queried like any other ASE DB.
+
+You can also read many DB files at once and access atoms objects using the `AseDBDataset` class.
+
+For example, to read the **rattled-relax** subdataset:
+```python
+from fairchem.core.datasets import AseDBDataset
+
+dataset_path = "/path/to/omat24/train/rattled-relax"
+config_kwargs = {}  # see the tutorial for additional configuration options
+
+dataset = AseDBDataset(config=dict(src=dataset_path, **config_kwargs))
+
+# atoms objects can be retrieved by index
+atoms = dataset.get_atoms(0)
+```
+
+To read more than one subdataset, simply pass a list of subdataset paths:
+```python
+from fairchem.core.datasets import AseDBDataset
+
+config_kwargs = {}  # see the tutorial for additional configuration options
+dataset_paths = [
+ "/path/to/omat24/train/rattled-relax",
+ "/path/to/omat24/train/rattled-1000-subsampled",
+ "/path/to/omat24/train/rattled-1000",
+]
+dataset = AseDBDataset(config=dict(src=dataset_paths, **config_kwargs))
+```
+To read all of the OMat24 training or validation splits, simply pass the paths to all subdatasets, as in the sketch below.
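+
+For instance, a minimal sketch that globs every subdataset directory under a local copy of the training split (the `/path/to/omat24` layout is a placeholder for wherever the archives were extracted):
+
+```python
+from glob import glob
+
+from fairchem.core.datasets import AseDBDataset
+
+# one directory per extracted subdataset, e.g. rattled-1000, aimd-from-PBE-1000-npt, ...
+train_subdatasets = sorted(glob("/path/to/omat24/train/*"))
+
+dataset = AseDBDataset(config=dict(src=train_subdatasets))
+print(len(dataset))  # total number of structures across all subdatasets
+```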
+
+### Citing OMat24
+
+The OMat24 dataset is licensed under a [Creative Commons Attribution 4.0 License](https://creativecommons.org/licenses/by/4.0/legalcode).
+
+Please consider citing the following paper in any publication that uses this dataset:
+
+
+```bibtex
+@article{barroso-luqueOpenMaterials20242024,
+  title = {Open Materials 2024 (OMat24) Inorganic Materials Dataset and Models},
+  author = {Barroso-Luque, Luis and Shuaibi, Muhammed and Fu, Xiang and Wood, Brandon M. and Dzamba, Misko and Gao, Meng and Rizvi, Ammar and Zitnick, C. Lawrence and Ulissi, Zachary W.},
+  date = {2024-10-16},
+  eprint = {2410.12771},
+  eprinttype = {arXiv},
+  doi = {10.48550/arXiv.2410.12771},
+  url = {http://arxiv.org/abs/2410.12771},
+}
+```
+
diff --git a/_sources/core/model_checkpoints.md b/_sources/core/model_checkpoints.md
index e7c16658d7..dc18172edb 100644
--- a/_sources/core/model_checkpoints.md
+++ b/_sources/core/model_checkpoints.md
@@ -177,3 +177,58 @@ Please consider citing the following paper in any research manuscript using the
journal={arXiv preprint arXiv:2311.00341},
}
```
+
+
+## Open Materials 2024 (OMat24)
+
+* All config files for the OMat24 models are available in the [`configs/omat24`](https://github.com/FAIR-Chem/fairchem/tree/main/configs/omat24) directory.
+* All models are EquiformerV2 S2EFS models.
+
+**Note:** to download any of the model checkpoints from the links below, you will first need to request access
+through the [OMat24 Hugging Face page](https://huggingface.co/fairchem/OMAT24).
+
+### OMat pretrained models
+
+These checkpoints are trained on OMat24 only. Note that their predictions are *not* Materials Project compatible. A minimal usage sketch follows the table.
+
+| Model Name | Checkpoint | Config |
+|-----------------------|--------------|---------------------------------------------------------------------------------------------|
+| EquiformerV2-31M-OMat | [checkpoint](https://huggingface.co/fairchem/OMAT24/blob/main/eqV2_31M_omat.pt) | [config](https://github.com/FAIR-Chem/fairchem/tree/main/configs/omat24/all/eqV2_31M.yml) |
+| EquiformerV2-86M-OMat | [checkpoint](https://huggingface.co/fairchem/OMAT24/blob/main/eqV2_86M_omat.pt) | [config](https://github.com/FAIR-Chem/fairchem/tree/main/configs/omat24/all/eqV2_86M.yml) |
+| EquiformerV2-153M-OMat | [checkpoint](https://huggingface.co/fairchem/OMAT24/blob/main/eqV2_153M_omat.pt) | [config](https://github.com/FAIR-Chem/fairchem/tree/main/configs/omat24/all/eqV2_153M.yml) |
+
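+As a usage sketch (not part of the original checkpoint documentation): the snippet below assumes the `OCPCalculator` ASE interface in `fairchem.core.common.relaxation.ase_utils`, its `checkpoint_path` and `cpu` arguments, and a hypothetical local path to a checkpoint downloaded after requesting Hugging Face access.
+
+```python
+from ase.build import bulk
+from fairchem.core.common.relaxation.ase_utils import OCPCalculator
+
+# hypothetical local path to a downloaded OMat-only checkpoint
+calc = OCPCalculator(checkpoint_path="/path/to/eqV2_31M_omat.pt", cpu=True)
+
+atoms = bulk("NaCl", crystalstructure="rocksalt", a=5.64)
+atoms.calc = calc
+# energies and forces follow the OMat24 DFT settings and are *not* Materials Project compatible
+print(atoms.get_potential_energy(), atoms.get_forces().shape)
+```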
+
+### MPTrj-only models
+These models are trained only on the MPTrj dataset.
+
+| Model Name | Checkpoint | Config |
+|---------------------------|--------------|---------------------------------------------------------------------------------|
+| EquiformerV2-31M-MP | [checkpoint](https://huggingface.co/fairchem/OMAT24/blob/main/eqV2_31M_mp.pt) | [config](https://github.com/FAIR-Chem/fairchem/tree/main/configs/omat24/mptrj/eqV2_31M_mptrj.yml) |
+| EquiformerV2-31M-DeNS-MP | [checkpoint](https://huggingface.co/fairchem/OMAT24/blob/main/eqV2_dens_31M_mp.pt) | [config](https://github.com/FAIR-Chem/fairchem/tree/main/configs/omat24/mptrj/eqV2_31M_dens_mptrj.yml) |
+| EquiformerV2-86M-DeNS-MP | [checkpoint](https://huggingface.co/fairchem/OMAT24/blob/main/eqV2_dens_86M_mp.pt) | [config](https://github.com/FAIR-Chem/fairchem/tree/main/configs/omat24/mptrj/eqV2_86M_dens_mptrj.yml) |
+| EquiformerV2-153M-DeNS-MP | [checkpoint](https://huggingface.co/fairchem/OMAT24/blob/main/eqV2_dens_153M_mp.pt) | [config](https://github.com/FAIR-Chem/fairchem/tree/main/configs/omat24/mptrj/eqV2_153M_dens_mptrj.yml) |
+
+
+### Fine-tuned OMat models
+These models are fine-tuned from the OMat pretrained checkpoints using either MPTrj alone or MPTrj together with sub-sampled trajectories
+from the 3D PBE Alexandria dataset, which we call Alex. A short relaxation sketch follows the table below.
+
+| Model Name | Checkpoint | Config |
+|--------------------------------|--------------|------------------------------------------------------------------------------------|
+| EquiformerV2-31M-OMat-Alex-MP | [checkpoint](https://huggingface.co/fairchem/OMAT24/blob/main/eqV2_31M_omat_mp_salex.pt) | [config](https://github.com/FAIR-Chem/fairchem/tree/main/configs/omat24/finetune/eqV2_31M_ft_salexmptrj.yml) |
+| EquiformerV2-86M-OMat-Alex-MP | [checkpoint](https://huggingface.co/fairchem/OMAT24/blob/main/eqV2_86M_omat_mp_salex.pt) | [config](https://github.com/FAIR-Chem/fairchem/tree/main/configs/omat24/finetune/eqV2_86M_ft_salexmptrj.yml) |
+| EquiformerV2-153M-OMat-Alex-MP | [checkpoint](https://huggingface.co/fairchem/OMAT24) | [config](https://github.com/FAIR-Chem/fairchem/tree/main/configs/omat24/finetune/eqV2_153M_ft_salexmptrj.yml) |
+
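+A minimal relaxation sketch, under the same assumptions as the usage sketch above (the `OCPCalculator` interface and a hypothetical local checkpoint path):
+
+```python
+from ase.build import bulk
+from ase.optimize import FIRE
+from fairchem.core.common.relaxation.ase_utils import OCPCalculator
+
+# hypothetical local path to a downloaded fine-tuned checkpoint
+calc = OCPCalculator(checkpoint_path="/path/to/eqV2_31M_omat_mp_salex.pt", cpu=True)
+
+atoms = bulk("Si", "diamond", a=5.43)
+atoms.rattle(stdev=0.05)  # perturb positions so the optimizer has something to do
+atoms.calc = calc
+FIRE(atoms).run(fmax=0.02, steps=200)
+print(atoms.get_potential_energy())
+```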
+
+Please consider citing the following paper if you use OMat24 models in your work:
+```bibtex
+@article{barroso-luqueOpenMaterials20242024,
+ title = {Open Materials 2024 (OMat24) Inorganic Materials Dataset and Models},
+ author = {Barroso-Luque, Luis and Shuaibi, Muhammed and Fu, Xiang and Wood, Brandon M. and Dzamba, Misko and Gao, Meng and Rizvi, Ammar and Zitnick, C. Lawrence and Ulissi, Zachary W.},
+ date = {2024-10-16},
+ eprint = {2410.12771},
+ eprinttype = {arXiv},
+ doi = {10.48550/arXiv.2410.12771},
+ url = {http://arxiv.org/abs/2410.12771},
+}
+```
diff --git a/_sources/index.md b/_sources/index.md
index 6d23c919c6..3f89a21bec 100644
--- a/_sources/index.md
+++ b/_sources/index.md
@@ -38,6 +38,7 @@ tasks, data, and metrics, please read the documentations and respective papers:
- [ODAC23](core/datasets/odac)
- [OC20Dense](core/datasets/oc20dense)
- [OC20NEB](core/datasets/oc20neb)
+ - [OMat24](core/datasets/omat24)
#### Projects and models built on `fairchem`:
diff --git a/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html b/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html
index 4a76ea51b6..8d453af24b 100644
--- a/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html
+++ b/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html
@@ -198,6 +198,7 @@
Open Direct Air Capture 2023 (ODAC23)
Open Catalyst 2020 Dense (OC20Dense)
Open Catalyst 2020 Nudged Elastic Band (OC20NEB)
+Open Materials 2024 (OMat24)
Pretrained FAIRChem models
@@ -284,8 +285,9 @@
Generic class to interface with various logging modules, e.g. wandb,
tensorboard, etc.
-
-
-project
-
-
-
-
-entity
-
-
-
-
-group
-
-
watch ( model , log_freq : int = 1000 ) → None
@@ -978,9 +961,6 @@ Module ContentsWandBLogger
+
classmethod register_model ( name : str )
@@ -862,6 +885,11 @@ Module Contents
+
+
+classmethod get_loss_class ( name )
+
+
classmethod get_model_class ( name : str )
@@ -968,11 +996,11 @@ Module Contents
@@ -1004,6 +1032,7 @@ Module ContentsRegistry.mapping
Registry.register_task()
Registry.register_dataset()
+Registry.register_loss()
Registry.register_model()
Registry.register_logger()
Registry.register_trainer()
@@ -1012,6 +1041,7 @@ Module ContentsRegistry.get_class()
Registry.get_task_class()
Registry.get_dataset_class()
+Registry.get_loss_class()
Registry.get_model_class()
Registry.get_logger_class()
Registry.get_trainer_class()
diff --git a/autoapi/core/common/relaxation/ase_utils/index.html b/autoapi/core/common/relaxation/ase_utils/index.html
index 9202f99d5e..b1b65eef81 100644
--- a/autoapi/core/common/relaxation/ase_utils/index.html
+++ b/autoapi/core/common/relaxation/ase_utils/index.html
@@ -65,7 +65,7 @@
-
+
@@ -198,6 +198,7 @@
Open Direct Air Capture 2023 (ODAC23)
Open Catalyst 2020 Dense (OC20Dense)
Open Catalyst 2020 Nudged Elastic Band (OC20NEB)
+Open Materials 2024 (OMat24)
Pretrained FAIRChem models
@@ -284,8 +285,9 @@