From 44c71b33fffcd80d9a4e03f2ef26c57992692cf7 Mon Sep 17 00:00:00 2001
From: Misko
Date: Thu, 8 Aug 2024 08:39:55 -0700
Subject: [PATCH] fix issues with ddp/hydra and add tests (#796)

* fix issues with ddp/hydra and add tests

* remove load balancing for painn tests

* remove painn parameters when using hydra

* update test configs to be consistent with each other
---
 .../models/equiformer_v2/equiformer_v2.py     | 12 ++++
 src/fairchem/core/models/escn/escn.py         |  4 +-
 .../core/models/gemnet_oc/gemnet_oc.py        |  5 ++
 src/fairchem/core/models/painn/painn.py       |  3 +-
 tests/core/e2e/test_s2ef.py                   | 37 +++++++++--
 tests/core/models/test_configs/test_dpp.yml   | 41 ++++++++----
 .../models/test_configs/test_dpp_hydra.yml    | 41 ++++++++----
 .../test_configs/test_equiformerv2_hydra.yml  | 63 ++++++++++---------
 .../models/test_configs/test_escn_hydra.yml   | 53 +++++++++-------
 .../test_configs/test_gemnet_dt_hydra.yml     | 52 ++++++++-------
 .../test_gemnet_dt_hydra_grad.yml             | 53 +++++++++-------
 .../test_configs/test_gemnet_oc_hydra.yml     | 56 +++++++++--------
 .../test_gemnet_oc_hydra_grad.yml             | 55 ++++++++--------
 tests/core/models/test_configs/test_painn.yml | 41 ++++++++----
 .../models/test_configs/test_painn_hydra.yml  | 42 +++++++++----
 .../core/models/test_configs/test_schnet.yml  | 41 ++++++++----
 tests/core/models/test_configs/test_scn.yml   | 41 ++++++++----
 17 files changed, 415 insertions(+), 225 deletions(-)

diff --git a/src/fairchem/core/models/equiformer_v2/equiformer_v2.py b/src/fairchem/core/models/equiformer_v2/equiformer_v2.py
index 06a0280e9..bda8181c5 100644
--- a/src/fairchem/core/models/equiformer_v2/equiformer_v2.py
+++ b/src/fairchem/core/models/equiformer_v2/equiformer_v2.py
@@ -61,6 +61,7 @@ class EquiformerV2(nn.Module, GraphModelMixin):
 
     Args:
         use_pbc (bool): Use periodic boundary conditions
+        use_pbc_single (bool): Process batch PBC graphs one at a time
         regress_forces (bool): Compute forces
         otf_graph (bool): Compute graph On The Fly (OTF)
         max_neighbors (int): Maximum number of neighbors per atom
@@ -683,6 +684,12 @@ def no_weight_decay(self) -> set:
 
 @registry.register_model("equiformer_v2_backbone")
 class EquiformerV2Backbone(EquiformerV2, BackboneInterface):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        # TODO remove these once we deprecate/stop-inheriting EquiformerV2 class
+        self.energy_block = None
+        self.force_block = None
+
     @conditional_grad(torch.enable_grad())
     def forward(self, data: Batch) -> dict[str, torch.Tensor]:
         self.batch_size = len(data.natoms)
@@ -815,6 +822,7 @@ def forward(self, data: Batch) -> dict[str, torch.Tensor]:
 class EquiformerV2EnergyHead(nn.Module, HeadInterface):
     def __init__(self, backbone):
         super().__init__()
+
         self.avg_num_nodes = backbone.avg_num_nodes
         self.energy_block = FeedForwardNetwork(
             backbone.sphere_channels,
@@ -828,6 +836,8 @@ def __init__(self, backbone):
             backbone.use_grid_mlp,
             backbone.use_sep_s2_act,
         )
+        self.apply(backbone._init_weights)
+        self.apply(backbone._uniform_init_rad_func_linear_weights)
 
     def forward(self, data: Batch, emb: dict[str, torch.Tensor | GraphData]):
         node_energy = self.energy_block(emb["node_embedding"])
@@ -871,6 +881,8 @@ def __init__(self, backbone):
             backbone.use_sep_s2_act,
             alpha_drop=0.0,
         )
+        self.apply(backbone._init_weights)
+        self.apply(backbone._uniform_init_rad_func_linear_weights)
 
     def forward(self, data: Batch, emb: dict[str, torch.Tensor]):
         forces = self.force_block(
diff --git a/src/fairchem/core/models/escn/escn.py b/src/fairchem/core/models/escn/escn.py
index d6367fa9a..62a582b4c 100644
--- a/src/fairchem/core/models/escn/escn.py
+++ b/src/fairchem/core/models/escn/escn.py
@@ -530,7 +530,7 @@ def forward(self, data: Batch) -> dict[str, torch.Tensor]:
 class eSCNEnergyHead(nn.Module, HeadInterface):
     def __init__(self, backbone):
         super().__init__()
-
+        backbone.energy_block = None
         # Output blocks for energy and forces
         self.energy_block = EnergyBlock(
             backbone.sphere_channels_all, backbone.num_sphere_samples, backbone.act
@@ -550,7 +550,7 @@ def forward(
 class eSCNForceHead(nn.Module, HeadInterface):
     def __init__(self, backbone):
         super().__init__()
-
+        backbone.force_block = None
         self.force_block = ForceBlock(
             backbone.sphere_channels_all, backbone.num_sphere_samples, backbone.act
         )
diff --git a/src/fairchem/core/models/gemnet_oc/gemnet_oc.py b/src/fairchem/core/models/gemnet_oc/gemnet_oc.py
index c9dd9e13e..c5e6efb00 100644
--- a/src/fairchem/core/models/gemnet_oc/gemnet_oc.py
+++ b/src/fairchem/core/models/gemnet_oc/gemnet_oc.py
@@ -1431,6 +1431,9 @@ def __init__(
         self.direct_forces = backbone.direct_forces
         self.force_scaler = backbone.force_scaler
 
+        backbone.out_mlp_E = None
+        backbone.out_energy = None
+
         out_mlp_E = [
             Dense(
                 backbone.atom_emb.emb_size * (len(backbone.int_blocks) + 1),
@@ -1495,6 +1498,8 @@ def __init__(
 
         emb_size_edge = backbone.edge_emb.dense.linear.out_features
         if self.direct_forces:
+            backbone.out_mlp_F = None
+            backbone.out_forces = None
             out_mlp_F = [
                 Dense(
                     emb_size_edge * (len(backbone.int_blocks) + 1),
diff --git a/src/fairchem/core/models/painn/painn.py b/src/fairchem/core/models/painn/painn.py
index 33425e8d8..935ecc5a9 100644
--- a/src/fairchem/core/models/painn/painn.py
+++ b/src/fairchem/core/models/painn/painn.py
@@ -671,7 +671,7 @@ def forward(self, x, v):
 class PaiNNEnergyHead(nn.Module, HeadInterface):
     def __init__(self, backbone):
         super().__init__()
-
+        backbone.out_energy = None
         self.out_energy = nn.Sequential(
             nn.Linear(backbone.hidden_channels, backbone.hidden_channels // 2),
             ScaledSiLU(),
@@ -697,6 +697,7 @@ def __init__(self, backbone):
         self.direct_forces = backbone.direct_forces
 
         if self.direct_forces:
+            backbone.out_forces = None
             self.out_forces = PaiNNOutput(backbone.hidden_channels)
 
     def forward(
diff --git a/tests/core/e2e/test_s2ef.py b/tests/core/e2e/test_s2ef.py
index 9a68c4771..6c773d32e 100644
--- a/tests/core/e2e/test_s2ef.py
+++ b/tests/core/e2e/test_s2ef.py
@@ -210,7 +210,9 @@ def _run_main(
 
 
 class TestSmoke:
-    def smoke_test_train(self, input_yaml, tutorial_val_src, otf_norms=False):
+    def smoke_test_train(
+        self, input_yaml, tutorial_val_src, world_size, num_workers, otf_norms=False
+    ):
         with tempfile.TemporaryDirectory() as tempdirname:
             # first train a very simple model, checkpoint
             train_rundir = Path(tempdirname) / "train"
@@ -221,7 +223,12 @@ def smoke_test_train(self, input_yaml, tutorial_val_src, otf_norms=False):
                 rundir=str(train_rundir),
                 input_yaml=input_yaml,
                 update_dict_with={
-                    "optim": {"max_epochs": 2, "eval_every": 8, "batch_size": 5},
+                    "optim": {
+                        "max_epochs": 2,
+                        "eval_every": 8,
+                        "batch_size": 5,
+                        "num_workers": num_workers,
+                    },
                     "dataset": oc20_lmdb_train_and_val_from_paths(
                         train_src=str(tutorial_val_src),
                         val_src=str(tutorial_val_src),
@@ -231,6 +238,7 @@ def smoke_test_train(self, input_yaml, tutorial_val_src, otf_norms=False):
                 },
                 save_checkpoint_to=checkpoint_path,
                 save_predictions_to=training_predictions_filename,
+                world_size=world_size,
             )
             assert "train/energy_mae" in acc.Tags()["scalars"]
             assert "val/energy_mae" in acc.Tags()["scalars"]
@@ -313,10 +321,21 @@ def test_train_and_predict(
         configs,
         tutorial_val_src,
     ):
+        # test without ddp
+        self.smoke_test_train(
+            input_yaml=configs[model_name],
+            tutorial_val_src=tutorial_val_src,
+            otf_norms=otf_norms,
+            world_size=0,
+            num_workers=2,
+        )
+        # test with ddp but no wokers
         self.smoke_test_train(
             input_yaml=configs[model_name],
             tutorial_val_src=tutorial_val_src,
             otf_norms=otf_norms,
+            world_size=1,
+            num_workers=0,
         )
 
     def test_use_pbc_single(self, configs, tutorial_val_src, torch_deterministic):
@@ -341,11 +360,21 @@ def test_use_pbc_single(self, configs, tutorial_val_src, torch_deterministic):
     @pytest.mark.parametrize(
         ("world_size", "ddp"),
         [
-            pytest.param(2, True),
+            pytest.param(
+                2,
+                True,
+            ),
             pytest.param(0, False),
         ],
     )
-    def test_ddp(self, world_size, ddp, configs, tutorial_val_src, torch_deterministic):
+    def test_ddp(
+        self,
+        world_size,
+        ddp,
+        configs,
+        tutorial_val_src,
+        torch_deterministic,
+    ):
         with tempfile.TemporaryDirectory() as tempdirname:
             tempdir = Path(tempdirname)
             extra_args = {"seed": 0}
diff --git a/tests/core/models/test_configs/test_dpp.yml b/tests/core/models/test_configs/test_dpp.yml
index a79294bd1..82a85f300 100755
--- a/tests/core/models/test_configs/test_dpp.yml
+++ b/tests/core/models/test_configs/test_dpp.yml
@@ -1,20 +1,39 @@
 trainer: forces
 
-task:
-  dataset: lmdb
-  type: regression
-  metric: mae
-  primary_metric: forces_mae
-  labels:
-    - potential energy
-  grad_input: atomic forces
-  train_on_free_atoms: True
-  eval_on_free_atoms: True
-  prediction_dtype: float32
+outputs:
+  energy:
+    shape: 1
+    level: system
+  forces:
+    irrep_dim: 1
+    level: atom
+    train_on_free_atoms: True
+    eval_on_free_atoms: True
+
+loss_functions:
+  - energy:
+      fn: mae
+      coefficient: 2
+  - forces:
+      fn: l2mae
+      coefficient: 100
+evaluation_metrics:
+  metrics:
+    energy:
+      - mae
+    forces:
+      - mae
+      - cosine_similarity
+      - magnitude_error
+    misc:
+      - energy_forces_within_threshold
+  primary_metric: forces_mae
+
 logger:
   name: tensorboard
+
 model:
   name: dimenetplusplus #_bbwheads
   hidden_channels: 4
diff --git a/tests/core/models/test_configs/test_dpp_hydra.yml b/tests/core/models/test_configs/test_dpp_hydra.yml
index 1120cc905..e41a39141 100755
--- a/tests/core/models/test_configs/test_dpp_hydra.yml
+++ b/tests/core/models/test_configs/test_dpp_hydra.yml
@@ -1,20 +1,39 @@
 trainer: forces
 
-task:
-  dataset: lmdb
-  type: regression
-  metric: mae
-  primary_metric: forces_mae
-  labels:
-    - potential energy
-  grad_input: atomic forces
-  train_on_free_atoms: True
-  eval_on_free_atoms: True
-  prediction_dtype: float32
+outputs:
+  energy:
+    shape: 1
+    level: system
+  forces:
+    irrep_dim: 1
+    level: atom
+    train_on_free_atoms: True
+    eval_on_free_atoms: True
+
+loss_functions:
+  - energy:
+      fn: mae
+      coefficient: 2
+  - forces:
+      fn: l2mae
+      coefficient: 100
+evaluation_metrics:
+  metrics:
+    energy:
+      - mae
+    forces:
+      - mae
+      - cosine_similarity
+      - magnitude_error
+    misc:
+      - energy_forces_within_threshold
+  primary_metric: forces_mae
+
 logger:
   name: tensorboard
+
 model:
   name: hydra
   backbone:
diff --git a/tests/core/models/test_configs/test_equiformerv2_hydra.yml b/tests/core/models/test_configs/test_equiformerv2_hydra.yml
index 4c00fe6a2..0f72570fd 100644
--- a/tests/core/models/test_configs/test_equiformerv2_hydra.yml
+++ b/tests/core/models/test_configs/test_equiformerv2_hydra.yml
@@ -1,7 +1,38 @@
-
-
 trainer: forces
 
+outputs:
+  energy:
+    shape: 1
+    level: system
+  forces:
+    irrep_dim: 1
+    level: atom
+    train_on_free_atoms: True
+    eval_on_free_atoms: True
+
+loss_functions:
+  - energy:
+      fn: mae
+      coefficient: 2
+  - forces:
+      fn: l2mae
+      coefficient: 100
+
+evaluation_metrics:
+  metrics:
+    energy:
+      - mae
+    forces:
+      - mae
+      - cosine_similarity
+      - magnitude_error
+    misc:
+      - energy_forces_within_threshold
+  primary_metric: forces_mae
+
+logger:
+  name: tensorboard
+
 model:
   name: hydra
   backbone:
@@ -53,34 +84,6 @@ model:
     forces:
       module: equiformer_v2_force_head
 
-dataset:
-  train:
-    src: tutorial_dset/s2ef/train_100/
-    normalize_labels: True
-    target_mean: -0.7554450631141663
-    target_std: 2.887317180633545
-    grad_target_mean: 0.0
-    grad_target_std: 2.887317180633545
-  val:
-    format: lmdb
-    src: tutorial_dset/s2ef/val_20/
-
-logger:
-  name: tensorboard
-
-task:
-  dataset: lmdb
-  type: regression
-  metric: mae
-  primary_metric: forces_mae
-  labels:
-    - potential energy
-  grad_input: atomic forces
-  train_on_free_atoms: True
-  eval_on_free_atoms: True
-  prediction_dtype: float32
-
-
 optim:
   batch_size: 5
   eval_batch_size: 2
diff --git a/tests/core/models/test_configs/test_escn_hydra.yml b/tests/core/models/test_configs/test_escn_hydra.yml
index ba5db1f53..c51d46fc3 100644
--- a/tests/core/models/test_configs/test_escn_hydra.yml
+++ b/tests/core/models/test_configs/test_escn_hydra.yml
@@ -1,31 +1,38 @@
 trainer: forces
 
-dataset:
-  train:
-    src: tutorial_dset/s2ef/train_100/
-    normalize_labels: True
-    target_mean: -0.7554450631141663
-    target_std: 2.887317180633545
-    grad_target_mean: 0.0
-    grad_target_std: 2.887317180633545
-  val:
-    format: lmdb
-    src: tutorial_dset/s2ef/val_20/
+outputs:
+  energy:
+    shape: 1
+    level: system
+  forces:
+    irrep_dim: 1
+    level: atom
+    train_on_free_atoms: True
+    eval_on_free_atoms: True
 
-logger:
-  name: tensorboard
+loss_functions:
+  - energy:
+      fn: mae
+      coefficient: 2
+  - forces:
+      fn: l2mae
+      coefficient: 100
 
-task:
-  dataset: lmdb
-  type: regression
-  metric: mae
+evaluation_metrics:
+  metrics:
+    energy:
+      - mae
+    forces:
+      - mae
+      - cosine_similarity
+      - magnitude_error
+    misc:
+      - energy_forces_within_threshold
   primary_metric: forces_mae
-  labels:
-    - potential energy
-  grad_input: atomic forces
-  train_on_free_atoms: True
-  eval_on_free_atoms: True
-  prediction_dtype: float32
+
+logger:
+  name: tensorboard
+
 
 model:
   name: hydra
diff --git a/tests/core/models/test_configs/test_gemnet_dt_hydra.yml b/tests/core/models/test_configs/test_gemnet_dt_hydra.yml
index a61274147..036ed689f 100644
--- a/tests/core/models/test_configs/test_gemnet_dt_hydra.yml
+++ b/tests/core/models/test_configs/test_gemnet_dt_hydra.yml
@@ -1,31 +1,37 @@
 trainer: forces
 
-dataset:
-  train:
-    src: tutorial_dset/s2ef/train_100/
-    normalize_labels: True
-    target_mean: -0.7554450631141663
-    target_std: 2.887317180633545
-    grad_target_mean: 0.0
-    grad_target_std: 2.887317180633545
-  val:
-    format: lmdb
-    src: tutorial_dset/s2ef/val_20/
+outputs:
+  energy:
+    shape: 1
+    level: system
+  forces:
+    irrep_dim: 1
+    level: atom
+    train_on_free_atoms: True
+    eval_on_free_atoms: True
 
-logger:
-  name: tensorboard
+loss_functions:
+  - energy:
+      fn: mae
+      coefficient: 2
+  - forces:
+      fn: l2mae
+      coefficient: 100
 
-task:
-  dataset: lmdb
-  type: regression
-  metric: mae
+evaluation_metrics:
+  metrics:
+    energy:
+      - mae
+    forces:
+      - mae
+      - cosine_similarity
+      - magnitude_error
+    misc:
+      - energy_forces_within_threshold
   primary_metric: forces_mae
-  labels:
-    - potential energy
-  grad_input: atomic forces
-  train_on_free_atoms: True
-  eval_on_free_atoms: True
-  prediction_dtype: float32
+
+logger:
+  name: tensorboard
 
 model:
   name: hydra
diff --git a/tests/core/models/test_configs/test_gemnet_dt_hydra_grad.yml b/tests/core/models/test_configs/test_gemnet_dt_hydra_grad.yml
index 83d46bdd4..358dd1c86 100644
--- a/tests/core/models/test_configs/test_gemnet_dt_hydra_grad.yml
+++ b/tests/core/models/test_configs/test_gemnet_dt_hydra_grad.yml
@@ -1,31 +1,38 @@
 trainer: forces
 
-dataset:
-  train:
-    src: tutorial_dset/s2ef/train_100/
-    normalize_labels: True
-    target_mean: -0.7554450631141663
-    target_std: 2.887317180633545
-    grad_target_mean: 0.0
-    grad_target_std: 2.887317180633545
-  val:
-    format: lmdb
-    src: tutorial_dset/s2ef/val_20/
+outputs:
+  energy:
+    shape: 1
+    level: system
+  forces:
+    irrep_dim: 1
+    level: atom
+    train_on_free_atoms: True
+    eval_on_free_atoms: True
 
-logger:
-  name: tensorboard
+loss_functions:
+  - energy:
+      fn: mae
+      coefficient: 2
+  - forces:
+      fn: l2mae
+      coefficient: 100
 
-task:
-  dataset: lmdb
-  type: regression
-  metric: mae
+evaluation_metrics:
+  metrics:
+    energy:
+      - mae
+    forces:
+      - mae
+      - cosine_similarity
+      - magnitude_error
+    misc:
+      - energy_forces_within_threshold
   primary_metric: forces_mae
-  labels:
-    - potential energy
-  grad_input: atomic forces
-  train_on_free_atoms: True
-  eval_on_free_atoms: True
-  prediction_dtype: float32
+
+logger:
+  name: tensorboard
+
 
 model:
   name: hydra
diff --git a/tests/core/models/test_configs/test_gemnet_oc_hydra.yml b/tests/core/models/test_configs/test_gemnet_oc_hydra.yml
index 97343e90e..716718e3e 100644
--- a/tests/core/models/test_configs/test_gemnet_oc_hydra.yml
+++ b/tests/core/models/test_configs/test_gemnet_oc_hydra.yml
@@ -1,34 +1,38 @@
-
-
-
 trainer: forces
 
-dataset:
-  train:
-    src: tutorial_dset/s2ef/train_100/
-    normalize_labels: True
-    target_mean: -0.7554450631141663
-    target_std: 2.887317180633545
-    grad_target_mean: 0.0
-    grad_target_std: 2.887317180633545
-  val:
-    format: lmdb
-    src: tutorial_dset/s2ef/val_20/
+outputs:
+  energy:
+    shape: 1
+    level: system
+  forces:
+    irrep_dim: 1
+    level: atom
+    train_on_free_atoms: True
+    eval_on_free_atoms: True
 
-logger:
-  name: tensorboard
+loss_functions:
+  - energy:
+      fn: mae
+      coefficient: 2
+  - forces:
+      fn: l2mae
+      coefficient: 100
 
-task:
-  dataset: lmdb
-  type: regression
-  metric: mae
+evaluation_metrics:
+  metrics:
+    energy:
+      - mae
+    forces:
+      - mae
+      - cosine_similarity
+      - magnitude_error
+    misc:
+      - energy_forces_within_threshold
   primary_metric: forces_mae
-  labels:
-    - potential energy
-  grad_input: atomic forces
-  train_on_free_atoms: True
-  eval_on_free_atoms: True
-  prediction_dtype: float32
+
+logger:
+  name: tensorboard
+
 
 model:
   name: hydra
diff --git a/tests/core/models/test_configs/test_gemnet_oc_hydra_grad.yml b/tests/core/models/test_configs/test_gemnet_oc_hydra_grad.yml
index 334c3cb4d..90001488b 100644
--- a/tests/core/models/test_configs/test_gemnet_oc_hydra_grad.yml
+++ b/tests/core/models/test_configs/test_gemnet_oc_hydra_grad.yml
@@ -1,34 +1,39 @@
-
-
 trainer: forces
 
-dataset:
-  train:
-    src: tutorial_dset/s2ef/train_100/
-    normalize_labels: True
-    target_mean: -0.7554450631141663
-    target_std: 2.887317180633545
-    grad_target_mean: 0.0
-    grad_target_std: 2.887317180633545
-  val:
-    format: lmdb
-    src: tutorial_dset/s2ef/val_20/
+outputs:
+  energy:
+    shape: 1
+    level: system
+  forces:
+    irrep_dim: 1
+    level: atom
+    train_on_free_atoms: True
+    eval_on_free_atoms: True
 
-logger:
-  name: tensorboard
+loss_functions:
+  - energy:
+      fn: mae
+      coefficient: 2
+  - forces:
+      fn: l2mae
+      coefficient: 100
 
-task:
-  dataset: lmdb
-  type: regression
-  metric: mae
+evaluation_metrics:
+  metrics:
+    energy:
+      - mae
+    forces:
+      - mae
+      - cosine_similarity
+      - magnitude_error
+    misc:
+      - energy_forces_within_threshold
   primary_metric: forces_mae
-  labels:
-    - potential energy
-  grad_input: atomic forces
-  train_on_free_atoms: True
-  eval_on_free_atoms: True
-  prediction_dtype: float32
+
+logger:
+  name: tensorboard
+
 
 model:
   name: hydra
diff --git a/tests/core/models/test_configs/test_painn.yml b/tests/core/models/test_configs/test_painn.yml
index c1f24d0bb..2edf662f6 100644
--- a/tests/core/models/test_configs/test_painn.yml
+++ b/tests/core/models/test_configs/test_painn.yml
@@ -1,17 +1,35 @@
+
 trainer: forces
 
-task:
-  dataset: lmdb
-  type: regression
-  metric: mae
-  primary_metric: forces_mae
-  labels:
-    - potential energy
-  grad_input: atomic forces
-  train_on_free_atoms: True
-  eval_on_free_atoms: True
-  prediction_dtype: float32
+outputs:
+  energy:
+    shape: 1
+    level: system
+  forces:
+    irrep_dim: 1
+    level: atom
+    train_on_free_atoms: True
+    eval_on_free_atoms: True
+loss_functions:
+  - energy:
+      fn: mae
+      coefficient: 2
+  - forces:
+      fn: l2mae
+      coefficient: 100
+
+evaluation_metrics:
+  metrics:
+    energy:
+      - mae
+    forces:
+      - mae
+      - cosine_similarity
+      - magnitude_error
+    misc:
+      - energy_forces_within_threshold
+  primary_metric: forces_mae
 
 logger:
   name: tensorboard
@@ -30,7 +48,6 @@ model:
 optim:
   batch_size: 32
   eval_batch_size: 32
-  load_balancing: atoms
   eval_every: 5000
   num_workers: 2
   optimizer: AdamW
diff --git a/tests/core/models/test_configs/test_painn_hydra.yml b/tests/core/models/test_configs/test_painn_hydra.yml
index 0b39aa173..2c4731742 100644
--- a/tests/core/models/test_configs/test_painn_hydra.yml
+++ b/tests/core/models/test_configs/test_painn_hydra.yml
@@ -1,20 +1,39 @@
 trainer: forces
 
-task:
-  dataset: lmdb
-  type: regression
-  metric: mae
-  primary_metric: forces_mae
-  labels:
-    - potential energy
-  grad_input: atomic forces
-  train_on_free_atoms: True
-  eval_on_free_atoms: True
-  prediction_dtype: float32
+outputs:
+  energy:
+    shape: 1
+    level: system
+  forces:
+    irrep_dim: 1
+    level: atom
+    train_on_free_atoms: True
+    eval_on_free_atoms: True
+
+loss_functions:
+  - energy:
+      fn: mae
+      coefficient: 2
+  - forces:
+      fn: l2mae
+      coefficient: 100
+evaluation_metrics:
+  metrics:
+    energy:
+      - mae
+    forces:
+      - mae
+      - cosine_similarity
+      - magnitude_error
+    misc:
+      - energy_forces_within_threshold
+  primary_metric: forces_mae
+
 logger:
   name: tensorboard
+
 model:
   name: hydra
   backbone:
@@ -38,7 +57,6 @@ model:
 optim:
   batch_size: 32
   eval_batch_size: 32
-  load_balancing: atoms
   eval_every: 5000
   num_workers: 2
   optimizer: AdamW
diff --git a/tests/core/models/test_configs/test_schnet.yml b/tests/core/models/test_configs/test_schnet.yml
index 97faf3962..842f7d2b6 100755
--- a/tests/core/models/test_configs/test_schnet.yml
+++ b/tests/core/models/test_configs/test_schnet.yml
@@ -1,20 +1,39 @@
 trainer: forces
 
-task:
-  dataset: lmdb
-  type: regression
-  metric: mae
-  primary_metric: forces_mae
-  labels:
-    - potential energy
-  grad_input: atomic forces
-  train_on_free_atoms: True
-  eval_on_free_atoms: True
-  prediction_dtype: float32
+outputs:
+  energy:
+    shape: 1
+    level: system
+  forces:
+    irrep_dim: 1
+    level: atom
+    train_on_free_atoms: True
+    eval_on_free_atoms: True
+
+loss_functions:
+  - energy:
+      fn: mae
+      coefficient: 2
+  - forces:
+      fn: l2mae
+      coefficient: 100
+evaluation_metrics:
+  metrics:
+    energy:
+      - mae
+    forces:
+      - mae
+      - cosine_similarity
+      - magnitude_error
+    misc:
+      - energy_forces_within_threshold
+  primary_metric: forces_mae
+
 logger:
   name: tensorboard
+
 model:
   name: schnet
   hidden_channels: 1024
diff --git a/tests/core/models/test_configs/test_scn.yml b/tests/core/models/test_configs/test_scn.yml
index c080c4855..22d732fb6 100755
--- a/tests/core/models/test_configs/test_scn.yml
+++ b/tests/core/models/test_configs/test_scn.yml
@@ -1,21 +1,40 @@
 # A total of 64 32GB GPUs were used for training.
 trainer: forces
 
-task:
-  dataset: lmdb
-  type: regression
-  metric: mae
-  primary_metric: forces_mae
-  labels:
-    - potential energy
-  grad_input: atomic forces
-  train_on_free_atoms: True
-  eval_on_free_atoms: True
-  prediction_dtype: float32
+outputs:
+  energy:
+    shape: 1
+    level: system
+  forces:
+    irrep_dim: 1
+    level: atom
+    train_on_free_atoms: True
+    eval_on_free_atoms: True
+
+loss_functions:
+  - energy:
+      fn: mae
+      coefficient: 2
+  - forces:
+      fn: l2mae
+      coefficient: 100
+evaluation_metrics:
+  metrics:
+    energy:
+      - mae
+    forces:
+      - mae
+      - cosine_similarity
+      - magnitude_error
+    misc:
+      - energy_forces_within_threshold
+  primary_metric: forces_mae
+
 logger:
   name: tensorboard
+
 model:
   name: scn
   num_interactions: 2
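The model-side hunks in this patch all follow one pattern: a hydra head builds its own copy of an output block and clears the duplicate module left on the backbone by setting it to None, apparently so that distributed (DDP) training does not track parameters that never receive gradients. A minimal sketch of that pattern, using hypothetical ExampleBackbone / ExampleEnergyHead classes rather than the fairchem ones:

import torch
from torch import nn


class ExampleBackbone(nn.Module):
    def __init__(self, hidden_channels: int = 16):
        super().__init__()
        self.embedding = nn.Linear(4, hidden_channels)
        # The backbone historically owned an output block of its own.
        self.energy_block = nn.Linear(hidden_channels, 1)

    def forward(self, x: torch.Tensor) -> dict:
        return {"node_embedding": self.embedding(x)}


class ExampleEnergyHead(nn.Module):
    def __init__(self, backbone: ExampleBackbone):
        super().__init__()
        # Same move as the eSCN/PaiNN/GemNet-OC heads above: drop the
        # backbone's now-unused copy so a DDP wrapper sees no orphan parameters.
        backbone.energy_block = None
        self.energy_block = nn.Linear(backbone.embedding.out_features, 1)

    def forward(self, emb: dict) -> torch.Tensor:
        return self.energy_block(emb["node_embedding"]).sum(dim=0)

With the backbone's copy cleared, backbone and head can be trained together under DistributedDataParallel without unused-parameter errors, which appears to be what the new world_size-parametrized smoke tests exercise.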