Adds auto-import helper function + config yamls for force models (for larger training sweeps) (#19)

* Adds helper functions for imports, config; main.py works for energy / force models

* Configs for force models

* Updates older configs to play well with the default trainer

* Reverts default logger to tensorboard

* Includes mmf license

* Removed hardcoded `mae`; reads metric from config
abhshkdz authored Jun 19, 2020
1 parent 92bd0fb commit 357eea2
Showing 22 changed files with 309 additions and 57 deletions.
20 changes: 20 additions & 0 deletions configs/co_cu_emt/base.yml
@@ -0,0 +1,20 @@
trainer: md

dataset:
  src: data/data/co_cu_emt
  traj: COCu_emt_5images.traj
  train_size: 5
  val_size: 0
  test_size: 0
  normalize_labels: True

logger: tensorboard

task:
  dataset: co_cu_md
  description: "Regressing to energies and forces for an EMT trajectory of CO on Cu"
  type: regression
  metric: mae
  labels:
    - potential energy
  grad_input: atomic forces
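The task block above regresses to the potential energy and, via grad_input, to atomic forces. A minimal sketch of the usual pattern, assuming forces are recovered as the negative gradient of the predicted energy with respect to atomic positions (the model, positions, and batch names below are placeholders, not the repository's API):

import torch

def energy_and_forces(model, positions, batch):
    # Predict a scalar energy, then differentiate it w.r.t. positions;
    # the negative gradient is the per-atom force (shape: num_atoms x 3).
    positions.requires_grad_(True)
    energy = model(positions, batch)
    forces = -torch.autograd.grad(energy.sum(), positions, create_graph=True)[0]
    return energy, forces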
21 changes: 21 additions & 0 deletions configs/co_cu_emt/cgcnn.yml
@@ -0,0 +1,21 @@
includes:
  - configs/co_cu_emt/base.yml

model:
  name: cgcnn
  atom_embedding_size: 32
  fc_feat_size: 64
  num_fc_layers: 3
  num_graph_conv_layers: 3

optim:
  batch_size: 5
  lr_initial: 0.001
  lr_gamma: 0.1
  lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma
    - 100
    - 125
  warmup_epochs: 50
  warmup_factor: 0.2
  max_epochs: 200
  force_coefficient: 30
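The optim block combines a linear warmup (warmup_epochs, warmup_factor) with step decay at lr_milestones. A small sketch of one plausible reading of these fields; the trainer's exact schedule may differ:

def lr_at_epoch(epoch, lr_initial=0.001, lr_gamma=0.1,
                milestones=(100, 125), warmup_epochs=50, warmup_factor=0.2):
    # Linear warmup from warmup_factor * lr_initial up to lr_initial,
    # then multiply by lr_gamma at each milestone epoch.
    if epoch < warmup_epochs:
        alpha = epoch / warmup_epochs
        return lr_initial * (warmup_factor * (1 - alpha) + alpha)
    return lr_initial * lr_gamma ** sum(epoch >= m for m in milestones)

# lr_at_epoch(0) -> 0.0002, lr_at_epoch(110) -> 0.0001, lr_at_epoch(150) -> ~1e-05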
24 changes: 24 additions & 0 deletions configs/co_cu_emt/cnn3d_local.yml
@@ -0,0 +1,24 @@
includes:
  - configs/co_cu_emt/base.yml

model:
  name: cnn3d_local
  regress_forces: True
  max_atomic_number: 90
  num_conv1_filters: 16
  num_conv2_filters: 32
  num_conv3_filters: 32
  num_conv4_filters: 32
  display_weights: False

optim:
  batch_size: 5
  lr_initial: 0.001
  lr_gamma: 0.1
  lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma
    - 100
    - 125
  warmup_epochs: 50
  warmup_factor: 0.2
  max_epochs: 200
  force_coefficient: 30
22 changes: 22 additions & 0 deletions configs/co_cu_emt/dimenet.yml
@@ -0,0 +1,22 @@
includes:
  - configs/co_cu_emt/base.yml

model:
  name: dimenet
  hidden_channels: 128
  num_blocks: 1
  cutoff: 10.0
  num_after_skip: 1
  num_output_layers: 1

optim:
  batch_size: 5
  lr_initial: 0.001
  lr_gamma: 0.1
  lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma
    - 100
    - 125
  warmup_epochs: 50
  warmup_factor: 0.2
  max_epochs: 200
  force_coefficient: 30
22 changes: 22 additions & 0 deletions configs/co_cu_emt/schnet.yml
@@ -0,0 +1,22 @@
includes:
  - configs/co_cu_emt/base.yml

model:
  name: schnet
  hidden_channels: 128
  num_filters: 128
  num_interactions: 6
  num_gaussians: 50
  cutoff: 6.0

optim:
  batch_size: 5
  lr_initial: 0.001
  lr_gamma: 0.1
  lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma
    - 100
    - 125
  warmup_epochs: 50
  warmup_factor: 0.2
  max_epochs: 200
  force_coefficient: 30
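All four force-model configs above set force_coefficient: 30 alongside metric: mae. Presumably this coefficient weights the force error against the energy error in the training objective; a hedged sketch (not the trainer's actual code):

import torch.nn.functional as F

def energy_force_loss(pred_energy, pred_forces, true_energy, true_forces,
                      force_coefficient=30.0):
    # L1 (mae-style) terms, with the force term scaled by force_coefficient.
    energy_loss = F.l1_loss(pred_energy, true_energy)
    force_loss = F.l1_loss(pred_forces, true_forces)
    return energy_loss + force_coefficient * force_loss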
4 changes: 3 additions & 1 deletion configs/ulissigroup_co/base.yml
@@ -1,10 +1,12 @@
+trainer: simple
+
 dataset:
   src: data/data/ulissigroup_co
   train_size: 14000
   val_size: 1000
   test_size: 1000

-logger: wandb
+logger: tensorboard

 task:
   dataset: ulissigroup_co
5 changes: 2 additions & 3 deletions configs/ulissigroup_co/cgcnn.yml
@@ -1,9 +1,8 @@
 includes:
   - configs/ulissigroup_co/base.yml

-model: cgcnn
-
-model_attributes:
+model:
+  name: cgcnn
   atom_embedding_size: 64
   num_graph_conv_layers: 6
   fc_feat_size: 128 # projection layer after conv + pool layers
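The change above (repeated in the other model configs below) folds the old top-level model: name and model_attributes: dict into a single model: block with a name: key. A hypothetical sketch of how a registry-driven trainer could consume the new block; the get_model_class lookup is an assumption modeled on the get_trainer_class call in main.py later in this diff:

config = {"model": {"name": "cgcnn", "atom_embedding_size": 64,
                    "num_graph_conv_layers": 6, "fc_feat_size": 128}}

model_config = dict(config["model"])
model_name = model_config.pop("name")  # selects the registered model class
# model_cls = registry.get_model_class(model_name)  # assumed registry API
# model = model_cls(**model_config)                 # remaining keys as kwargs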
5 changes: 2 additions & 3 deletions configs/ulissigroup_co/cgcnn_gu.yml
@@ -1,9 +1,8 @@
 includes:
   - configs/ulissigroup_co/base.yml

-model: cgcnn_gu
-
-model_attributes:
+model:
+  name: cgcnn_gu
   atom_embedding_size: 64
   num_graph_conv_layers: 6
   fc_feat_size: 128 # projection layer after conv + pool layers
19 changes: 19 additions & 0 deletions configs/ulissigroup_co/cnn3d_local.yml
@@ -0,0 +1,19 @@
includes:
  - configs/ulissigroup_co/base.yml

model:
  name: cnn3d_local
  regress_forces: False
  max_atomic_number: 90
  display_weights: False

optim:
  max_epochs: 200
  batch_size: 32
  lr_initial: 0.001
  lr_gamma: 0.1
  lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma
    - 100
    - 125
  warmup_epochs: 50
  warmup_factor: 0.2
5 changes: 2 additions & 3 deletions configs/ulissigroup_co/transformer.yml
@@ -1,9 +1,8 @@
 includes:
   - configs/ulissigroup_co/base.yml

-model: transformer
-
-model_attributes:
+model:
+  name: transformer
   atom_embedding_size: 64
   num_graph_conv_layers: 6
   num_attention_heads: 3
2 changes: 2 additions & 0 deletions configs/ulissigroup_h/base.yml
@@ -1,3 +1,5 @@
+trainer: simple
+
 dataset:
   src: data/data/ulissigroup_h
   train_size: 2560
5 changes: 2 additions & 3 deletions configs/ulissigroup_h/cgcnn.yml
@@ -1,9 +1,8 @@
 includes:
   - configs/ulissigroup_h/base.yml

-model: cgcnn
-
-model_attributes:
+model:
+  name: cgcnn
   atom_embedding_size: 64
   num_graph_conv_layers: 6
   fc_feat_size: 128 # projection layer after conv + pool layers
4 changes: 3 additions & 1 deletion configs/xie_grossman_mat_proj/base.yml
@@ -1,10 +1,12 @@
+trainer: simple
+
 dataset:
   src: data/data/xie_grossman_mat_proj
   train_size: 28000
   val_size: 2900
   test_size: 3000

-logger: wandb
+logger: tensorboard

 task:
   dataset: xie_grossman_mat_proj
5 changes: 2 additions & 3 deletions configs/xie_grossman_mat_proj/cgcnn.yml
@@ -1,9 +1,8 @@
 includes:
   - configs/xie_grossman_mat_proj/base.yml

-model: cgcnn
-
-model_attributes:
+model:
+  name: cgcnn
   atom_embedding_size: 64
   num_graph_conv_layers: 6
   fc_feat_size: 128 # projection layer after conv + pool layers
5 changes: 2 additions & 3 deletions configs/xie_grossman_mat_proj/transformer.yml
@@ -1,9 +1,8 @@
 includes:
   - configs/xie_grossman_mat_proj/base.yml

-model: transformer
-
-model_attributes:
+model:
+  name: transformer
   atom_embedding_size: 64
   num_graph_conv_layers: 6
   num_attention_heads: 5
30 changes: 30 additions & 0 deletions licenses/LICENSE.mmf
@@ -0,0 +1,30 @@
BSD License

For MMF software

Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

* Neither the name Facebook nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 changes: 16 additions & 3 deletions main.py
@@ -1,11 +1,24 @@
 from ocpmodels.common.flags import flags
-from ocpmodels.trainers import BaseTrainer
+from ocpmodels.common.registry import registry
+from ocpmodels.common.utils import build_config, setup_imports

 if __name__ == "__main__":
     parser = flags.get_parser()
     args = parser.parse_args()

-    trainer = BaseTrainer(args)
-    trainer.load()
+    setup_imports()
+    config = build_config(args)
+
+    trainer = registry.get_trainer_class(config.get("trainer", "simple"))(
+        task=config["task"],
+        model=config["model"],
+        dataset=config["dataset"],
+        optimizer=config["optim"],
+        identifier=config["identifier"],
+        is_debug=config.get("is_debug", False),
+        is_vis=config.get("is_vis", False),
+        print_every=config.get("print_every", 10),
+        seed=config.get("seed", 0),
+        logger=config.get("logger", "tensorboard"),
+    )
     trainer.train()
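The rewritten entry point no longer imports a trainer class directly: setup_imports() plus the registry let the trainer named in the config (trainer: md or trainer: simple) be resolved at run time. The implementation of setup_imports is not part of this diff; a minimal sketch of what such an auto-import helper usually does (walk the package and import every module so registration decorators run), with the package path as an assumption:

import importlib
import os

def setup_imports(package_root="ocpmodels"):
    # Import every module in the package so that @registry.register_*
    # decorators execute and populate the registry before lookup.
    for dirpath, _, filenames in os.walk(package_root):
        for fname in filenames:
            if fname.endswith(".py") and fname != "__init__.py":
                module = os.path.join(dirpath, fname)[:-3].replace(os.sep, ".")
                importlib.import_module(module)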
10 changes: 7 additions & 3 deletions ocpmodels/common/logger.py
@@ -1,8 +1,7 @@
 import torch
-import wandb
+from torch.utils.tensorboard import SummaryWriter

 from ocpmodels.common.registry import registry
-from torch.utils.tensorboard import SummaryWriter


 class Logger:
@@ -67,11 +66,16 @@ def __init__(self, config):
         super().__init__(config)
         self.writer = SummaryWriter(self.config["cmd"]["logs_dir"])

+    # TODO: add a model hook for watching gradients.
+    def watch(self, model):
+        print("NOTE: model gradient logging to tensorboard not yet supported.")
+        return False
+
     def log(self, update_dict, step=None, split=""):
         update_dict = super().log(update_dict, step, split)
         for key in update_dict:
             if torch.is_tensor(update_dict[key]):
-                self.writer.add_scalar(key, update_dict[key].val, step)
+                self.writer.add_scalar(key, update_dict[key].item(), step)
             else:
                 assert isinstance(update_dict[key], int) or isinstance(
                     update_dict[key], float
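The one-line fix in log() replaces update_dict[key].val with update_dict[key].item(): the guarded values are tensors, which have no .val attribute, and SummaryWriter.add_scalar wants a plain number, which .item() yields for a zero-dimensional tensor. For example:

import torch
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("logs_demo")  # hypothetical log directory
loss = torch.tensor(0.042)           # zero-dimensional tensor, as in update_dict
writer.add_scalar("train/loss", loss.item(), 7)  # .item() -> Python float
writer.close()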