Implementation of Algorithm 1 from the paper #8

Open · wants to merge 5 commits into base: master
2 changes: 1 addition & 1 deletion src/omniglot/main.py
@@ -74,7 +74,7 @@
help='Turn off batch normalization')

parser.add_argument('--meta_model', type=str, default='warp_leap',
help='Meta-learner [warp_leap, leap, reptile,'
help='Meta-learner [warp_leap, warp_online, leap, reptile,'
'maml, fomaml, ft, no]')
parser.add_argument('--inner_opt', type=str, default='sgd',
help='Optimizer in inner (task) loop: SGD or Adam')
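For reference (not part of the diff): a minimal sketch of selecting the new meta-learner through the argument above; the parse_args list is illustrative only and the rest of main.py is assumed unchanged.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--meta_model', type=str, default='warp_leap',
                    help='Meta-learner [warp_leap, warp_online, leap, reptile,'
                         'maml, fomaml, ft, no]')

# Selecting the online variant added in this PR:
args = parser.parse_args(['--meta_model', 'warp_online'])
assert 'warp' in args.meta_model.lower() and 'online' in args.meta_model.lower()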
32 changes: 22 additions & 10 deletions src/omniglot/model.py
@@ -1,9 +1,11 @@
"""Base Omniglot models. Based on original implementation:
https://github.com/amzn/metalearn-leap
"""
import torch
import torch.nn as nn
from wrapper import (WarpGradWrapper, LeapWrapper, MAMLWrapper, NoWrapper,
FtWrapper, FOMAMLWrapper, ReptileWrapper)
FtWrapper, FOMAMLWrapper, ReptileWrapper,
WarpGradOnlineWrapper)

NUM_CLASSES = 50
ACT_FUNS = {
@@ -43,14 +45,25 @@ def get_model(args, criterion):
print(model)

if "warp" in args.meta_model.lower():
return WarpGradWrapper(
model,
args.inner_opt,
args.outer_opt,
args.inner_kwargs,
args.outer_kwargs,
args.meta_kwargs,
criterion)
# Route to the online algorithm wrapper.
if "online" in args.meta_model.lower():
return WarpGradOnlineWrapper(
model,
args.inner_opt,
args.outer_opt,
args.inner_kwargs,
args.outer_kwargs,
args.meta_kwargs,
criterion)
else:
return WarpGradWrapper(
model,
args.inner_opt,
args.outer_opt,
args.inner_kwargs,
args.outer_kwargs,
args.meta_kwargs,
criterion)

if args.meta_model.lower() == 'leap':
return LeapWrapper(
@@ -479,7 +492,6 @@ def init_adaptation(self):
"""Reset stats for new task"""
# Reset head if multi-headed, otherwise null-op
self.head.reset_parameters()

# Reset BN running stats
for m in self.modules():
if hasattr(m, 'reset_running_stats'):
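For reference (not part of the diff): the routing that get_model now performs for 'warp' models, condensed into a standalone helper; warp_wrapper_name is a hypothetical function written only to make the dispatch explicit.

def warp_wrapper_name(meta_model):
    """Name of the wrapper class selected by the 'warp' branch of get_model."""
    name = meta_model.lower()
    if 'warp' not in name:
        return None
    return 'WarpGradOnlineWrapper' if 'online' in name else 'WarpGradWrapper'

assert warp_wrapper_name('warp_online') == 'WarpGradOnlineWrapper'
assert warp_wrapper_name('warp_leap') == 'WarpGradWrapper'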
193 changes: 184 additions & 9 deletions src/omniglot/wrapper.py
@@ -13,10 +13,11 @@
from leap.utils import clone_state_dict

from utils import Res, AggRes
from warpgrad import SGD
from warpgrad.utils import step, backward, unfreeze, freeze, copy


class BaseWrapper(object):

"""Generic training wrapper.

Arguments:
@@ -123,7 +124,7 @@ def run_batches(self, batches, optimizer, train=False, meta_train=False):
if not train:
continue

final = (n+1) == N
final = (n + 1) == N
loss.backward()

if meta_train:
@@ -139,8 +140,187 @@ def run_batches(self, batches, optimizer, train=False, meta_train=False):
return res


class WarpGradWrapper(BaseWrapper):
class WarpGradOnlineWrapper(BaseWrapper):
"""Wrapper around WarpGrad meta-learners using online learning algorithm 1.

Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
meta_kwargs (dict): kwargs to pass to meta-learner upon construction.
criterion (func): loss criterion to use.
"""

def __init__(self,
model,
optimizer_cls,
meta_optimizer_cls,
optimizer_kwargs,
meta_optimizer_kwargs,
meta_kwargs,
criterion):

optimizer_parameters = warpgrad.OptimizerParameters(
trainable=meta_kwargs.pop('learn_opt', False),
default_lr=optimizer_kwargs['lr'],
default_momentum=optimizer_kwargs['momentum']
if 'momentum' in optimizer_kwargs else 0.)

# For now this is a dummy updater that does nothing in the backward call.
updater = warpgrad.SimpleUpdater(criterion, **meta_kwargs)

# Algorithm 1 does not need a replay buffer, so none is passed.
model = warpgrad.Warp(model=model,
adapt_modules=list(model.adapt_modules()),
warp_modules=list(model.warp_modules()),
updater=updater,
buffer=None,
optimizer_parameters=optimizer_parameters)

super(WarpGradOnlineWrapper, self).__init__(criterion,
model,
optimizer_cls,
optimizer_kwargs)

self.meta_optimizer_cls = optim.SGD \
if meta_optimizer_cls.lower() == 'sgd' else optim.Adam
lra = meta_optimizer_kwargs.pop(
'lr_adapt', meta_optimizer_kwargs['lr'])
lri = meta_optimizer_kwargs.pop(
'lr_init', meta_optimizer_kwargs['lr'])
lrl = meta_optimizer_kwargs.pop(
'lr_lr', meta_optimizer_kwargs['lr'])
self.meta_optimizer = self.meta_optimizer_cls(
[{'params': self.model.init_parameters(), 'lr': lri},
{'params': self.model.warp_parameters(), 'lr': lra},
{'params': self.model.optimizer_parameters(), 'lr': lrl}],
**meta_optimizer_kwargs)

# The meta (outer) loss accumulated online across inner steps and tasks.
self.meta_loss = 0

def _partial_meta_update(self, loss, final):
pass

def _final_meta_update(self):

def step_fn():
self.meta_optimizer.step()
self.meta_optimizer.zero_grad()

self.model.backward(step_fn, **self.optimizer_kwargs)

def run_tasks(self, tasks, meta_train):
"""Train on a mini-batch tasks and evaluate test performance.

Arguments:
tasks (list, torch.utils.data.DataLoader): list of task-specific
dataloaders.
meta_train (bool): whether the current run is during meta-training.
"""
results = []
self.meta_loss = 0
for task in tasks:
task.dataset.train()
trainres = self.run_task(task, train=True, meta_train=meta_train)
task.dataset.eval()
valres = self.run_task(task, train=False, meta_train=False)
results.append((trainres, valres))
results = AggRes(results)

# Meta gradient step
if meta_train:
# At the end of collecting the outer loss over K steps for each of the
# N tasks, do a single backward pass and meta update.
meta_parameters = self.model.meta_parameters(include_init=False)
unfreeze(meta_parameters)
backward(self.meta_loss, meta_parameters)
self._final_meta_update()
freeze(meta_parameters)

return results

def run_task(self, task, train, meta_train):
"""Run model on a given task, first adapting and then evaluating"""
self.model.no_collect()
optimizer = None
if train:
# TODO: Discuss the implementation and correct it.
# This line breaks gradient computation for now:
# the meta layers' requires_grad properties are set to False if
# we call init_adaptation.
copy(self.model.adapt_state(), self.model.init_state())
freeze(self.model.meta_parameters())
unfreeze(self.model.adapt_parameters())
self.model.train()

optimizer = self.optimizer_cls(
self.model.optimizer_parameter_groups(),
**self.optimizer_kwargs)
else:
self.model.eval()

return self.run_batches(
task, optimizer, train=train, meta_train=meta_train)

def run_batches(self, batches, optimizer, train=False, meta_train=False):
"""Iterate over task-specific batches.

Arguments:
batches (torch.utils.data.DataLoader): task-specific dataloaders.
optimizer (torch.optim.Optimizer): optimizer instance if train is True.
train (bool): whether to train on task.
meta_train (bool): whether to meta-train on task.
"""
device = next(self.model.parameters()).device
self.model.no_collect()
res = Res()
N = len(batches)
for n, (input, target) in enumerate(batches):
inner_input = input.to(device, non_blocking=True)
inner_target = target.to(device, non_blocking=True)

# Evaluate model
prediction = self.model(inner_input)
loss = self.criterion(prediction, inner_target)

res.log(loss=loss.item(), pred=prediction, target=inner_target)

# TRAINING #
if not train:
continue

final = (n + 1) == N
loss.backward()

if meta_train:
unfreeze(self.model.meta_parameters())
opt = SGD(self.model.optimizer_parameter_groups(tensor=True))
opt.zero_grad()
outer_input, outer_target = next(iter(batches))
l_outer, (l_inner, a1, a2) = step(
criterion=self.criterion,
x_inner=inner_input, x_outer=outer_input,
y_inner=inner_target, y_outer=outer_target,
model=self.model,
optimizer=opt, scorer=None)
self.meta_loss = self.meta_loss + l_outer
freeze(self.model.meta_parameters())
del l_inner, a1, a2

optimizer.step()
optimizer.zero_grad()
if final:
break
res.aggregate()
return res


class WarpGradWrapper(BaseWrapper):
"""Wrapper around WarpGrad meta-learners.

Arguments:
@@ -242,7 +422,6 @@ def run_task(self, task, train, meta_train):


class LeapWrapper(BaseWrapper):

"""Wrapper around the Leap meta-learner.

Arguments:
@@ -294,7 +473,6 @@ def run_task(self, task, train, meta_train):


class MAMLWrapper(object):

"""Wrapper around the MAML meta-learner.

Arguments:
@@ -358,7 +536,6 @@ def run_meta_batch(self, meta_batch, meta_train):


class NoWrapper(BaseWrapper):

"""Wrapper for baseline without any meta-learning.

Arguments:
@@ -367,6 +544,7 @@ class NoWrapper(BaseWrapper):
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
criterion (func): loss criterion to use.
"""

def __init__(self, model, optimizer_cls, optimizer_kwargs, criterion):
super(NoWrapper, self).__init__(criterion,
model,
@@ -390,7 +568,6 @@ def _final_meta_update(self):


class _FOWrapper(BaseWrapper):

"""Base wrapper for First-order MAML and Reptile.

Arguments:
@@ -476,7 +653,6 @@ def _final_meta_update(self):


class ReptileWrapper(_FOWrapper):

"""Wrapper for Reptile.

Arguments:
@@ -515,7 +691,6 @@ def __init__(self, *args, **kwargs):


class FtWrapper(BaseWrapper):

"""Wrapper for Multi-headed finetuning.

This wrapper differs from others in that it blends batches from all tasks
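For reference (not part of the diff): a self-contained toy sketch, in plain PyTorch with illustrative module names, of the accumulate-then-update pattern that WarpGradOnlineWrapper.run_batches and run_tasks implement above. The outer loss is accumulated online at every inner step, and a single meta update is made at the end of the meta-batch; the freeze/unfreeze bookkeeping and the warpgrad.utils.step call of the real wrapper are omitted.

import torch
import torch.nn as nn

torch.manual_seed(0)
adapt = nn.Linear(4, 8)      # stands in for the task-adapted layers
warp = nn.Linear(8, 2)       # stands in for a meta-learned warp layer
criterion = nn.CrossEntropyLoss()
meta_opt = torch.optim.SGD(warp.parameters(), lr=0.1)

meta_loss = 0.
for _task in range(3):                                # N tasks in the meta-batch
    inner_opt = torch.optim.SGD(adapt.parameters(), lr=0.1)
    for _step in range(5):                            # K inner steps per task
        x, y = torch.randn(8, 4), torch.randint(0, 2, (8,))
        loss = criterion(warp(adapt(x)), y)           # inner (task) objective
        loss.backward()
        # Accumulate the outer objective on a held-out batch before stepping;
        # the graph is cut at the adapted layer so only warp receives it later.
        xo, yo = torch.randn(8, 4), torch.randint(0, 2, (8,))
        meta_loss = meta_loss + criterion(warp(adapt(xo).detach()), yo)
        inner_opt.step()
        inner_opt.zero_grad()

# Single meta update at the end of the meta-batch, as in run_tasks above.
meta_opt.zero_grad()   # discard warp grads deposited by the inner backward passes
meta_loss.backward()
meta_opt.step()
meta_opt.zero_grad()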
2 changes: 1 addition & 1 deletion src/warpgrad/warpgrad/__init__.py
@@ -1,3 +1,3 @@
from .warpgrad import Warp, OptimizerParameters, ReplayBuffer
from .updaters import DualUpdater
from .updaters import DualUpdater, SimpleUpdater
from .optim import SGD, Adam
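For reference (not part of the diff): WarpGradOnlineWrapper.__init__ above describes SimpleUpdater as a dummy updater that does nothing in the backward call. Under that assumption only, a minimal no-op updater could look like the sketch below; NoOpUpdater is a hypothetical name and the real SimpleUpdater in updaters.py may differ.

class NoOpUpdater(object):
    """Hypothetical stand-in for a dummy updater: stores the constructor
    arguments the wrapper passes, but performs no work when invoked."""

    def __init__(self, criterion, **meta_kwargs):
        self.criterion = criterion
        self.meta_kwargs = meta_kwargs

    def __call__(self, *args, **kwargs):
        # Intentionally a no-op: the online wrapper accumulates the meta loss
        # itself and drives the meta update from run_tasks.
        return None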