From 09c70ba6e8dbe4c59ebbfa9c937d070ecfbbeb98 Mon Sep 17 00:00:00 2001
From: Mike Walmsley
Date: Sat, 2 Mar 2024 09:26:43 -0500
Subject: [PATCH] pure pytorch?

---
 zoobot/pytorch/training/finetune.py | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

diff --git a/zoobot/pytorch/training/finetune.py b/zoobot/pytorch/training/finetune.py
index 31f4ff06..412752f8 100644
--- a/zoobot/pytorch/training/finetune.py
+++ b/zoobot/pytorch/training/finetune.py
@@ -253,21 +253,33 @@ def configure_optimizers(self):
         opt = torch.optim.AdamW(params, weight_decay=self.weight_decay)  # lr included in params dict
 
         if self.cosine_schedule:
-            logging.info('Using cosine schedule, warmup for {} epochs, max for {} epochs'.format(self.warmup_epochs, self.max_cosine_epochs))
-            from lightly.utils.scheduler import CosineWarmupScheduler  # new dependency for zoobot, TBD - maybe just copy
-            # https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
-            # Dictionary, with an "optimizer" key, and (optionally) a "lr_scheduler" key whose value is a single LR scheduler or lr_scheduler_config.
-            lr_scheduler = CosineWarmupScheduler(
+            # logging.info('Using cosine schedule, warmup for {} epochs, max for {} epochs'.format(self.warmup_epochs, self.max_cosine_epochs))
+            # from lightly.utils.scheduler import CosineWarmupScheduler  # new dependency for zoobot, TBD - maybe just copy
+            # # https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+            # # Dictionary, with an "optimizer" key, and (optionally) a "lr_scheduler" key whose value is a single LR scheduler or lr_scheduler_config.
+            # lr_scheduler = CosineWarmupScheduler(
+            #     optimizer=opt,
+            #     warmup_epochs=self.warmup_epochs,
+            #     max_epochs=self.max_cosine_epochs,
+            #     start_value=self.learning_rate,
+            #     end_value=self.learning_rate * self.max_learning_rate_reduction_factor,
+            # )
+
+            logging.info('Using cosine schedule, warmup not supported, max for {} epochs'.format(self.max_cosine_epochs))
+            lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                 optimizer=opt,
-                warmup_epochs=self.warmup_epochs,
-                max_epochs=self.max_cosine_epochs,
-                start_value=self.learning_rate,
-                end_value=self.learning_rate * self.max_learning_rate_reduction_factor,
+                T_max=self.max_cosine_epochs,
+                eta_min=self.learning_rate * self.max_learning_rate_reduction_factor
             )
 
+            # lr_scheduler_config default is frequency=1, interval=epoch
             return {
                 "optimizer": opt,
-                "lr_scheduler": lr_scheduler
+                "lr_scheduler": {
+                    'scheduler': lr_scheduler,
+                    'interval': 'epoch',
+                    'frequency': 1
+                }
             }
         else:
             return opt
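
For reference, a minimal standalone sketch of the pattern the hunk adopts: a plain torch.optim.lr_scheduler.CosineAnnealingLR handed back to Lightning via an lr_scheduler config dict. The DemoModule class and its hyperparameter defaults below are illustrative assumptions, not zoobot API.

# Illustrative sketch only (DemoModule and its defaults are hypothetical, not zoobot code).
import torch
import pytorch_lightning as pl


class DemoModule(pl.LightningModule):
    def __init__(self, learning_rate=1e-4, weight_decay=0.05,
                 max_cosine_epochs=100, max_learning_rate_reduction_factor=0.01):
        super().__init__()
        self.learning_rate = learning_rate
        self.weight_decay = weight_decay
        self.max_cosine_epochs = max_cosine_epochs
        self.max_learning_rate_reduction_factor = max_learning_rate_reduction_factor
        self.layer = torch.nn.Linear(8, 1)  # placeholder model

    def configure_optimizers(self):
        opt = torch.optim.AdamW(
            self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
        # anneal lr from learning_rate down to
        # learning_rate * max_learning_rate_reduction_factor over max_cosine_epochs
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer=opt,
            T_max=self.max_cosine_epochs,
            eta_min=self.learning_rate * self.max_learning_rate_reduction_factor,
        )
        # with interval='epoch' and frequency=1, Lightning steps the scheduler once per epoch
        return {
            "optimizer": opt,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": "epoch",
                "frequency": 1,
            },
        }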