From 9d0bbb3d78f0690cc6b7626ae8c4950b56dc6cb5 Mon Sep 17 00:00:00 2001
From: Mike Walmsley
Date: Thu, 30 May 2024 16:17:54 -0400
Subject: [PATCH] fix n_layers

---
 docs/guides/finetuning.rst                                | 6 +++---
 .../examples/finetuning/finetune_binary_classification.py | 2 +-
 zoobot/pytorch/training/finetune.py                       | 3 +--
 3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/docs/guides/finetuning.rst b/docs/guides/finetuning.rst
index bce4fb56..3bac92e7 100755
--- a/docs/guides/finetuning.rst
+++ b/docs/guides/finetuning.rst
@@ -60,7 +60,7 @@ These files are called checkpoints (like video game save files - computer scient
     model = finetune.FinetuneableZoobotClassifier(
         name='hf_hub:mwalmsley/zoobot-encoder-convnext_nano',  # which pretrained model to download
         num_classes=2,
-        n_layers=0
+        n_blocks=0
     )
 
 You can see the list of pretrained models at :doc:`/pretrained_models`.
@@ -68,8 +68,8 @@ You can see the list of pretrained models at :doc:`/pretrained_models`.
 
 What about the other arguments? When loading the checkpoint, FinetuneableZoobotClassifier will automatically change the head layer to suit a classification problem (hence, ``Classifier``).
 ``num_classes=2`` specifies how many classes we have. Here, two classes (a.k.a. binary classification).
-``n_layers=0`` specifies how many layers (other than the output layer) we want to finetune.
-0 indicates no other layers, so we will only be changing the weights of the output layer.
+``n_blocks=0`` specifies how many inner blocks (groups of layers, excluding the output layer) we want to finetune.
+0 indicates no other blocks, so we will only be changing the weights of the output layer.
 
 
 Prepare Galaxy Data

diff --git a/zoobot/pytorch/examples/finetuning/finetune_binary_classification.py b/zoobot/pytorch/examples/finetuning/finetune_binary_classification.py
index 4cf7efff..e4b44394 100644
--- a/zoobot/pytorch/examples/finetuning/finetune_binary_classification.py
+++ b/zoobot/pytorch/examples/finetuning/finetune_binary_classification.py
@@ -44,7 +44,7 @@
     model = finetune.FinetuneableZoobotClassifier(
         name='hf_hub:mwalmsley/zoobot-encoder-convnext_nano',
         num_classes=2,
-        n_layers=0  # only updating the head weights. Set e.g. 1, 2 to finetune deeper.
+        n_blocks=0  # only updating the head weights. Set e.g. 1, 2 to finetune deeper.
     )
     # under the hood, this does:
     # encoder = finetune.load_pretrained_encoder(checkpoint_loc)

diff --git a/zoobot/pytorch/training/finetune.py b/zoobot/pytorch/training/finetune.py
index 65a55a38..4588d7f4 100644
--- a/zoobot/pytorch/training/finetune.py
+++ b/zoobot/pytorch/training/finetune.py
@@ -68,7 +68,6 @@ class FinetuneableZoobotAbstract(pl.LightningModule):
         prog_bar (bool, optional): Print progress bar during finetuning. Defaults to True.
         visualize_images (bool, optional): Upload example images to WandB. Good for debugging but slow. Defaults to False.
         seed (int, optional): random seed to use. Defaults to 42.
-        n_layers: No effect, deprecated. Use n_blocks instead.
     """
 
     def __init__(
@@ -90,7 +89,6 @@ def __init__(
         learning_rate=1e-4,  # 10x lower than typical, you may like to experiment
         dropout_prob=0.5,
         always_train_batchnorm=False,  # temporarily deprecated
-        # n_layers=0, # for backward compat., n_blocks preferred. Now removed in v2.
         # these args are for the optional learning rate scheduler, best not to use unless you've tuned everything else already
         cosine_schedule=False,
         warmup_epochs=0,
@@ -101,6 +99,7 @@
         # debugging utils
         prog_bar=True,
         visualize_images=False,  # upload examples to wandb, good for debugging
+        n_layers=0,  # deprecated (no effect) but can't remove yet as is an arg in some saved checkpoints
         seed=42,
     ):
         super().__init__()
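
Note for anyone updating code against this patch: only ``n_blocks`` now controls finetuning depth, while ``n_layers`` is accepted solely so old checkpoints still load. Below is a minimal usage sketch adapted from the example script touched above; it only constructs the model, and the data loading and Lightning trainer wiring (not shown here) are left out rather than assumed.

    from zoobot.pytorch.training import finetune

    # load the pretrained encoder and attach a fresh 2-class output head
    model = finetune.FinetuneableZoobotClassifier(
        name='hf_hub:mwalmsley/zoobot-encoder-convnext_nano',  # pretrained model to download
        num_classes=2,  # binary classification
        n_blocks=0,     # 0 = train only the new head; 1, 2, ... also unfreezes deeper encoder blocks
    )
    # passing n_layers=... still parses (kept for old saved checkpoints) but has no effect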