Merge pull request #97 from jdb78/maintenance/pytorch-lightning-1.0.0rc4
Rename LearningRateLogger and reposition EarlyStopping callback for l…
jdb78 authored Oct 12, 2020
2 parents c07d5ac + 9eeafb0 commit 599047e
Showing 10 changed files with 43 additions and 33 deletions.
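
All ten files apply the same pytorch-lightning 1.0 migration: the LearningRateLogger callback is renamed to LearningRateMonitor, and EarlyStopping (and, in tuning.py, the Optuna pruning callback) is now passed through the Trainer's callbacks list instead of the dedicated early_stop_callback argument. A minimal sketch of the resulting trainer setup, mirroring the README example and assuming pytorch-lightning >= 1.0.0rc4 (per the branch name), with dataset and model construction omitted:

```python
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor

# stop when validation loss stops improving
early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=1, verbose=False, mode="min")
lr_logger = LearningRateMonitor()  # formerly LearningRateLogger

trainer = pl.Trainer(
    max_epochs=100,
    gpus=0,
    gradient_clip_val=0.1,
    limit_train_batches=30,
    callbacks=[lr_logger, early_stop_callback],  # EarlyStopping now goes here
)
```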
README.md (7 changes: 3 additions & 4 deletions)
@@ -54,7 +54,7 @@ documentation with detailed tutorials.

 ```python
 import pytorch_lightning as pl
-from pytorch_lightning.callbacks import EarlyStopping
+from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor

 from pytorch_forecasting import TimeSeriesDataSet, TemporalFusionTransformer

@@ -89,14 +89,13 @@ val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, nu


 early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=1, verbose=False, mode="min")
-lr_logger = LearningRateLogger()
+lr_logger = LearningRateMonitor()
 trainer = pl.Trainer(
     max_epochs=100,
     gpus=0,
     gradient_clip_val=0.1,
-    early_stop_callback=early_stop_callback,
     limit_train_batches=30,
-    callbacks=[lr_logger],
+    callbacks=[lr_logger, early_stop_callback],
 )

docs/source/getting-started.rst (7 changes: 3 additions & 4 deletions)
@@ -62,7 +62,7 @@ Example

 .. code-block:: python

     import pytorch_lightning as pl
-    from pytorch_lightning.callbacks import EarlyStopping
+    from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor

     from pytorch_forecasting import TimeSeriesDataSet, TemporalFusionTransformer
@@ -98,14 +98,13 @@ Example

     # define trainer with early stopping
     early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=1, verbose=False, mode="min")
-    lr_logger = LearningRateLogger()
+    lr_logger = LearningRateMonitor()
     trainer = pl.Trainer(
         max_epochs=100,
         gpus=0,
         gradient_clip_val=0.1,
-        early_stop_callback=early_stop_callback,
         limit_train_batches=30,
-        callbacks=[lr_logger],
+        callbacks=[lr_logger, early_stop_callback],
     )

     # create the model
docs/source/tutorials/ar.ipynb (4 changes: 2 additions & 2 deletions)
@@ -723,7 +723,7 @@
 " gpus=0,\n",
 " weights_summary=\"top\",\n",
 " gradient_clip_val=0.1,\n",
-" early_stop_callback=early_stop_callback,\n",
+" callbacks=[early_stop_callback],\n",
 " limit_train_batches=30,\n",
 ")\n",
 "\n",
@@ -1097,7 +1097,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.7.7"
+"version": "3.8.3"
 }
 },
 "nbformat": 4,
docs/source/tutorials/stallion.ipynb (26 changes: 19 additions & 7 deletions)

Large diffs are not rendered by default.

examples/ar.py (7 changes: 3 additions & 4 deletions)
@@ -6,7 +6,7 @@
 import pandas as pd
 from pandas.core.common import SettingWithCopyWarning
 import pytorch_lightning as pl
-from pytorch_lightning.callbacks import EarlyStopping, LearningRateLogger
+from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
 from pytorch_lightning.loggers import TensorBoardLogger
 import torch

@@ -64,20 +64,19 @@
 validation.save("validation.pkl")

 early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=5, verbose=False, mode="min")
-lr_logger = LearningRateLogger()
+lr_logger = LearningRateMonitor()

 trainer = pl.Trainer(
     max_epochs=100,
     gpus=0,
     weights_summary="top",
     gradient_clip_val=0.1,
-    early_stop_callback=early_stop_callback,
     limit_train_batches=30,
     limit_val_batches=3,
     # fast_dev_run=True,
     # logger=logger,
     # profiler=True,
-    callbacks=[lr_logger],
+    callbacks=[lr_logger, early_stop_callback],
 )

examples/nbeats.py (2 changes: 1 addition & 1 deletion)
@@ -55,7 +55,7 @@
     gpus=0,
     weights_summary="top",
     gradient_clip_val=0.1,
-    early_stop_callback=early_stop_callback,
+    callbacks=[early_stop_callback],
     limit_train_batches=15,
     # limit_val_batches=1,
     # fast_dev_run=True,
examples/stallion.py (7 changes: 3 additions & 4 deletions)
@@ -6,7 +6,7 @@
 import pandas as pd
 from pandas.core.common import SettingWithCopyWarning
 import pytorch_lightning as pl
-from pytorch_lightning.callbacks import EarlyStopping, LearningRateLogger
+from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
 from pytorch_lightning.loggers import TensorBoardLogger
 import torch

@@ -93,21 +93,20 @@
 validation.save("validation.pkl")

 early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=10, verbose=False, mode="min")
-lr_logger = LearningRateLogger()
+lr_logger = LearningRateMonitor()

 trainer = pl.Trainer(
     max_epochs=100,
     gpus=0,
     weights_summary="top",
     gradient_clip_val=0.1,
-    early_stop_callback=early_stop_callback,
     limit_train_batches=30,
     # val_check_interval=20,
     # limit_val_batches=1,
     # fast_dev_run=True,
     # logger=logger,
     # profiler=True,
-    callbacks=[lr_logger],
+    callbacks=[lr_logger, early_stop_callback],
 )

pytorch_forecasting/models/temporal_fusion_transformer/tuning.py (11 changes: 7 additions & 4 deletions)
@@ -9,7 +9,7 @@
 from optuna.integration import PyTorchLightningPruningCallback, TensorBoardCallback
 import pytorch_lightning as pl
 from pytorch_lightning import Callback
-from pytorch_lightning.callbacks import LearningRateLogger
+from pytorch_lightning.callbacks import LearningRateMonitor
 from pytorch_lightning.loggers import TensorBoardLogger
 import statsmodels.api as sm
 import torch
@@ -99,16 +99,19 @@ def objective(trial: optuna.Trial) -> float:
     # TensorBoard. We don't use any logger here as it requires us to implement several abstract
     # methods. Instead we setup a simple callback, that saves metrics from each validation step.
     metrics_callback = MetricsCallback()
-    learning_rate_callback = LearningRateLogger()
+    learning_rate_callback = LearningRateMonitor()
     logger = TensorBoardLogger(log_dir, name="optuna", version=trial.number)
     gradient_clip_val = trial.suggest_loguniform("gradient_clip_val", *gradient_clip_val_range)
     trainer = pl.Trainer(
         checkpoint_callback=checkpoint_callback,
         max_epochs=max_epochs,
         gradient_clip_val=gradient_clip_val,
         gpus=[0] if torch.cuda.is_available() else None,
-        callbacks=[metrics_callback, learning_rate_callback],
-        early_stop_callback=PyTorchLightningPruningCallback(trial, monitor="val_loss"),
+        callbacks=[
+            metrics_callback,
+            learning_rate_callback,
+            PyTorchLightningPruningCallback(trial, monitor="val_loss"),
+        ],
         logger=logger,
         **trainer_kwargs,
     )
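
The MetricsCallback referenced in the hunk above is defined elsewhere in tuning.py and is untouched by this commit; a minimal sketch of such a callback, assuming it simply records trainer.callback_metrics after every validation run (a hypothetical reconstruction, not the repository's exact code):

```python
from pytorch_lightning import Callback


class MetricsCallback(Callback):
    """Collect trainer metrics after each validation epoch (sketch)."""

    def __init__(self):
        super().__init__()
        self.metrics = []

    def on_validation_end(self, trainer, pl_module):
        # copy so later epochs do not overwrite earlier snapshots
        self.metrics.append(dict(trainer.callback_metrics))
```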
tests/test_models/test_nbeats.py (3 changes: 1 addition & 2 deletions)
@@ -4,7 +4,6 @@
 from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
 from pytorch_lightning.loggers import TensorBoardLogger

-from pytorch_forecasting.metrics import QuantileLoss
 from pytorch_forecasting.models import NBeats


@@ -21,7 +20,7 @@ def test_integration(dataloaders_fixed_window_without_coveratiates, tmp_path, gp
         gpus=gpus,
         weights_summary="top",
         gradient_clip_val=0.1,
-        early_stop_callback=early_stop_callback,
+        callbacks=[early_stop_callback],
         fast_dev_run=True,
         logger=logger,
     )
tests/test_models/test_temporal_fusion_transformer.py (2 changes: 1 addition & 1 deletion)
@@ -40,7 +40,7 @@ def test_integration(multiple_dataloaders_with_coveratiates, tmp_path, gpus):
         gpus=gpus,
         weights_summary="top",
         gradient_clip_val=0.1,
-        early_stop_callback=early_stop_callback,
+        callbacks=[early_stop_callback],
         fast_dev_run=True,
         logger=logger,
     )
