Replace None strategy with auto in tutorial notebooks
Signed-off-by: Abhishree <[email protected]>
athitten committed Sep 26, 2023
1 parent 57b4427 commit 502790f
Showing 18 changed files with 32 additions and 32 deletions.
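
Background: the PyTorch Lightning 2.x line that NeMo moved to no longer accepts Trainer(strategy=None); the replacement is the string 'auto', which lets Lightning choose a non-distributed, single-device strategy when only one device is requested. Note the value is a Python string — a bare, unquoted auto is not a defined name and raises NameError. A minimal sketch of the pattern, assuming pytorch_lightning >= 2.0 (the version assumption is mine, not part of this commit):

import pytorch_lightning as pl

# Old tutorial pattern, rejected by PyTorch Lightning 2.x:
#   trainer = pl.Trainer(devices=1, strategy=None)

# Updated pattern: 'auto' (a string) lets Lightning pick the strategy
# for the requested devices; with devices=1 this is non-distributed.
trainer = pl.Trainer(devices=1, accelerator='cpu', strategy='auto')
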
2 changes: 1 addition & 1 deletion tutorials/02_NeMo_Adapters.ipynb
@@ -1985,4 +1985,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
- }
+ }
2 changes: 1 addition & 1 deletion tutorials/asr/ASR_TTS_Tutorial.ipynb
@@ -553,7 +553,7 @@
"config.trainer.max_epochs = NUM_EPOCHS\n",
"\n",
"config.trainer.devices = 1\n",
"config.trainer.strategy = None # use 1 device, no need for ddp strategy\n",
"config.trainer.strategy = auto # use 1 device, no need for ddp strategy\n",
"\n",
"OmegaConf.resolve(config)"
]
6 changes: 3 additions & 3 deletions tutorials/asr/Self_Supervised_Pre_Training.ipynb
@@ -316,7 +316,7 @@
" cfg.trainer.gpus = 1\n",
"else:\n",
" cfg.trainer.accelerator = 'cpu'\n",
" cfg.trainer.strategy = None\n",
" cfg.trainer.strategy = auto\n",
" cfg.trainer.gpus = 0\n",
"\n",
"cfg.exp_manager.exp_dir = data_dir + \"/content/exp\"\n",
@@ -538,7 +538,7 @@
" cfg.trainer.gpus = 1\n",
"else:\n",
" cfg.trainer.accelerator = 'cpu'\n",
" cfg.trainer.strategy = None\n",
" cfg.trainer.strategy = auto\n",
" cfg.trainer.gpus = 0\n",
"\n",
"cfg.model.tokenizer.dir = data_dir + \"/tokenizers/an4/tokenizer_spe_unigram_v128/\" # note this is a directory, not a path to a vocabulary file\n",
@@ -725,4 +725,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
- }
+ }
2 changes: 1 addition & 1 deletion tutorials/asr/Speech_Commands.ipynb
@@ -441,7 +441,7 @@
"config.trainer.max_epochs = 5\n",
"\n",
"# Remove distributed training flags\n",
"config.trainer.strategy = None"
"config.trainer.strategy = auto"
],
"execution_count": null,
"outputs": []
2 changes: 1 addition & 1 deletion tutorials/asr/Voice_Activity_Detection.ipynb
@@ -462,7 +462,7 @@
"config.trainer.max_epochs = 5\n",
"\n",
"# Remove distributed training flags\n",
"config.trainer.strategy = None"
"config.trainer.strategy = auto"
]
},
{
@@ -667,7 +667,7 @@
"config.trainer.max_epochs = 10\n",
"\n",
"# Remove distributed training flags\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"# Instantiate the trainer\n",
"trainer = pl.Trainer(**config.trainer)"
@@ -1144,7 +1144,7 @@
"config_dual_output.trainer.max_epochs = 10\n",
"\n",
"# Remove distributed training flags\n",
"config_dual_output.trainer.strategy = None\n",
"config_dual_output.trainer.strategy = auto\n",
"\n",
"# Instantiate the trainer\n",
"trainer = pl.Trainer(**config_dual_output.trainer)\n",
@@ -1313,4 +1313,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
- }
+ }
2 changes: 1 addition & 1 deletion tutorials/nlp/Entity_Linking_Medical.ipynb
@@ -187,7 +187,7 @@
"cfg.model.validation_ds.data_file = os.path.join(DATA_DIR, \"tiny_example_validation_pairs.tsv\")\n",
"\n",
"# remove distributed training flags\n",
"cfg.trainer.strategy = None\n",
"cfg.trainer.strategy = auto\n",
"cfg.trainer.accelerator = None"
]
},
4 changes: 2 additions & 2 deletions tutorials/nlp/GLUE_Benchmark.ipynb
@@ -342,7 +342,7 @@
"# config.trainer.amp_level = O1\n",
"\n",
"# remove distributed training flags\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"# setup max number of steps to reduce training time for demonstration purposes of this tutorial\n",
"config.trainer.max_steps = 128\n",
@@ -563,4 +563,4 @@
]
}
]
- }
+ }
4 changes: 2 additions & 2 deletions tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb
@@ -286,7 +286,7 @@
"# config.trainer.amp_level = O1\n",
"\n",
"# remove distributed training flags\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"# setup a small number of epochs for demonstration purposes of this tutorial\n",
"config.trainer.max_epochs = 5\n",
@@ -705,7 +705,7 @@
"config.trainer.accelerator = accelerator\n",
"\n",
"# remove distributed training flags\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"trainer = pl.Trainer(**config.trainer)\n",
"config.exp_manager.exp_dir = os.path.join(DATA_DIR, \"output/\" + run_name)\n",
6 changes: 3 additions & 3 deletions tutorials/nlp/Punctuation_and_Capitalization.ipynb
@@ -550,7 +550,7 @@
"config.trainer.max_epochs = 1\n",
"\n",
"# Remove distributed training flags\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"trainer = pl.Trainer(**config.trainer)"
]
@@ -745,7 +745,7 @@
"config.trainer.accelerator = accelerator\n",
"config.trainer.precision = 16 if torch.cuda.is_available() else 32\n",
"config.trainer.max_epochs = 1\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"# Exp manager\n",
"config.exp_manager.explicit_log_dir = 'tarred_experiment'\n",
@@ -1043,4 +1043,4 @@
},
"nbformat": 4,
"nbformat_minor": 1
- }
+ }
tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb
@@ -645,7 +645,7 @@
"config.trainer.max_epochs = 1\n",
"\n",
"# Remove distributed training flags\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"config.exp_manager.use_datetime_version=False\n",
"config.exp_manager.explicit_log_dir='Punctuation_And_Capitalization_Lexical_Audio'\n",
"\n",
@@ -860,7 +860,7 @@
"config.trainer.accelerator = accelerator\n",
"config.trainer.precision = 16 if torch.cuda.is_available() else 32\n",
"config.trainer.max_epochs = 1\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"# Exp manager\n",
"config.exp_manager.explicit_log_dir = 'tarred_experiment'\n",
4 changes: 2 additions & 2 deletions tutorials/nlp/Relation_Extraction-BioMegatron.ipynb
@@ -403,7 +403,7 @@
"config.trainer.precision = 16 if torch.cuda.is_available() else 32\n",
"\n",
"# remove distributed training flags\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"trainer = pl.Trainer(**config.trainer)"
]
@@ -652,4 +652,4 @@
},
"nbformat": 4,
"nbformat_minor": 1
- }
+ }
6 changes: 3 additions & 3 deletions tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb
@@ -370,7 +370,7 @@
"# config.trainer.amp_level = O1\n",
"\n",
"# disable distributed training when using Colab to prevent the errors\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"# setup max number of steps to reduce training time for demonstration purposes of this tutorial\n",
"# Training stops when max_step or max_epochs is reached (earliest)\n",
@@ -573,7 +573,7 @@
"# create a copy of the trainer config and update it to be used for final evaluation\n",
"eval_trainer_cfg = config.trainer.copy()\n",
"eval_trainer_cfg.accelerator = 'gpu' if torch.cuda.is_available() else 'cpu' # it is safer to perform evaluation on single GPU as PT is buggy with the last batch on multi-GPUs\n",
"eval_trainer_cfg.strategy = None # 'ddp' is buggy with test process in the current PT, it looks like it has been fixed in the latest master\n",
"eval_trainer_cfg.strategy = auto # 'ddp' is buggy with test process in the current PT, it looks like it has been fixed in the latest master\n",
"eval_trainer = pl.Trainer(**eval_trainer_cfg)\n",
"\n",
"eval_trainer.test(model=eval_model, verbose=False) # test_dataloaders=eval_dataloader,\n"
@@ -832,4 +832,4 @@
},
"nbformat": 4,
"nbformat_minor": 4
- }
+ }
2 changes: 1 addition & 1 deletion tutorials/nlp/Token_Classification-BioMegatron.ipynb
@@ -434,7 +434,7 @@
"config.trainer.precision = 16 if torch.cuda.is_available() else 32\n",
"\n",
"# remove distributed training flags\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"trainer = pl.Trainer(**config.trainer)"
]
@@ -533,7 +533,7 @@
"# config.trainer.amp_level = O1\n",
"\n",
"# remove distributed training flags\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"# setup max number of steps to reduce training time for demonstration purposes of this tutorial\n",
"config.trainer.max_steps = 32\n",
@@ -847,4 +847,4 @@
"metadata": {}
}
]
- }
+ }
4 changes: 2 additions & 2 deletions tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb
@@ -400,7 +400,7 @@
"# config.trainer.amp_level = O1\n",
"\n",
"# remove distributed training flags\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"# setup max number of steps to reduce training time for demonstration purposes of this tutorial\n",
"config.trainer.max_steps = 128\n",
@@ -671,4 +671,4 @@
},
"nbformat": 4,
"nbformat_minor": 4
- }
+ }
2 changes: 1 addition & 1 deletion tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb
@@ -761,7 +761,7 @@
"source": [
"config.model.diarizer.speaker_embeddings.model_path=\"titanet_large\"\n",
"config.trainer.max_epochs = 5\n",
"config.trainer.strategy = None"
"config.trainer.strategy = auto"
]
},
{
@@ -475,7 +475,7 @@
"config.trainer.max_epochs = 10\n",
"\n",
"# Remove distributed training flags\n",
"config.trainer.strategy = None\n",
"config.trainer.strategy = auto\n",
"\n",
"# Remove augmentations\n",
"config.model.train_ds.augmentor=None"
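
For reference, a minimal sketch of how the patched tutorial cells build a trainer from an OmegaConf config after this change; the config values below are illustrative placeholders, not copied from any single notebook:

import torch
import pytorch_lightning as pl
from omegaconf import OmegaConf

# Illustrative config; the tutorials load theirs from YAML files.
config = OmegaConf.create({'trainer': {'devices': 1, 'max_epochs': 5}})
config.trainer.accelerator = 'gpu' if torch.cuda.is_available() else 'cpu'
config.trainer.strategy = 'auto'  # previously `None`

trainer = pl.Trainer(**config.trainer)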
