From 5b2a6bc6426d6a22c36562591a0a2b119e5abf70 Mon Sep 17 00:00:00 2001 From: Hemil Desai Date: Tue, 19 Nov 2024 12:07:35 -0800 Subject: [PATCH] Guard tokenizer save_pretrained with try/except and load model via load_context subpath Signed-off-by: Hemil Desai --- nemo/collections/llm/gpt/model/llama.py | 5 ++++- nemo/lightning/io/connector.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/nemo/collections/llm/gpt/model/llama.py b/nemo/collections/llm/gpt/model/llama.py index 804d3892d0c7..04540294d82a 100644 --- a/nemo/collections/llm/gpt/model/llama.py +++ b/nemo/collections/llm/gpt/model/llama.py @@ -344,7 +344,10 @@ def apply(self, output_path: Path) -> Path: target = target.cpu() target.save_pretrained(output_path) - self.tokenizer.save_pretrained(output_path) + try: + self.tokenizer.save_pretrained(output_path) + except Exception: + logging.warning("Failed to save tokenizer") return output_path diff --git a/nemo/lightning/io/connector.py b/nemo/lightning/io/connector.py index a38be6ee8f0a..62d5a7e318ca 100644 --- a/nemo/lightning/io/connector.py +++ b/nemo/lightning/io/connector.py @@ -226,7 +226,7 @@ def nemo_load( from nemo.lightning import MegatronStrategy, Trainer, _strategy_lib from nemo.lightning.io.api import load_context - model = load_context(path).model + model = load_context(path, subpath="model") _trainer = trainer or Trainer( devices=1, accelerator="cpu" if cpu else "gpu",