
Commit

Merge branch 'main' of github.com:Lightning-AI/litgpt into feature/longlora
belerico committed May 9, 2024
2 parents 909ce04 + 6f03d6c commit 6f631b3
Showing 30 changed files with 72 additions and 159 deletions.
39 changes: 39 additions & 0 deletions .github/workflows/publish.yaml
@@ -0,0 +1,39 @@
# To create a release, create a tag and push it to GitHub:
# git tag -a "v0.0.1-beta" -m "beta version testing"
# git push --tags
# https://dev.to/iamtekson/publish-package-to-pypi-and-release-new-version-using-github-actions-108k
name: Publish LitGPT to PyPI

on:
  push:
    tags:
      - "v*"

jobs:
  build-n-publish:
    name: Build and publish to PyPI
    runs-on: ubuntu-latest
    environment:
      name: pypi
      url: https://pypi.org/p/litgpt
    permissions:
      id-token: write

    steps:
      - name: Checkout source
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.x"

      - name: Build source and wheel distributions
        run: |
          python -m pip install --upgrade build twine
          python -m build
          twine check --strict dist/*

      - name: Publish distribution to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          user: __token__
          password: ${{ secrets.PYPI_API_TOKEN }}
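
The workflow above publishes on any tag matching "v*". A useful pre-release sanity check, sketched here as an assumption rather than anything in this commit, is to confirm that the tag about to be pushed matches the version declared in pyproject.toml:

import subprocess
import tomllib  # stdlib on Python 3.11+; older interpreters can use the tomli backport

# Read the declared package version (assumes a [project] version field in pyproject.toml).
with open("pyproject.toml", "rb") as f:
    declared = tomllib.load(f)["project"]["version"]

# Most recent tag reachable from HEAD.
tag = subprocess.check_output(["git", "describe", "--tags", "--abbrev=0"], text=True).strip()

assert tag == f"v{declared}", f"tag {tag!r} does not match declared version v{declared}"
print(f"ok: {tag} matches pyproject.toml")
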
6 changes: 0 additions & 6 deletions litgpt/chat/base.py
@@ -219,9 +219,3 @@ def main(
            file=sys.stderr,
        )
    fabric.print()
-
-
-if __name__ == "__main__":
-    torch.set_float32_matmul_precision("high")
-
-    CLI(main)
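
This deletion, and the matching ones in the files below, removes the per-file `if __name__ == "__main__"` shims: everything is now invoked through the single `litgpt` console script. A minimal sketch of that style of dispatch, hand-rolled here purely for illustration (the real entrypoint is jsonargparse-based and differs in detail; `chat` below is a hypothetical stand-in):

import sys

import torch

def chat(checkpoint_dir: str = "checkpoints") -> None:
    # Hypothetical stand-in for litgpt.chat.base.main.
    print(f"chatting with the model in {checkpoint_dir}")

COMMANDS = {"chat": chat}

def main() -> None:
    # Matmul precision is set once at the entrypoint instead of in every script.
    torch.set_float32_matmul_precision("high")
    COMMANDS[sys.argv[1]](*sys.argv[2:])

if __name__ == "__main__":
    main()
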
4 changes: 0 additions & 4 deletions litgpt/deploy/serve.py
@@ -162,7 +162,3 @@ def run_server(
        devices=devices)

    server.run(port=port)
-
-
-if __name__ == "__main__":
-    CLI(run_server)
4 changes: 0 additions & 4 deletions litgpt/eval/evaluate.py
@@ -112,7 +112,3 @@ def convert_and_evaluate(
        torch_random_seed=seed,
    )
    prepare_results(results, save_filepath)
-
-
-if __name__ == "__main__":
-    CLI(convert_and_evaluate)
7 changes: 0 additions & 7 deletions litgpt/finetune/adapter.py
@@ -22,7 +22,6 @@
 from litgpt.prompts import save_prompt_style
 from litgpt.tokenizer import Tokenizer
 from litgpt.utils import (
-    CLI,
     CycleIterator,
     check_valid_checkpoint_dir,
     choose_logger,
@@ -396,9 +395,3 @@ def validate_args(train: TrainArgs, eval: EvalArgs) -> None:
        issues.append(f"{__file__} requires either epochs or max_steps to be set. This is set in {train}")
    if issues:
        raise ValueError("\n".join(issues))
-
-
-if __name__ == "__main__":
-    torch.set_float32_matmul_precision("high")
-
-    CLI(setup)
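
The validate_args pattern retained in this file collects every configuration problem before raising once, rather than failing on the first. The same idiom in a self-contained form (the dataclass below is a simplified stand-in, not litgpt's actual TrainArgs):

from dataclasses import dataclass
from typing import Optional

@dataclass
class TrainArgs:  # simplified stand-in for litgpt's TrainArgs
    epochs: Optional[int] = None
    max_steps: Optional[int] = None

def validate_args(train: TrainArgs) -> None:
    issues = []
    if train.epochs is None and train.max_steps is None:
        issues.append("requires either epochs or max_steps to be set")
    if issues:
        raise ValueError("\n".join(issues))

validate_args(TrainArgs(epochs=1))  # passes
# validate_args(TrainArgs())        # would raise ValueError
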
7 changes: 0 additions & 7 deletions litgpt/finetune/adapter_v2.py
@@ -22,7 +22,6 @@
 from litgpt.prompts import save_prompt_style
 from litgpt.tokenizer import Tokenizer
 from litgpt.utils import (
-    CLI,
     CycleIterator,
     check_valid_checkpoint_dir,
     choose_logger,
@@ -396,9 +395,3 @@ def validate_args(train: TrainArgs, eval: EvalArgs) -> None:
        issues.append(f"{__file__} requires either epochs or max_steps to be set. This is set in {train}")
    if issues:
        raise ValueError("\n".join(issues))
-
-
-if __name__ == "__main__":
-    torch.set_float32_matmul_precision("high")
-
-    CLI(setup)
7 changes: 0 additions & 7 deletions litgpt/finetune/full.py
@@ -22,7 +22,6 @@
 from litgpt.prompts import save_prompt_style
 from litgpt.tokenizer import Tokenizer
 from litgpt.utils import (
-    CLI,
     CycleIterator,
     check_valid_checkpoint_dir,
     choose_logger,
@@ -420,9 +419,3 @@ def validate_longlora_args(config: Config, longlora: LongLoraArgs):
            f"LongLora context length ({longlora.context_length}) must be a multiple of the number of groups "
            f"({longlora.n_groups})."
        )
-
-
-if __name__ == "__main__":
-    torch.set_float32_matmul_precision("high")
-
-    CLI(setup)
9 changes: 1 addition & 8 deletions litgpt/finetune/lora.py
@@ -32,7 +32,6 @@
 from litgpt.scripts.merge_lora import merge_lora
 from litgpt.tokenizer import Tokenizer
 from litgpt.utils import (
-    CLI,
     CycleIterator,
     check_valid_checkpoint_dir,
     choose_logger,
@@ -502,10 +501,4 @@ def validate_longlora_args(config: Config, longlora: LongLoraArgs):
        raise ValueError(
            f"LongLora context length ({longlora.context_length}) must be a multiple of the number of groups "
            f"({longlora.n_groups})."
-        )
-
-
-if __name__ == "__main__":
-    torch.set_float32_matmul_precision("high")
-
-    CLI(setup)
+        )
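
Both finetune scripts now share this LongLoRA check: the context length must divide evenly into the shifted-attention groups. A worked illustration with made-up numbers (the names mirror the LongLoraArgs fields used above):

context_length = 8192
n_groups = 4

if context_length % n_groups != 0:
    raise ValueError(
        f"LongLora context length ({context_length}) must be a multiple "
        f"of the number of groups ({n_groups})."
    )

# Each attention group then covers a fixed-size slice of the context.
group_size = context_length // n_groups
print(group_size)  # 2048
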
6 changes: 0 additions & 6 deletions litgpt/generate/adapter.py
@@ -123,9 +123,3 @@ def main(
    fabric.print(f"\n\nTime for inference: {t:.02f} sec total, {tokens_generated / t:.02f} tokens/sec", file=sys.stderr)
    if fabric.device.type == "cuda":
        fabric.print(f"Memory used: {torch.cuda.max_memory_allocated() / 1e9:.02f} GB", file=sys.stderr)
-
-
-if __name__ == "__main__":
-    torch.set_float32_matmul_precision("high")
-
-    CLI(main)
8 changes: 1 addition & 7 deletions litgpt/generate/adapter_v2.py
@@ -13,7 +13,7 @@
 from litgpt.adapter_v2 import GPT, Config
 from litgpt.generate.base import generate
 from litgpt.prompts import has_prompt_style, load_prompt_style
-from litgpt.utils import CLI, check_valid_checkpoint_dir, get_default_supported_precision, lazy_load
+from litgpt.utils import check_valid_checkpoint_dir, get_default_supported_precision, lazy_load


def main(
@@ -123,9 +123,3 @@ def main(
    fabric.print(f"\n\nTime for inference: {t:.02f} sec total, {tokens_generated / t:.02f} tokens/sec", file=sys.stderr)
    if fabric.device.type == "cuda":
        fabric.print(f"Memory used: {torch.cuda.max_memory_allocated() / 1e9:.02f} GB", file=sys.stderr)
-
-
-if __name__ == "__main__":
-    torch.set_float32_matmul_precision("high")
-
-    CLI(main)
6 changes: 0 additions & 6 deletions litgpt/generate/base.py
@@ -264,9 +264,3 @@ def main(
    )
    if fabric.device.type == "cuda":
        fabric.print(f"Memory used: {torch.cuda.max_memory_allocated() / 1e9:.02f} GB", file=sys.stderr)
-
-
-if __name__ == "__main__":
-    torch.set_float32_matmul_precision("high")
-
-    CLI(main)
8 changes: 1 addition & 7 deletions litgpt/generate/full.py
@@ -13,7 +13,7 @@
 from litgpt import GPT, Config, PromptStyle, Tokenizer
 from litgpt.generate.base import generate
 from litgpt.prompts import has_prompt_style, load_prompt_style
-from litgpt.utils import CLI, check_valid_checkpoint_dir, get_default_supported_precision, load_checkpoint
+from litgpt.utils import check_valid_checkpoint_dir, get_default_supported_precision, load_checkpoint


def main(
@@ -141,9 +141,3 @@ def main(
    fabric.print(f"\n\nTime for inference: {t:.02f} sec total, {tokens_generated / t:.02f} tokens/sec", file=sys.stderr)
    if fabric.device.type == "cuda":
        fabric.print(f"Memory used: {torch.cuda.max_memory_allocated() / 1e9:.02f} GB", file=sys.stderr)
-
-
-if __name__ == "__main__":
-    torch.set_float32_matmul_precision("high")
-
-    CLI(main)
8 changes: 1 addition & 7 deletions litgpt/generate/sequentially.py
@@ -21,7 +21,7 @@
 import litgpt.generate.base as generate_base
 from litgpt import GPT, Config, Tokenizer
 from litgpt.model import Block, build_mask_cache
-from litgpt.utils import CLI, check_valid_checkpoint_dir, get_default_supported_precision
+from litgpt.utils import check_valid_checkpoint_dir, get_default_supported_precision


@torch.inference_mode()
@@ -254,9 +254,3 @@ def main(
        f"Time for inference {i + 1}: {t:.02f} sec total, {tokens_generated / t:.02f} tokens/sec", file=sys.stderr
    )
    print(f"Memory used: {torch.cuda.max_memory_allocated() / 1e9:.02f} GB", file=sys.stderr)
-
-
-if __name__ == "__main__":
-    torch.set_float32_matmul_precision("high")
-
-    CLI(main)
6 changes: 0 additions & 6 deletions litgpt/generate/tp.py
@@ -248,9 +248,3 @@ def main(
    )
    if fabric.device.type == "cuda":
        fabric.print(f"Memory used: {torch.cuda.max_memory_allocated() / 1e9:.02f} GB", file=sys.stderr)
-
-
-if __name__ == "__main__":
-    torch.set_float32_matmul_precision("high")
-
-    CLI(main)
5 changes: 0 additions & 5 deletions litgpt/pretrain.py
@@ -443,8 +443,3 @@ def validate_args(train: TrainArgs, eval: EvalArgs, initial_checkpoint_dir, resume) -> None:
    if issues:
        raise ValueError("\n".join(issues))

-
-if __name__ == "__main__":
-    torch.set_float32_matmul_precision("high")
-
-    CLI(setup)
6 changes: 0 additions & 6 deletions litgpt/scripts/convert_hf_checkpoint.py
@@ -349,9 +349,3 @@ def convert_hf_checkpoint(
    gc.collect()
    print(f"Saving converted checkpoint to {checkpoint_dir}")
    saver.save(sd)
-
-
-if __name__ == "__main__":
-    from jsonargparse import CLI
-
-    CLI(convert_hf_checkpoint)
6 changes: 1 addition & 5 deletions litgpt/scripts/convert_lit_checkpoint.py
@@ -10,7 +10,7 @@
 from litgpt import Config
 from litgpt.scripts.convert_hf_checkpoint import layer_template, load_param
-from litgpt.utils import CLI, incremental_save, lazy_load
+from litgpt.utils import incremental_save, lazy_load


def copy_weights_falcon(
@@ -265,7 +265,3 @@ def convert_lit_checkpoint(checkpoint_dir: Path, output_dir: Path) -> None:
    copy_fn(sd, lit_weights, saver=saver)
    gc.collect()
    saver.save(sd)
-
-
-if __name__ == "__main__":
-    CLI(convert_lit_checkpoint)
4 changes: 0 additions & 4 deletions litgpt/scripts/convert_pretrained_checkpoint.py
@@ -46,7 +46,3 @@ def convert_pretrained_checkpoint(checkpoint_dir: Path, output_dir: Path) -> None:
    saver.save(converted_state_dict)

    copy_config_files(checkpoint_dir, output_dir)
-
-
-if __name__ == "__main__":
-    CLI(convert_pretrained_checkpoint)
4 changes: 0 additions & 4 deletions litgpt/scripts/download.py
@@ -144,7 +144,3 @@ def gated_repo_catcher(repo_id: str, access_token: Optional[str]):
            f" visit https://huggingface.co/{repo_id} for more information."
        ) from None
    raise e from None
-
-
-if __name__ == "__main__":
-    CLI(download_from_hub)
4 changes: 0 additions & 4 deletions litgpt/scripts/merge_lora.py
@@ -86,7 +86,3 @@ def load_lora_metadata(checkpoint_dir: Path) -> Tuple[Dict[str, Any], Path, Optional[str]]:
    pretrained_checkpoint_dir = Path(hparams["checkpoint_dir"])
    precision = hparams.get("precision")
    return lora_params, pretrained_checkpoint_dir, precision
-
-
-if __name__ == "__main__":
-    CLI(merge_lora)
1 change: 1 addition & 0 deletions litgpt/utils.py
@@ -444,6 +444,7 @@ def save_hyperparameters(function: callable, checkpoint_dir: Path) -> None:
        ("finetune", "lora"),
        ("finetune", "adapter"),
        ("finetune", "adapter_v2"),
+       ("finetune",),
        ("pretrain",),
    ]
    for known_command in known_commands:
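
The new `("finetune",)` entry lets the bare `litgpt finetune` command be recognized when hyperparameters are re-serialized. A sketch of how such a table can strip the command prefix from argv (my own simplification; the real save_hyperparameters differs):

import sys
from typing import List, Tuple

KNOWN_COMMANDS: List[Tuple[str, ...]] = [
    ("finetune", "lora"),
    ("finetune", "adapter"),
    ("finetune", "adapter_v2"),
    ("finetune",),  # new in this commit: bare `litgpt finetune`
    ("pretrain",),
]

def strip_command(argv: List[str]) -> List[str]:
    # More specific entries come first in the table, so the longest prefix wins.
    for cmd in KNOWN_COMMANDS:
        if tuple(argv[1 : 1 + len(cmd)]) == cmd:
            return argv[1 + len(cmd):]  # keep only the option/value arguments
    return argv[1:]

print(strip_command(["litgpt", "finetune", "--lr", "3e-4"]))  # ['--lr', '3e-4']
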
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -37,7 +37,7 @@ all = [
    "sentencepiece>=0.2.0",      # llama-based models
    "tokenizers>=0.15.2",        # pythia, falcon, redpajama
    "requests>=2.31.0",          # litgpt.data
-   "litdata>=0.2.2",            # litgpt.data
+   "litdata>=0.2.2,<0.2.6",     # litgpt.data
    "litserve>=0.1.0",           # litgpt.deploy
    "zstandard>=0.22.0",         # litgpt.data.prepare_slimpajama.py
    "pandas>=1.9.0",             # litgpt.data.prepare_starcoder.py
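
The added upper bound keeps installations off litdata 0.2.6 and newer. One way to verify an existing environment against the pin, assuming a plain x.y.z version string:

from importlib.metadata import version

v = version("litdata")
major, minor, patch = (int(part) for part in v.split(".")[:3])
assert (0, 2, 2) <= (major, minor, patch) < (0, 2, 6), f"litdata {v} violates the pin"
print(f"litdata {v} satisfies >=0.2.2,<0.2.6")
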
9 changes: 2 additions & 7 deletions tests/test_chat.py
@@ -122,13 +122,8 @@ def test_main(mocked_input, stop_iteration, fake_checkpoint_dir, monkeypatch, tensor_like):
    assert re.match("Now chatting with Llama 3.*>> .*Reply: foo bar baz", out.getvalue(), re.DOTALL)


-@pytest.mark.parametrize("mode", ["file", "entrypoint"])
-def test_cli(mode):
-    if mode == "file":
-        cli_path = Path(__file__).parent.parent / "litgpt/chat/base.py"
-        args = [sys.executable, cli_path, "-h"]
-    else:
-        args = ["litgpt", "chat", "-h"]
+def test_cli():
+    args = ["litgpt", "chat", "-h"]
    output = subprocess.check_output(args)
    output = str(output.decode())
    assert "Starts a conversation" in output
12 changes: 2 additions & 10 deletions tests/test_evaluate.py
@@ -1,14 +1,11 @@
 # Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

 import subprocess
-import sys
 from contextlib import redirect_stdout
 from dataclasses import asdict
 from io import StringIO
-from pathlib import Path
 from unittest import mock

-import pytest
 import torch
 import yaml

@@ -43,13 +40,8 @@ def test_evaluate_script(tmp_path):
    assert "Loading checkpoint shards" not in stdout


-@pytest.mark.parametrize("mode", ["file", "entrypoint"])
-def test_cli(mode):
-    if mode == "file":
-        cli_path = Path(__file__).parent.parent / "litgpt/eval/evaluate.py"
-        args = [sys.executable, cli_path, "-h"]
-    else:
-        args = ["litgpt", "evaluate", "-h"]
+def test_cli():
+    args = ["litgpt", "evaluate", "-h"]
    output = subprocess.check_output(args)
    output = str(output.decode())
    assert "run the LM Evaluation Harness" in output
9 changes: 2 additions & 7 deletions tests/test_generate.py
@@ -83,13 +83,8 @@ def test_main(fake_checkpoint_dir, monkeypatch, tensor_like):
    assert "'padded_vocab_size': 512, 'n_layer': 2, 'n_head': 4" in err.getvalue()


-@pytest.mark.parametrize("mode", ["file", "entrypoint"])
-def test_cli(mode):
-    if mode == "file":
-        cli_path = Path(__file__).parent.parent / "litgpt/generate/base.py"
-        args = [sys.executable, cli_path, "-h"]
-    else:
-        args = ["litgpt", "generate", "base", "-h"]
+def test_cli():
+    args = ["litgpt", "generate", "base", "-h"]
    output = subprocess.check_output(args)
    output = str(output.decode())
    assert "Generates text samples" in output
9 changes: 2 additions & 7 deletions tests/test_generate_adapter.py
@@ -48,13 +48,8 @@ def test_main(fake_checkpoint_dir, monkeypatch, version, tensor_like):


 @pytest.mark.parametrize("version", ("", "_v2"))
-@pytest.mark.parametrize("mode", ["file", "entrypoint"])
-def test_cli(version, mode):
-    if mode == "file":
-        cli_path = Path(__file__).parent.parent / f"litgpt/generate/adapter{version}.py"
-        args = [sys.executable, cli_path, "-h"]
-    else:
-        args = ["litgpt", "generate", f"adapter{version}", "-h"]
+def test_cli(version):
+    args = ["litgpt", "generate", f"adapter{version}", "-h"]
    output = subprocess.check_output(args)
    output = str(output.decode())
    assert "Generates a response" in output