Simplify code (#1429)
rasbt authored May 21, 2024
1 parent cbbe9cd commit aa95635
Showing 3 changed files with 3 additions and 6 deletions.
litgpt/finetune/adapter.py (3 changes: 1 addition & 2 deletions)

@@ -145,15 +145,14 @@ def main(
 
     model = fabric.setup_module(model)
 
-    trainable_params = [p for p in model.parameters() if p.requires_grad]
     if isinstance(fabric.strategy.precision, BitsandbytesPrecision):
         import bitsandbytes as bnb
 
         optimizer_cls = bnb.optim.PagedAdamW
     else:
         optimizer_cls = torch.optim.AdamW
     optimizer = optimizer_cls(
-        trainable_params, lr=train.learning_rate, weight_decay=train.weight_decay, betas=(train.beta1, train.beta2)
+        model.parameters(), lr=train.learning_rate, weight_decay=train.weight_decay, betas=(train.beta1, train.beta2)
     )
     optimizer = fabric.setup_optimizers(optimizer)
     scheduler = get_lr_scheduler(optimizer, warmup_steps=train.lr_warmup_steps, max_steps=lr_max_steps)
litgpt/finetune/adapter_v2.py (3 changes: 1 addition & 2 deletions)

@@ -145,15 +145,14 @@ def main(
 
     model = fabric.setup_module(model)
 
-    trainable_params = [p for p in model.parameters() if p.requires_grad]
     if isinstance(fabric.strategy.precision, BitsandbytesPrecision):
         import bitsandbytes as bnb
 
         optimizer_cls = bnb.optim.PagedAdamW
     else:
         optimizer_cls = torch.optim.AdamW
     optimizer = optimizer_cls(
-        trainable_params, lr=train.learning_rate, weight_decay=train.weight_decay, betas=(train.beta1, train.beta2)
+        model.parameters(), lr=train.learning_rate, weight_decay=train.weight_decay, betas=(train.beta1, train.beta2)
     )
     optimizer = fabric.setup_optimizers(optimizer)
     scheduler = get_lr_scheduler(optimizer, warmup_steps=train.lr_warmup_steps, max_steps=lr_max_steps)
litgpt/finetune/lora.py (3 changes: 1 addition & 2 deletions)

@@ -175,15 +175,14 @@ def main(
 
     model = fabric.setup_module(model)
 
-    trainable_params = [p for p in model.parameters() if p.requires_grad]
     if isinstance(fabric.strategy.precision, BitsandbytesPrecision):
        import bitsandbytes as bnb
 
        optimizer_cls = bnb.optim.PagedAdamW
     else:
        optimizer_cls = torch.optim.AdamW
     optimizer = optimizer_cls(
-        trainable_params, lr=train.learning_rate, weight_decay=train.weight_decay, betas=(train.beta1, train.beta2)
+        model.parameters(), lr=train.learning_rate, weight_decay=train.weight_decay, betas=(train.beta1, train.beta2)
     )
     optimizer = fabric.setup_optimizers(optimizer)
     scheduler = get_lr_scheduler(optimizer, warmup_steps=train.lr_warmup_steps, max_steps=lr_max_steps)
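
The same change is applied in all three finetuning scripts: the explicit trainable_params filter is dropped and model.parameters() is passed to the optimizer directly. Below is a minimal sketch (not part of the commit, using a toy model) of why this is behavior-preserving under standard PyTorch semantics: parameters with requires_grad=False never receive a .grad, and torch.optim.AdamW skips any parameter whose .grad is None, so frozen base weights stay untouched either way.

import torch
import torch.nn as nn

# Toy stand-in for adapter/LoRA finetuning: a frozen "base" layer plus a
# trainable "adapter" layer.
model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
for p in model[0].parameters():
    p.requires_grad = False  # freeze the base layer

frozen_before = model[0].weight.clone()

# Pass all parameters, as the commit now does, instead of filtering them first.
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-1, weight_decay=0.01)

loss = model(torch.randn(8, 4)).sum()
loss.backward()
optimizer.step()

assert model[0].weight.grad is None                 # frozen layer never received a gradient
assert torch.equal(model[0].weight, frozen_before)  # and was not modified by step()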
