train.py

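"""Training entry point for the Seer agent.

Builds a SeerAgent, selects a training dataset by ``finetune_type`` (CALVIN,
DROID, LIBERO, OXE, or a real-robot set), wraps the model in
DistributedDataParallel, and runs the epoch loop with checkpoint saving,
optional wandb logging, and support for resuming or finetuning from a
pretrained checkpoint.
"""
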
import os
import random

import numpy as np
import torch
import wandb
import clip
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.elastic.multiprocessing.errors import record
from transformers import (
    get_constant_schedule_with_warmup,
    get_cosine_schedule_with_warmup,
    get_linear_schedule_with_warmup,
)

from models.seer_model import SeerAgent
from utils.train_utils import get_checkpoint, train_one_epoch_calvin, get_ckpt_name
from utils.arguments_utils import get_parser
from utils.data_utils import (
    get_calvin_dataset,
    get_droid_dataset,
    get_libero_pretrain_dataset,
    get_libero_finetune_dataset,
    get_real_finetune_dataset,
    get_oxe_dataset,
)
from utils.distributed_utils import init_distributed_device, world_info_from_env


def random_seed(seed=42, rank=0):
    torch.manual_seed(seed + rank)
    np.random.seed(seed + rank)
    random.seed(seed + rank)


def count_parameters(model):
    total_params = 0
    trainable_params = 0
    for param in model.parameters():
        total_params += param.numel()
        if param.requires_grad:
            trainable_params += param.numel()
    return total_params, trainable_params


@record
def main(args):
    os.environ["WANDB_DIR"] = os.path.abspath(args.save_checkpoint_path)
    if args.save_checkpoints_to_wandb and args.save_checkpoint and not args.report_to_wandb:
        raise ValueError("save_checkpoints_to_wandb requires report_to_wandb")
    if args.offline:
        os.environ["WANDB_MODE"] = "offline"
        os.environ["TRANSFORMERS_OFFLINE"] = "1"

    args.local_rank, args.rank, args.world_size = world_info_from_env()
    device_id = init_distributed_device(args)
    print("device_id: ", device_id)
    random_seed(args.seed)

    # Effective (global) batch size: per-GPU batch size scaled by world size
    # and gradient accumulation.
    ptbs = args.world_size * args.batch_size * args.gradient_accumulation_steps
    print("training batch size:", ptbs)
    args.run_name = args.run_name.replace(
        "Seer",
        f"Seer_ptbs{ptbs}_{args.transformer_layers}layers_{args.transformer_heads}heads_hd{args.hidden_dim}",
    )
    print("run_name:", args.run_name)
    model = SeerAgent(
        finetune_type=args.finetune_type,
        clip_device=device_id,
        vit_checkpoint_path=args.vit_checkpoint_path,
        sequence_length=args.sequence_length,
        num_resampler_query=args.num_resampler_query,
        num_obs_token_per_image=args.num_obs_token_per_image,
        calvin_input_image_size=args.calvin_input_image_size,
        patch_size=args.patch_size,
        action_pred_steps=args.action_pred_steps,
        obs_pred=args.obs_pred,
        atten_only_obs=args.atten_only_obs,
        attn_robot_proprio_state=args.attn_robot_proprio_state,
        atten_goal=args.atten_goal,
        atten_goal_state=args.atten_goal_state,
        mask_l_obs_ratio=args.mask_l_obs_ratio,
        transformer_layers=args.transformer_layers,
        hidden_dim=args.hidden_dim,
        transformer_heads=args.transformer_heads,
        phase=args.phase,
        gripper_width=args.gripper_width,
    )
    if args.finetune_type == "calvin":
        calvin_dataset = get_calvin_dataset(args, model.image_processor, clip, epoch=0, except_lang=args.except_lang)
    elif args.finetune_type == "droid":
        calvin_dataset = get_droid_dataset(args, model.image_processor, clip, epoch=0)
    elif args.finetune_type == "libero_pretrain":
        calvin_dataset = get_libero_pretrain_dataset(args, model.image_processor, clip, epoch=0)
    elif args.finetune_type == "libero_finetune":
        calvin_dataset = get_libero_finetune_dataset(args, model.image_processor, clip, epoch=0)
    elif args.finetune_type == "real":
        calvin_dataset = get_real_finetune_dataset(args, model.image_processor, clip, epoch=0)
    elif args.finetune_type == "oxe":
        calvin_dataset = get_oxe_dataset(args, model.image_processor, clip, epoch=0)
    else:
        raise ValueError(f"Unknown finetune_type: {args.finetune_type}")
    random_seed(args.seed, args.rank)
    print(f"Start running training on rank {args.rank}.")

    if args.rank == 0 and args.report_to_wandb:
        print("wandb_project:", args.wandb_project)
        print("wandb_entity:", args.wandb_entity)
        wandb.init(
            project=args.wandb_project,
            entity=args.wandb_entity,
            name=args.run_name,
            config=vars(args),
        )
    # Map this rank to a local CUDA device.
    device_id = args.rank % torch.cuda.device_count()

    # Cast the whole model to the requested precision, then selectively
    # promote the listed submodules to bfloat16.
    if args.precision in ("bf16", "amp_bfloat16", "amp_bf16"):
        model = model.bfloat16()
    elif args.precision == "fp16":
        model = model.half()
    elif args.precision == "fp32":
        model = model.float()
    if "vision_encoder" in args.bf16_module:
        model.vision_encoder.bfloat16()
    if "causal_transformer" in args.bf16_module:
        model.transformer_backbone.bfloat16()
    if "image_decoder" in args.bf16_module:
        model.image_decoder.bfloat16()
        model.image_decoder_obs_pred_projector.bfloat16()

    # Freeze the CLIP text encoder and the vision backbone.
    model.clip_model.requires_grad_(False)
    model.vision_encoder.requires_grad_(False)
    total_params, trainable_params = count_parameters(model)
    print("total_params: {:.2f} M".format(total_params / 1e6))
    print("trainable_params: {:.2f} M".format(trainable_params / 1e6))

    model = model.to(device_id)
    model._init_model_type()
    ddp_model = DDP(model, device_ids=[device_id], find_unused_parameters=True)
    # Only parameters with requires_grad=True are handed to the optimizer,
    # so the frozen CLIP and vision-encoder weights are excluded.
    optimizer = torch.optim.AdamW(
        [p for p in ddp_model.parameters() if p.requires_grad],
        lr=args.learning_rate,
        weight_decay=args.weight_decay,
    )
    total_training_steps = calvin_dataset.dataloader.num_batches * args.num_epochs
    args.warmup_steps = calvin_dataset.dataloader.num_batches * args.warmup_epochs
    if args.rank == 0:
        print(f"Total training steps: {total_training_steps}")

    # Schedulers step once per optimizer update, so scale the step counts
    # down by the gradient accumulation factor when it is larger than one.
    if args.gradient_accumulation_steps > 1:
        num_warmup_steps = args.warmup_steps // args.gradient_accumulation_steps + 1
        num_training_steps = total_training_steps // args.gradient_accumulation_steps + 1
    else:
        num_warmup_steps = args.warmup_steps
        num_training_steps = total_training_steps

    if args.lr_scheduler == "linear":
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
        )
    elif args.lr_scheduler == "cosine":
        lr_scheduler = get_cosine_schedule_with_warmup(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
        )
    elif args.lr_scheduler == "cosine_restart":
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer, T_0=10, T_mult=2, eta_min=1e-7
        )
    else:
        lr_scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps)
    resume_from_epoch = 0
    if args.finetune_from_pretrained_ckpt is not None:
        if args.rank == 0:
            print(f"Starting finetuning from pretrained checkpoint {args.finetune_from_pretrained_ckpt}")
        checkpoint = torch.load(args.finetune_from_pretrained_ckpt, map_location="cpu")
        state_dict = checkpoint["model_state_dict"]
        image_decoder_keys = [k for k in state_dict.keys() if "image_decoder" in k]
        action_decoder_keys = [k for k in state_dict.keys() if "action_decoder" in k]
        # Optionally drop pretrained weights so that the corresponding
        # modules are re-initialized for finetuning.
        if args.reset_action_token:
            del state_dict["module.action_pred_token"]
        if args.reset_obs_token:
            del state_dict["module.obs_tokens"]
        if args.reset_mask_token:
            del state_dict["module.mask_token"]
        if args.reset_image_decoder:
            for k in image_decoder_keys:
                if k in state_dict:
                    del state_dict[k]
        if args.reset_action_decoder:
            for k in action_decoder_keys:
                if k in state_dict:
                    del state_dict[k]
        # If the pretrained position embedding was trained with a longer
        # sequence length, truncate it to the current sequence length.
        if state_dict["module.transformer_backbone_position_embedding"].shape != ddp_model.module.transformer_backbone_position_embedding.shape:
            state_dict["module.transformer_backbone_position_embedding"] = \
                state_dict["module.transformer_backbone_position_embedding"][:, :args.sequence_length, :, :]
        print("loading pretrained weights:", state_dict.keys())
        ddp_model.load_state_dict(state_dict, strict=False)
    if args.resume_from_checkpoint is not None:
        if args.rank == 0:
            print(f"Loading checkpoint from {args.resume_from_checkpoint}")
        checkpoint = torch.load(args.resume_from_checkpoint, map_location="cpu")
        ddp_model.load_state_dict(checkpoint["model_state_dict"], strict=False)
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        lr_scheduler.load_state_dict(checkpoint["lr_scheduler_state_dict"])
        resume_from_epoch = checkpoint["epoch"] + 1

    ckpt_dir = os.path.join(args.save_checkpoint_path, args.run_name)
    if args.rank == 0 and not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    ddp_model.train()
    for epoch in range(resume_from_epoch, args.num_epochs):
        calvin_dataset.set_epoch(epoch)
        calvin_loader = calvin_dataset.dataloader
        train_one_epoch_calvin(
            args=args,
            model=ddp_model,
            epoch=epoch,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            calvin_loader=calvin_loader,
            device_id=device_id,
            wandb=wandb,
        )
        if args.rank == 0 and args.save_checkpoint and epoch % args.save_checkpoint_seq == 0 and epoch > args.start_save_checkpoint:
            checkpoint_dict = {
                "epoch": epoch,
                "model_state_dict": get_checkpoint(ddp_model),
                "optimizer_state_dict": optimizer.state_dict(),
                "lr_scheduler_state_dict": lr_scheduler.state_dict(),
            }
            ckpt_name = get_ckpt_name(args, epoch)
            ckpt_path = os.path.join(ckpt_dir, ckpt_name)
            print(f"Saving checkpoint to {ckpt_path}")
            torch.save(checkpoint_dict, ckpt_path)
            # Remove the checkpoint from the previous save interval, not the
            # file just written; checkpoints are saved every
            # `save_checkpoint_seq` epochs, so that is the most recent one.
            if args.delete_previous_checkpoint and epoch > 0:
                prev_ckpt_path = os.path.join(ckpt_dir, get_ckpt_name(args, epoch - args.save_checkpoint_seq))
                if prev_ckpt_path != ckpt_path and os.path.exists(prev_ckpt_path):
                    os.remove(prev_ckpt_path)


if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
    main(args)
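
# Example multi-GPU launch (a sketch: the script uses DDP with the elastic
# `@record` wrapper, so a torchrun-style launcher is expected; the flag names
# below are placeholders — the real ones are defined in
# utils/arguments_utils.py):
#   torchrun --nnodes=1 --nproc_per_node=8 train.py \
#       --finetune_type calvin --batch_size 4 --num_epochs 20 ...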