Wendy-Fly committed
Commit 0edaf98 · verified · 1 Parent(s): e2dc325

Upload pretrain_unified_navit.py with huggingface_hub

Files changed (1):
  1. pretrain_unified_navit.py +705 -0
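The commit message notes the file was pushed with huggingface_hub. A minimal sketch of such an upload using HfApi.upload_file; the repo_id below is a placeholder, not taken from this page:

    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_file(
        path_or_fileobj="pretrain_unified_navit.py",
        path_in_repo="pretrain_unified_navit.py",
        repo_id="<user>/<repo>",  # placeholder; the target repo is not shown in this view
        commit_message="Upload pretrain_unified_navit.py with huggingface_hub",
    )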
pretrain_unified_navit.py ADDED
@@ -0,0 +1,705 @@
# Copyright 2025 Bytedance Ltd. and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0

import functools
import os
import wandb
import yaml
from copy import deepcopy
from dataclasses import dataclass, field
from time import time

import torch
import torch.distributed as dist
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    CheckpointImpl,
    apply_activation_checkpointing,
    checkpoint_wrapper,
)
from torch.utils.data import DataLoader
from transformers import HfArgumentParser, set_seed
from transformers.optimization import (
    get_constant_schedule_with_warmup,
    get_cosine_with_min_lr_schedule_with_warmup,
)

from data.dataset_base import DataConfig, PackedDataset, collate_wrapper
from data.data_utils import add_special_tokens
from modeling.autoencoder import load_ae
from modeling.bagel import (
    BagelConfig, Bagel, Qwen2Config, Qwen2ForCausalLM, SiglipVisionConfig, SiglipVisionModel
)
from modeling.qwen2 import Qwen2Tokenizer
from train.train_utils import create_logger, get_latest_ckpt
from train.fsdp_utils import (
    FSDPCheckpoint, FSDPConfig, grad_checkpoint_check_fn, fsdp_wrapper,
    fsdp_ema_setup, fsdp_ema_update,
)

@dataclass
class ModelArguments:
    model_path: str = field(
        default="/mnt/beegfs/Workspace/Models/BAGEL-7B-MoT",
        metadata={"help": "Path of the pretrained BAGEL model."}
    )
    llm_path: str = field(
        default="/mnt/beegfs/Workspace/Models/Qwen2.5-0.5B-Instruct/",
        metadata={"help": "Path or HuggingFace repo ID of the pretrained Qwen2-style language model."}
    )
    llm_qk_norm: bool = field(
        default=True,
        metadata={"help": "Enable QK LayerNorm (qk_norm) inside the attention blocks."}
    )
    tie_word_embeddings: bool = field(
        default=False,
        metadata={"help": "Share input and output word embeddings (tied embeddings)."}
    )
    layer_module: str = field(
        default="Qwen2MoTDecoderLayer",
        metadata={"help": "Python class name of the decoder layer to instantiate."}
    )
    vae_path: str = field(
        default="/mnt/beegfs/Workspace/Models/vae-flux/ae.safetensors",
        metadata={"help": "Path to the pretrained VAE checkpoint for latent-space image generation."}
    )
    vit_path: str = field(
        default="/mnt/beegfs/Workspace/Models/siglip-so400m-14-980-flash-attn2-navit/",
        metadata={"help": "Path or repo ID of the SigLIP Vision Transformer used for image understanding."}
    )
    max_latent_size: int = field(
        default=32,
        metadata={"help": "Maximum latent grid size (patches per side) for the VAE latent tensor."}
    )
    latent_patch_size: int = field(
        default=2,
        metadata={"help": "Spatial size (in VAE pixels) covered by each latent patch."}
    )
    vit_patch_size: int = field(
        default=14,
        metadata={"help": "Patch size (pixels) for the Vision Transformer encoder."}
    )
    vit_max_num_patch_per_side: int = field(
        default=70,
        metadata={"help": "Maximum number of ViT patches along one image side after cropping / resize."}
    )
    connector_act: str = field(
        default="gelu_pytorch_tanh",
        metadata={"help": "Activation function used in the latent-to-text connector MLP."}
    )
    interpolate_pos: bool = field(
        default=False,
        metadata={"help": "Interpolate positional embeddings when image resolution differs from pre-training."}
    )
    vit_select_layer: int = field(
        default=-2,
        metadata={"help": "Which hidden layer of the ViT to take as the visual feature (negative = from the end)."}
    )
    vit_rope: bool = field(
        default=False,
        metadata={"help": "Replace ViT positional encodings with RoPE."}
    )

    text_cond_dropout_prob: float = field(
        default=0.1,
        metadata={"help": "Probability of dropping text embeddings during training."}
    )
    vae_cond_dropout_prob: float = field(
        default=0.3,
        metadata={"help": "Probability of dropping VAE latent inputs during training."}
    )
    vit_cond_dropout_prob: float = field(
        default=0.3,
        metadata={"help": "Probability of dropping ViT visual features during training."}
    )

@dataclass
class DataArguments:
    dataset_config_file: str = field(
        default="data/configs/example.yaml",
        metadata={"help": "YAML file specifying dataset groups, weights, and preprocessing rules."}
    )
    prefetch_factor: int = field(
        default=2,
        metadata={"help": "How many batches each DataLoader worker pre-loads in advance."}
    )
    num_workers: int = field(
        default=4,
        metadata={"help": "Number of background workers for the PyTorch DataLoader."}
    )
    max_num_tokens_per_sample: int = field(
        default=16384,
        metadata={"help": "Maximum tokens allowed in one raw sample; longer samples are skipped."}
    )
    max_num_tokens: int = field(
        default=36864,
        metadata={"help": "Hard limit on tokens in a packed batch; flush if adding a sample would exceed it."}
    )
    prefer_buffer_before: int = field(
        default=16384,
        metadata={"help": "While batch length is below this, pop from the overflow buffer before new sampling."}
    )
    max_buffer_size: int = field(
        default=50,
        metadata={"help": "Maximum number of oversized samples kept in the overflow buffer."}
    )
    data_seed: int = field(
        default=42,
        metadata={"help": "Seed used when shuffling / sampling data shards to ensure reproducibility."}
    )

@dataclass
class TrainingArguments:
    # --- modality switches ---
    visual_gen: bool = field(
        default=True,
        metadata={"help": "Train image generation branch."}
    )
    visual_und: bool = field(
        default=False,
        metadata={"help": "Train image understanding branch."}
    )

    # --- bookkeeping & logging ---
    results_dir: str = field(
        default="results",
        metadata={"help": "Root directory for logs."}
    )
    checkpoint_dir: str = field(
        default="results/checkpoints",
        metadata={"help": "Root directory for model checkpoints."}
    )
    wandb_project: str = field(
        default="bagel",
        metadata={"help": "Weights & Biases project name."}
    )
    wandb_name: str = field(
        default="run",
        metadata={"help": "Name shown in the Weights & Biases UI for this run."}
    )
    wandb_runid: str = field(
        default="0",
        metadata={"help": "Unique identifier to resume a previous W&B run, if desired."}
    )
    wandb_resume: str = field(
        default="allow",
        metadata={"help": "W&B resume mode: 'allow', 'must', or 'never'."}
    )
    wandb_offline: bool = field(
        default=False,
        metadata={"help": "Run W&B in offline mode (logs locally, sync later)."}
    )

    # --- reproducibility & resume ---
    global_seed: int = field(
        default=4396,
        metadata={"help": "Base random seed; actual seed is offset by rank for DDP."}
    )
    auto_resume: bool = field(
        default=False,
        metadata={"help": "Automatically pick up the latest checkpoint found in checkpoint_dir."}
    )
    resume_from: str = field(
        default=None,
        metadata={"help": "Explicit checkpoint path to resume from (overrides auto_resume)."}
    )
    resume_model_only: bool = field(
        default=False,
        metadata={"help": "Load only model weights, ignoring optimizer/scheduler states."}
    )
    finetune_from_ema: bool = field(
        default=False,
        metadata={"help": "When resume_model_only=True, load the EMA (exponential moving average) weights instead of raw weights."}
    )
    finetune_from_hf: bool = field(
        default=False,
        metadata={"help": "Whether to finetune from a HuggingFace model."}
    )

    # --- reporting frequency ---
    log_every: int = field(
        default=10,
        metadata={"help": "Print / log every N training steps."}
    )
    save_every: int = field(
        default=2000,
        metadata={"help": "Save a checkpoint every N training steps."}
    )
    total_steps: int = field(
        default=500_000,
        metadata={"help": "Total number of optimizer steps to train for."}
    )

    # --- optimization & scheduler ---
    warmup_steps: int = field(
        default=2000,
        metadata={"help": "Linear warm-up steps before applying the main LR schedule."}
    )
    lr_scheduler: str = field(
        default="constant",
        metadata={"help": "Type of LR schedule: 'constant' or 'cosine'."}
    )
    lr: float = field(
        default=1e-4,
        metadata={"help": "Peak learning rate after warm-up."}
    )
    min_lr: float = field(
        default=1e-7,
        metadata={"help": "Minimum learning rate for cosine schedule (ignored for constant)."}
    )
    beta1: float = field(
        default=0.9,
        metadata={"help": "AdamW β₁ coefficient."}
    )
    beta2: float = field(
        default=0.95,
        metadata={"help": "AdamW β₂ coefficient."}
    )
    eps: float = field(
        default=1e-15,
        metadata={"help": "AdamW ε for numerical stability."}
    )
    ema: float = field(
        default=0.9999,
        metadata={"help": "Decay rate for the exponential moving average of model weights."}
    )
    max_grad_norm: float = field(
        default=1.0,
        metadata={"help": "Gradient clipping threshold (L2 norm)."}
    )
    timestep_shift: float = field(
        default=1.0,
        metadata={"help": "Shift applied to diffusion timestep indices (for latent prediction)."}
    )
    mse_weight: float = field(
        default=1.0,
        metadata={"help": "Scaling factor for the image-reconstruction MSE loss term."}
    )
    ce_weight: float = field(
        default=1.0,
        metadata={"help": "Scaling factor for the language cross-entropy loss term."}
    )
    ce_loss_reweighting: bool = field(
        default=False,
        metadata={"help": "Reweight CE loss by token importance (provided via ce_loss_weights)."}
    )
    expected_num_tokens: int = field(
        default=32768,
        metadata={"help": "Soft target token count; yield the batch once it reaches or exceeds this size."}
    )

    # --- distributed training / FSDP ---
    num_replicate: int = field(
        default=1,
        metadata={"help": "Number of replica groups when using FSDP HYBRID_SHARD."}
    )
    num_shard: int = field(
        default=8,
        metadata={"help": "Number of parameter shards when using FSDP HYBRID_SHARD."}
    )
    sharding_strategy: str = field(
        default="HYBRID_SHARD",
        metadata={"help": "FSDP sharding strategy: FULL_SHARD, SHARD_GRAD_OP, HYBRID_SHARD, etc."}
    )
    backward_prefetch: str = field(
        default="BACKWARD_PRE",
        metadata={"help": "FSDP backward prefetch strategy (BACKWARD_PRE or NO_PREFETCH)."}
    )
    cpu_offload: bool = field(
        default=False,
        metadata={"help": "Enable FSDP parameter offload to CPU."}
    )

    # --- module freezing ---
    freeze_llm: bool = field(
        default=False,
        metadata={"help": "Keep language-model weights fixed (no gradient updates)."}
    )
    freeze_vit: bool = field(
        default=False,
        metadata={"help": "Keep ViT weights fixed during training."}
    )
    freeze_vae: bool = field(
        default=True,
        metadata={"help": "Keep VAE weights fixed; only predict latents, don't fine-tune encoder/decoder."}
    )
    freeze_und: bool = field(
        default=False,
        metadata={"help": "Freeze the visual understanding connector layers."}
    )
    copy_init_moe: bool = field(
        default=True,
        metadata={"help": "Duplicate initial MoE experts so each has identical initialisation."}
    )
    use_flex: bool = field(
        default=False,
        metadata={"help": "Enable FLEX (flash-ext friendly) packing algorithm for sequence data."}
    )

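# These dataclasses are parsed by HfArgumentParser, so every field above can be
# overridden from the command line. A minimal launch sketch, assuming a single
# 8-GPU node managed by torchrun; the values below are illustrative, not a
# prescribed configuration:
#
#   torchrun --nproc_per_node=8 pretrain_unified_navit.py \
#       --dataset_config_file data/configs/example.yaml \
#       --results_dir results \
#       --checkpoint_dir results/checkpoints \
#       --num_shard 8 --num_replicate 1
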
def main():
    assert torch.cuda.is_available()
    dist.init_process_group("nccl")
    device = dist.get_rank() % torch.cuda.device_count()
    torch.cuda.set_device(device)
    parser = HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging:
    if dist.get_rank() == 0:
        os.makedirs(training_args.results_dir, exist_ok=True)
        os.makedirs(training_args.checkpoint_dir, exist_ok=True)
        logger = create_logger(training_args.results_dir, dist.get_rank())
        wandb.init(
            project=training_args.wandb_project,
            id=f"{training_args.wandb_name}-run{training_args.wandb_runid}",
            name=training_args.wandb_name,
            resume=training_args.wandb_resume,
            mode="offline" if training_args.wandb_offline else "online"
        )
        # wandb.config.update(training_args)
        # wandb.config.update(model_args)
        # wandb.config.update(data_args)
        wandb.config.update(training_args, allow_val_change=True)
        wandb.config.update(model_args, allow_val_change=True)
        wandb.config.update(data_args, allow_val_change=True)
    else:
        logger = create_logger(None, dist.get_rank())
    dist.barrier()
    logger.info(f'Training arguments {training_args}')
    logger.info(f'Model arguments {model_args}')
    logger.info(f'Data arguments {data_args}')

    # Prepare auto-resume logic:
    if training_args.auto_resume:
        resume_from = get_latest_ckpt(training_args.checkpoint_dir)
        if resume_from is None:
            resume_from = training_args.resume_from
            resume_model_only = training_args.resume_model_only
            if resume_model_only:
                finetune_from_ema = training_args.finetune_from_ema
            else:
                finetune_from_ema = False
        else:
            resume_model_only = False
            finetune_from_ema = False
    else:
        resume_from = training_args.resume_from
        resume_model_only = training_args.resume_model_only
        if resume_model_only:
            finetune_from_ema = training_args.finetune_from_ema
        else:
            finetune_from_ema = False

    # Set seed:
    seed = training_args.global_seed * dist.get_world_size() + dist.get_rank()
    set_seed(seed)

    # Setup model:
    if training_args.finetune_from_hf:
        llm_config = Qwen2Config.from_json_file(os.path.join(model_args.model_path, "llm_config.json"))
    else:
        llm_config = Qwen2Config.from_pretrained(model_args.llm_path)
    llm_config.layer_module = model_args.layer_module
    llm_config.qk_norm = model_args.llm_qk_norm
    llm_config.tie_word_embeddings = model_args.tie_word_embeddings
    llm_config.freeze_und = training_args.freeze_und
    if training_args.finetune_from_hf:
        language_model = Qwen2ForCausalLM(llm_config)
    else:
        language_model = Qwen2ForCausalLM.from_pretrained(model_args.llm_path, config=llm_config)
    if training_args.copy_init_moe:
        language_model.init_moe()

    if training_args.visual_und:
        if training_args.finetune_from_hf:
            vit_config = SiglipVisionConfig.from_json_file(os.path.join(model_args.model_path, "vit_config.json"))
        else:
            vit_config = SiglipVisionConfig.from_pretrained(model_args.vit_path)
        vit_config.num_hidden_layers = vit_config.num_hidden_layers + 1 + model_args.vit_select_layer
        vit_config.rope = model_args.vit_rope
        if training_args.finetune_from_hf:
            vit_model = SiglipVisionModel(vit_config)
        else:
            vit_model = SiglipVisionModel.from_pretrained(model_args.vit_path, config=vit_config)

    if training_args.visual_gen:
        vae_model, vae_config = load_ae(
            local_path=os.path.join(model_args.model_path, "ae.safetensors")
            if training_args.finetune_from_hf else model_args.vae_path
        )

    config = BagelConfig(
        visual_gen=training_args.visual_gen,
        visual_und=training_args.visual_und,
        llm_config=llm_config,
        vit_config=vit_config if training_args.visual_und else None,
        vae_config=vae_config if training_args.visual_gen else None,
        latent_patch_size=model_args.latent_patch_size,
        max_latent_size=model_args.max_latent_size,
        vit_max_num_patch_per_side=model_args.vit_max_num_patch_per_side,
        connector_act=model_args.connector_act,
        interpolate_pos=model_args.interpolate_pos,
        timestep_shift=training_args.timestep_shift,
    )
    model = Bagel(
        language_model,
        vit_model if training_args.visual_und else None,
        config
    )

    if training_args.visual_und:
        model.vit_model.vision_model.embeddings.convert_conv2d_to_linear(vit_config)

    # Setup tokenizer for model:
    tokenizer = Qwen2Tokenizer.from_pretrained(model_args.model_path if training_args.finetune_from_hf else model_args.llm_path)
    tokenizer, new_token_ids, num_new_tokens = add_special_tokens(tokenizer)
    if num_new_tokens > 0:
        model.language_model.resize_token_embeddings(len(tokenizer))
        model.config.llm_config.vocab_size = len(tokenizer)
        model.language_model.config.vocab_size = len(tokenizer)

    # Maybe freeze something:
    if training_args.freeze_vae and training_args.visual_gen:
        for param in vae_model.parameters():
            param.requires_grad = False
    if training_args.freeze_llm:
        model.language_model.eval()
        for param in model.language_model.parameters():
            param.requires_grad = False
    if training_args.freeze_vit and training_args.visual_und:
        model.vit_model.eval()
        for param in model.vit_model.parameters():
            param.requires_grad = False

    # Setup FSDP and load pretrained model:
    fsdp_config = FSDPConfig(
        sharding_strategy=training_args.sharding_strategy,
        backward_prefetch=training_args.backward_prefetch,
        cpu_offload=training_args.cpu_offload,
        num_replicate=training_args.num_replicate,
        num_shard=training_args.num_shard,
    )
    ema_model = deepcopy(model)
    model, ema_model = FSDPCheckpoint.try_load_ckpt(
        resume_from, logger, model, ema_model, resume_from_ema=finetune_from_ema
    )
    ema_model = fsdp_ema_setup(ema_model, fsdp_config)
    fsdp_model = fsdp_wrapper(model, fsdp_config)
    apply_activation_checkpointing(
        fsdp_model,
        checkpoint_wrapper_fn=functools.partial(
            checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT
        ),
        check_fn=grad_checkpoint_check_fn
    )

    if dist.get_rank() == 0:
        print(fsdp_model)
        for name, param in model.named_parameters():
            print(name, param.requires_grad)

    # Setup optimizer and scheduler
    optimizer = torch.optim.AdamW(
        fsdp_model.parameters(),
        lr=training_args.lr,
        betas=(training_args.beta1, training_args.beta2),
        eps=training_args.eps,
        weight_decay=0
    )
    if training_args.lr_scheduler == 'cosine':
        scheduler = get_cosine_with_min_lr_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=training_args.warmup_steps,
            num_training_steps=training_args.total_steps,
            min_lr=training_args.min_lr,
        )
    elif training_args.lr_scheduler == 'constant':
        scheduler = get_constant_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=training_args.warmup_steps
        )
    else:
        raise ValueError(f"Unsupported lr_scheduler: {training_args.lr_scheduler}")

    # Maybe resume optimizer, scheduler, and train_steps
    if resume_model_only:
        train_step = 0
        data_status = None
    else:
        optimizer, scheduler, train_step, data_status = FSDPCheckpoint.try_load_train_state(
            resume_from, optimizer, scheduler, fsdp_config,
        )

    # Setup packed dataloader
    with open(data_args.dataset_config_file, "r") as stream:
        dataset_meta = yaml.safe_load(stream)
    dataset_config = DataConfig(grouped_datasets=dataset_meta)
    if training_args.visual_und:
        dataset_config.vit_patch_size = model_args.vit_patch_size
        dataset_config.max_num_patch_per_side = model_args.vit_max_num_patch_per_side
    if training_args.visual_gen:
        vae_image_downsample = model_args.latent_patch_size * vae_config.downsample
        dataset_config.vae_image_downsample = vae_image_downsample
        dataset_config.max_latent_size = model_args.max_latent_size
        dataset_config.text_cond_dropout_prob = model_args.text_cond_dropout_prob
        dataset_config.vae_cond_dropout_prob = model_args.vae_cond_dropout_prob
        dataset_config.vit_cond_dropout_prob = model_args.vit_cond_dropout_prob
    train_dataset = PackedDataset(
        dataset_config,
        tokenizer=tokenizer,
        special_tokens=new_token_ids,
        local_rank=dist.get_rank(),
        world_size=dist.get_world_size(),
        num_workers=data_args.num_workers,
        expected_num_tokens=training_args.expected_num_tokens,
        max_num_tokens_per_sample=data_args.max_num_tokens_per_sample,
        max_num_tokens=data_args.max_num_tokens,
        max_buffer_size=data_args.max_buffer_size,
        prefer_buffer_before=data_args.prefer_buffer_before,
        interpolate_pos=model_args.interpolate_pos,
        use_flex=training_args.use_flex,
        data_status=data_status,
    )
    train_dataset.set_epoch(data_args.data_seed)
    train_loader = DataLoader(
        train_dataset,
        batch_size=1,  # batch size is 1 because samples are already packed by PackedDataset
        num_workers=data_args.num_workers,
        pin_memory=True,
        collate_fn=collate_wrapper(),
        drop_last=True,
        prefetch_factor=data_args.prefetch_factor,
    )

    # Prepare models for training:
    if training_args.visual_gen:
        vae_model.to(device).eval()
    fsdp_model.train()
    ema_model.eval()

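    # The packed batch from collate_wrapper() behaves like a mapping once
    # .to_dict() is called in the loop below. Fields this script reads (other
    # keys may exist depending on PackedDataset): 'padded_images' (VAE encoder
    # input), 'sample_lens', 'ce_loss_indexes', 'ce_loss_weights',
    # 'mse_loss_indexes', and 'batch_data_indexes' for resume bookkeeping.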
    # train loop
    start_time = time()
    logger.info(f"Training for {training_args.total_steps} steps, starting at {train_step}...")
    for curr_step, data in enumerate(train_loader, start=train_step):
        data = data.cuda(device).to_dict()
        data_indexes = data.pop('batch_data_indexes', None)
        ce_loss_weights = data.pop('ce_loss_weights', None)
        with torch.amp.autocast("cuda", enabled=True, dtype=torch.bfloat16):
            if training_args.visual_gen:
                with torch.no_grad():
                    data['padded_latent'] = vae_model.encode(data.pop('padded_images'))
            loss_dict = fsdp_model(**data)

        loss = 0
        ce = loss_dict["ce"]
        if ce is not None:
            # Normalize by the global CE token count across ranks; the world_size
            # factor compensates for the gradient averaging done by FSDP.
            total_ce_tokens = torch.tensor(len(data['ce_loss_indexes']), device=device)
            dist.all_reduce(total_ce_tokens, op=dist.ReduceOp.SUM)
            if training_args.ce_loss_reweighting:
                ce = ce * ce_loss_weights
                total_ce_loss_weights = ce_loss_weights.sum()
                dist.all_reduce(total_ce_loss_weights, op=dist.ReduceOp.SUM)
                ce = ce.sum() * dist.get_world_size() / total_ce_loss_weights
            else:
                ce = ce.sum() * dist.get_world_size() / total_ce_tokens
            loss_dict["ce"] = ce.detach()
            loss = loss + ce * training_args.ce_weight
        else:
            assert not training_args.visual_und
            loss_dict["ce"] = torch.tensor(0, device=device)
            total_ce_tokens = torch.tensor(0, device=device)

        if training_args.visual_gen:
            mse = loss_dict["mse"]
            total_mse_tokens = torch.tensor(len(data['mse_loss_indexes']), device=device)
            dist.all_reduce(total_mse_tokens, op=dist.ReduceOp.SUM)
            mse = mse.mean(dim=-1).sum() * dist.get_world_size() / total_mse_tokens
            loss_dict["mse"] = mse.detach()
            loss = loss + mse * training_args.mse_weight
        else:
            assert not training_args.visual_gen
            loss_dict["mse"] = torch.tensor(0, device=device)
            total_mse_tokens = torch.tensor(0, device=device)

        optimizer.zero_grad()
        loss.backward()
        total_norm = fsdp_model.clip_grad_norm_(training_args.max_grad_norm)
        optimizer.step()
        scheduler.step()
        fsdp_ema_update(ema_model, fsdp_model, decay=training_args.ema)

        # Log loss values:
        if curr_step % training_args.log_every == 0:
            total_samples = torch.tensor(len(data['sample_lens']), device=device)
            dist.all_reduce(total_samples, op=dist.ReduceOp.SUM)

            # Measure training speed:
            torch.cuda.synchronize()
            end_time = time()
            steps_per_sec = training_args.log_every / (end_time - start_time)
            message = f"(step={curr_step:07d}) "
            wandb_log = {}
            for key, value in loss_dict.items():
                # Reduce loss history over all processes:
                avg_loss = torch.tensor(value.item(), device=device)
                dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
                avg_loss = avg_loss.item() / dist.get_world_size()
                message += f"Train Loss {key}: {avg_loss:.4f}, "
                wandb_log[key] = avg_loss
            message += f"Train Steps/Sec: {steps_per_sec:.2f}, "
            logger.info(message)

            wandb_log['lr'] = optimizer.param_groups[0]['lr']
            wandb_log['total_mse_tokens'] = total_mse_tokens.item()
            wandb_log['total_ce_tokens'] = total_ce_tokens.item()
            wandb_log['total_norm'] = total_norm.item()
            wandb_log['total_samples'] = total_samples.item()

            mem_allocated = torch.tensor(torch.cuda.max_memory_allocated() / 1024**2, device=device)
            dist.all_reduce(mem_allocated, op=dist.ReduceOp.MAX)
            wandb_log['mem_allocated'] = mem_allocated
            mem_cache = torch.tensor(torch.cuda.max_memory_reserved() / 1024**2, device=device)
            dist.all_reduce(mem_cache, op=dist.ReduceOp.MAX)
            wandb_log['mem_cache'] = mem_cache

            if dist.get_rank() == 0:
                wandb.log(wandb_log, step=curr_step)
            start_time = time()

        # Track per-dataset read positions so training can resume where it left off:
        if data_status is None:
            data_status = {}
        for item in data_indexes:
            if item['dataset_name'] not in data_status.keys():
                data_status[item['dataset_name']] = {}
            data_status[item['dataset_name']][item['worker_id']] = item['data_indexes']

        if curr_step > 0 and curr_step % training_args.save_every == 0:
            if dist.get_rank() == 0:
                gather_list = [None] * dist.get_world_size()
            else:
                gather_list = None
            dist.gather_object(data_status, gather_list, dst=0)

            FSDPCheckpoint.fsdp_save_ckpt(
                ckpt_dir=training_args.checkpoint_dir,
                train_steps=curr_step,
                model=fsdp_model,
                ema_model=ema_model,
                optimizer=optimizer,
                scheduler=scheduler,
                logger=logger,
                fsdp_config=fsdp_config,
                data_status=gather_list
            )

    logger.info("Done!")
    if dist.get_rank() == 0:
        wandb.finish()
    dist.destroy_process_group()


if __name__ == "__main__":
    main()