# train_utils.py

# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import os
import time
import yaml
import torch.nn as nn
from contextlib import nullcontext
from pathlib import Path
from datetime import datetime
import contextlib
import torch.nn.functional as F
import torch
import torch.cuda.nccl as nccl
import torch.distributed as dist
from torch.distributed.fsdp import StateDictType
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
from tqdm import tqdm
from transformers import LlamaTokenizer
import json
from llama_recipes.model_checkpointing import save_fsdp_model_checkpoint_full, save_model_and_optimizer_sharded, save_optimizer_checkpoint, save_peft_checkpoint, save_model_checkpoint
from llama_recipes.policies import fpSixteen, bfSixteen, get_llama_wrapper
from llama_recipes.utils.memory_utils import MemoryTrace
from accelerate.utils import is_xpu_available, is_ccl_available
from llama_recipes.utils.flop_utils import FlopMeasure


def set_tokenizer_params(tokenizer: LlamaTokenizer):
    tokenizer.pad_token_id = 0
    tokenizer.padding_side = "left"


@contextlib.contextmanager
def profile(cfg, local_rank=None):
    use_profiler: bool = cfg.use_profiler
    use_flop_counter: bool = cfg.flop_counter
    if use_flop_counter and use_profiler:
        raise ValueError("Cannot use both profiler and flop counter")
    if use_profiler:
        # the profiler needs a warm-up stage to get accurate profiling results
        wait_step, warmup_step, active_step = 1, 2, 3
        min_step = wait_step + warmup_step + active_step + 1
        if cfg.max_train_step > 0 and cfg.max_train_step < min_step:
            raise ValueError(f"pytorch profiler requires at least {min_step} train steps to finish the warm-up and recording stage, {wait_step} for wait_step, {warmup_step} for warmup_step, {active_step} for profiling step, please increase the max_train_step, current max_train_step {cfg.max_train_step}")
        print(f"pytorch profiling is activated and results will be saved in {cfg.profiler_dir}")
        with torch.profiler.profile(
            activities=[
                torch.profiler.ProfilerActivity.CPU,
                torch.profiler.ProfilerActivity.CUDA,
            ],
            schedule=torch.profiler.schedule(wait=wait_step, warmup=warmup_step, active=active_step, repeat=1),
            on_trace_ready=torch.profiler.tensorboard_trace_handler(
                cfg.profiler_dir
            ),
            profile_memory=True,
            with_stack=False,
            with_flops=True,
            record_shapes=True,
        ) as torch_profiler:
            yield torch_profiler
    elif use_flop_counter:
        if cfg.max_train_step > 0 and cfg.max_train_step <= cfg.flop_counter_start:
            raise ValueError(f"flop counter requires at least {cfg.flop_counter_start + 1} train steps, please increase the max_train_step, current max_train_step {cfg.max_train_step}")
        with FlopMeasure(rank=local_rank, warmup_step=cfg.flop_counter_start) as flop_counter:
            yield flop_counter
    else:
        yield None


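# Illustrative usage sketch (not part of the original utilities): how `profile`
# is intended to wrap a training loop, mirroring its use in the train functions
# below. `_DemoProfileCfg` is a hypothetical stand-in for the real train config;
# only the attributes that `profile` actually reads are defined.
def _demo_profile_usage():
    class _DemoProfileCfg:
        use_profiler = False       # set True to emit a TensorBoard trace to profiler_dir
        flop_counter = False       # set True to measure TFLOPs via FlopMeasure instead
        profiler_dir = "profiler_out"
        flop_counter_start = 3
        max_train_step = 0         # 0 means "no step limit"

    cfg = _DemoProfileCfg()
    with profile(cfg, local_rank=0) as ctx:
        for _step in range(5):
            # ... forward / backward / optimizer step would go here ...
            if cfg.use_profiler or cfg.flop_counter:
                ctx.step()  # advance the profiler/flop-counter schedule

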
def get_value_loss(logits, scores, labels, tokenizer, xpo_hyper, beta):
    # TODO: test whether this one-token shift is needed
    labels = labels[:, 1:].clone()
    logits = logits[:, :-1, :]
    labels[labels == -100] = tokenizer.pad_token_id
    per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)
    loss_mask = labels != tokenizer.pad_token_id
    log_prob = (per_token_logps * loss_mask).sum(-1)
    average_log_prob = beta * (log_prob / loss_mask.sum(-1) + xpo_hyper)
    predicted_probs = torch.sigmoid(average_log_prob)
    # Up-weight positive examples (score == 1) relative to negatives in the BCE
    weights = torch.where(scores == 1, torch.tensor(2.0), torch.tensor(1.0))
    scores = scores.to(predicted_probs.dtype)
    value_loss = F.binary_cross_entropy(predicted_probs, scores, weight=weights, reduction='mean')
    return value_loss, average_log_prob


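# Minimal smoke test for `get_value_loss` (illustrative, random tensors; not
# part of the original file). `_DemoTokenizer` is a hypothetical stub: the loss
# only reads `pad_token_id`. With a vocab of 32, a random per-token log-prob is
# about log(1/32) ~= -3.47, so xpo_hyper=3.5 roughly centers the sigmoid.
def _demo_get_value_loss():
    class _DemoTokenizer:
        pad_token_id = 0

    batch, seq_len, vocab = 4, 8, 32
    logits = torch.randn(batch, seq_len, vocab)
    labels = torch.randint(1, vocab, (batch, seq_len))
    labels[:, :2] = -100                             # masked prompt tokens
    scores = torch.tensor([1.0, 0.0, 1.0, 0.0])      # binary preference labels
    value_loss, avg_log_prob = get_value_loss(
        logits, scores, labels, _DemoTokenizer(), xpo_hyper=3.5, beta=1.0
    )
    print(f"value_loss={value_loss.item():.4f}, avg_log_prob shape={tuple(avg_log_prob.shape)}")

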
def get_calibration_loss(logits, scores, labels, tokenizer, config):
    def get_pearson_correlation_per_batch(z_prob, z_score):
        # Compute covariance between z_prob and z_score for each group (dim=1)
        mean_z_prob = z_prob.mean(dim=1, keepdim=True)
        mean_z_score = z_score.mean(dim=1, keepdim=True)
        cov_z_prob_z_score = ((z_prob - mean_z_prob) * (z_score - mean_z_score)).mean(dim=1)
        # Standard deviations for each group (dim=1)
        std_z_prob = z_prob.std(dim=1)
        std_z_score = z_score.std(dim=1)
        # Pearson correlation coefficient per group
        pearson_corr_per_batch = cov_z_prob_z_score / (std_z_prob * std_z_score + 1e-6)
        # Return the average Pearson correlation over the batch
        return pearson_corr_per_batch.mean()

    labels = labels[:, 1:].clone()
    logits = logits[:, :-1, :]
    labels[labels == -100] = tokenizer.pad_token_id
    per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)
    loss_mask = labels != tokenizer.pad_token_id
    log_prob = (per_token_logps * loss_mask).sum(-1)  # [N]
    norm_log_prob = log_prob / loss_mask.sum(-1)  # [N]
    list_size = config.list_size
    norm_log_prob_grouped = norm_log_prob.view(-1, list_size)  # [N / list_size, list_size]
    log_prob_grouped = log_prob.view(-1, list_size)  # [N / list_size, list_size]
    # Get z-scores of the (normalized) log-probs within each candidate list
    if config.normalize_prob:
        prob_mean = norm_log_prob_grouped.mean(dim=1, keepdim=True)
        prob_std = norm_log_prob_grouped.std(dim=1, keepdim=True) + 1e-6
        z_prob = (norm_log_prob_grouped - prob_mean) / prob_std
    else:
        prob_mean = log_prob_grouped.mean(dim=1, keepdim=True)
        prob_std = log_prob_grouped.std(dim=1, keepdim=True) + 1e-6
        z_prob = (log_prob_grouped - prob_mean) / prob_std
    scores = scores.to(z_prob.dtype)
    scores_grouped = scores.view(-1, list_size)
    scores_mean = scores_grouped.mean(dim=1, keepdim=True)
    scores_std = scores_grouped.std(dim=1, keepdim=True) + 1e-6
    z_score = (scores_grouped - scores_mean) / scores_std
    # Calibration loss: push the z-scored log-probs to correlate with the z-scored reward scores
    loss = -(z_prob * z_score).mean()
    pearson = get_pearson_correlation_per_batch(z_prob, z_score)
    # NLL on the candidate with the maximum z_score in each list
    max_z_score_indices = z_score.argmax(dim=1)
    nll_loss = -norm_log_prob_grouped.gather(1, max_z_score_indices.unsqueeze(1)).mean()
    # NLL on the candidate with the minimum z_score in each list
    min_z_score_indices = z_score.argmin(dim=1)
    reject_nll_loss = -norm_log_prob_grouped.gather(1, min_z_score_indices.unsqueeze(1)).mean()
    # CPO loss: pairwise logistic loss between the chosen and rejected candidates
    chose_nll = log_prob_grouped.gather(1, max_z_score_indices.unsqueeze(1))
    reject_nll = log_prob_grouped.gather(1, min_z_score_indices.unsqueeze(1))
    cpo_loss = -F.logsigmoid(0.1 * chose_nll - 0.1 * reject_nll).mean()
    win_rate = (chose_nll > reject_nll).float().mean().item()
    print("=== DEBUG ===")
    print("average_log_prob:", norm_log_prob, flush=True)
    print("log_prob_grouped:", norm_log_prob_grouped, flush=True)
    print("z_prob:", z_prob, flush=True)
    print("scores_grouped:", scores_grouped, flush=True)
    print("z_score:", z_score, flush=True)
    print("Pearson: {}".format(pearson), flush=True)
    print("Calibration Loss: {}".format(loss), flush=True)
    print("CPO Loss: {}".format(cpo_loss), flush=True)
    print("Win Rate: {}".format(win_rate), flush=True)
    return loss, pearson, nll_loss, reject_nll_loss, cpo_loss


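# Minimal smoke test for `get_calibration_loss` (illustrative, random tensors;
# not part of the original file). `_DemoCalibConfig` is a hypothetical config
# stub; the batch dimension must be a multiple of `list_size`, since candidates
# are regrouped into lists of `list_size` per prompt.
def _demo_get_calibration_loss():
    class _DemoTokenizer:
        pad_token_id = 0

    class _DemoCalibConfig:
        list_size = 3
        normalize_prob = True

    batch, seq_len, vocab = 6, 8, 32                 # 6 = 2 prompts x 3 candidates
    logits = torch.randn(batch, seq_len, vocab)
    labels = torch.randint(1, vocab, (batch, seq_len))
    scores = torch.rand(batch)                       # one quality score per candidate
    loss, pearson, nll, reject_nll, cpo = get_calibration_loss(
        logits, scores, labels, _DemoTokenizer(), _DemoCalibConfig()
    )
    print(f"calibration={loss.item():.4f}, pearson={pearson.item():.4f}, cpo={cpo.item():.4f}")

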
def get_listwise_loss(logits, scores, labels, tokenizer, xpo_hyper, beta, list_size):
    def calculate_kl_divergence_loss(scores, predicted_probs, temperature=1.0):
        # Check that the inputs can be reshaped to (N / list_size, list_size)
        if scores.numel() % list_size != 0 or predicted_probs.numel() % list_size != 0:
            raise ValueError("The lengths of scores and predicted_probs must be multiples of list_size")
        reshaped_scores = scores.view(-1, list_size)
        reshaped_predicted_probs = predicted_probs.view(-1, list_size)
        # Softmax-normalize both distributions over each candidate list
        true_probs = F.softmax(reshaped_scores / temperature, dim=-1)        # softmax of the true scores
        pred_probs = F.softmax(reshaped_predicted_probs / temperature, dim=-1)  # softmax of the predicted scores
        # KL divergence between the predicted and true list distributions, normalized with batchmean
        kl_loss = F.kl_div(pred_probs.log(), true_probs, reduction='batchmean')
        return kl_loss

    def soft_rank_loss(scores, predicted_probs, temperature=1.0):
        scores = scores.unsqueeze(0) if scores.ndim == 1 else scores
        predicted_probs = predicted_probs.unsqueeze(0) if predicted_probs.ndim == 1 else predicted_probs
        pairwise_diff = scores - scores.T
        pairwise_prob = torch.sigmoid(pairwise_diff / temperature)
        soft_ranks = pairwise_prob.sum(dim=-1)
        return soft_ranks

    labels = labels[:, 1:].clone()
    logits = logits[:, :-1, :]
    labels[labels == -100] = tokenizer.pad_token_id
    per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)
    loss_mask = labels != tokenizer.pad_token_id
    log_prob = (per_token_logps * loss_mask).sum(-1)
    average_log_prob = beta * (log_prob / loss_mask.sum(-1) + xpo_hyper)
    predicted_probs = torch.sigmoid(average_log_prob)
    scores = scores.to(predicted_probs.dtype)
    value_loss = F.binary_cross_entropy(predicted_probs, scores, reduction='mean')
    # Listwise KL loss over each group of list_size candidates
    kl_loss = calculate_kl_divergence_loss(scores, predicted_probs)
    return value_loss, kl_loss, average_log_prob


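# Minimal smoke test for `get_listwise_loss` (illustrative, random tensors; not
# part of the original file). Scores are drawn from [0, 1) because the BCE term
# treats them as probabilities; the KL term then compares softmax distributions
# over each list of `list_size` candidates.
def _demo_get_listwise_loss():
    class _DemoTokenizer:
        pad_token_id = 0

    batch, seq_len, vocab, list_size = 6, 8, 32, 3
    logits = torch.randn(batch, seq_len, vocab)
    labels = torch.randint(1, vocab, (batch, seq_len))
    scores = torch.rand(batch)
    value_loss, kl_loss, avg_log_prob = get_listwise_loss(
        logits, scores, labels, _DemoTokenizer(), xpo_hyper=3.5, beta=1.0, list_size=list_size
    )
    print(f"value_loss={value_loss.item():.4f}, kl_loss={kl_loss.item():.4f}")

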
class ScaledValueLoss(nn.Module):
    def __init__(self, beta_init=1.0, xpo_hyper=20.0):
        super().__init__()
        self.beta = nn.Parameter(torch.tensor(float(beta_init), requires_grad=True))
        self.xpo_hyper = nn.Parameter(torch.tensor(float(xpo_hyper), requires_grad=True))

    def forward(self, logits, scores, labels, tokenizer):
        # Note: unlike get_value_loss, no one-token shift is applied here;
        # logits and labels are assumed to be already aligned.
        labels[labels == -100] = tokenizer.pad_token_id
        per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)
        loss_mask = labels != tokenizer.pad_token_id
        log_prob = (per_token_logps * loss_mask).sum(-1)
        average_log_prob = self.beta * (log_prob / loss_mask.sum(-1) + self.xpo_hyper)
        predicted_probs = torch.sigmoid(average_log_prob)
        scores = scores.to(predicted_probs.dtype)
        value_loss = F.binary_cross_entropy(predicted_probs, scores, reduction='mean')
        return value_loss, predicted_probs


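# Usage sketch for `ScaledValueLoss` (illustrative; not part of the original
# file). The point of the module is that `beta` and `xpo_hyper` are learnable
# scalars, so calling .backward() populates their gradients and an optimizer
# can tune the scale/offset jointly with the model.
def _demo_scaled_value_loss():
    class _DemoTokenizer:
        pad_token_id = 0

    loss_module = ScaledValueLoss(beta_init=1.0, xpo_hyper=3.5)
    batch, seq_len, vocab = 4, 8, 32
    logits = torch.randn(batch, seq_len, vocab)
    labels = torch.randint(1, vocab, (batch, seq_len))   # already aligned: no shift inside forward()
    scores = torch.tensor([1.0, 0.0, 1.0, 0.0])
    loss, probs = loss_module(logits, scores, labels, _DemoTokenizer())
    loss.backward()
    print(f"grad(beta)={loss_module.beta.grad}, grad(xpo_hyper)={loss_module.xpo_hyper.grad}")

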
def get_normalized_value_loss(logits, scores, labels, tokenizer, xpo_hyper, beta):
    # TODO: test whether this one-token shift is needed
    labels = labels[:, 1:].clone()
    logits = logits[:, :-1, :]
    labels[labels == -100] = tokenizer.pad_token_id
    per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)
    loss_mask = labels != tokenizer.pad_token_id
    log_prob = (per_token_logps * loss_mask).sum(-1)
    log_prob = log_prob / loss_mask.sum(-1)
    # Normalize by centering on the (detached) batch-mean log-prob
    log_prob_batch = log_prob.mean().detach()
    average_logits = (log_prob - log_prob_batch) * beta + xpo_hyper
    predicted_probs = torch.sigmoid(average_logits)
    scores = scores.to(predicted_probs.dtype)
    value_loss = F.binary_cross_entropy(predicted_probs, scores, reduction='mean')
    return value_loss, average_logits


def get_z_loss(logits, scores, labels, tokenizer, xpo_hyper):
    labels[labels == -100] = tokenizer.pad_token_id
    per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)
    loss_mask = labels != tokenizer.pad_token_id
    log_prob = (per_token_logps * loss_mask).sum(-1)
    log_prob = log_prob / loss_mask.sum(-1)
    # z-score the per-sequence log-probs against the (detached) batch statistics
    mean = log_prob.mean().detach()
    std = log_prob.std().detach() + 1e-6
    z_score = (log_prob - mean) / std
    scores = scores.to(z_score.dtype)
    print(z_score, flush=True)
    print(scores, flush=True)
    value_loss = F.mse_loss(z_score, scores, reduction='mean')
    return value_loss, z_score


def compute_value_loss(train_config, logits, scores, labels, tokenizer, xpo_hyper, beta, loss_module, config=None):
    """
    Computes the value loss based on the configuration in train_config.

    Args:
        train_config: Configuration object with loss settings.
        logits: Model logits.
        scores: Target scores.
        labels: Ground truth labels.
        tokenizer: Tokenizer instance.
        xpo_hyper: Offset hyperparameter for the loss computation.
        beta: Scale hyperparameter for the loss computation.
        loss_module: ScaledValueLoss instance (used when train_config.scaled_value_loss is set).
        config: Configuration providing list_size/normalize_prob (used for the listwise loss).

    Returns:
        value_loss and the average logits (or a related metric) from the chosen loss;
        the listwise path instead returns (loss, pearson, nll_loss, reject_nll_loss, cpo_loss).
    """
    if train_config.normalized_value_loss:
        print("==== use normalized_value_loss ====", flush=True)
        return get_normalized_value_loss(logits, scores, labels, tokenizer, xpo_hyper, beta)
    elif train_config.z_value_loss:
        print("==== use z_value_loss ====", flush=True)
        return get_z_loss(logits, scores, labels, tokenizer, xpo_hyper)
    elif train_config.scaled_value_loss:
        print("==== use scaled value_loss ====", flush=True)
        return loss_module(logits, scores, labels, tokenizer)
    elif train_config.listwise_loss:
        print("==== use listwise_loss ====", flush=True)
        return get_calibration_loss(logits, scores, labels, tokenizer, config)
    else:
        print("==== use value_loss ====", flush=True)
        return get_value_loss(logits, scores, labels, tokenizer, xpo_hyper, beta)


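# Dispatch sketch for `compute_value_loss` (illustrative; not part of the
# original file). `_DemoTrainConfig` is a hypothetical stand-in showing the
# four boolean flags the dispatcher checks, in priority order; with all flags
# False it falls through to the plain `get_value_loss`.
def _demo_compute_value_loss_dispatch():
    class _DemoTokenizer:
        pad_token_id = 0

    class _DemoTrainConfig:
        normalized_value_loss = False
        z_value_loss = False
        scaled_value_loss = False     # would require passing a ScaledValueLoss as loss_module
        listwise_loss = False         # would require list_size/normalize_prob on config

    batch, seq_len, vocab = 4, 8, 32
    logits = torch.randn(batch, seq_len, vocab)
    labels = torch.randint(1, vocab, (batch, seq_len))
    scores = torch.tensor([1.0, 0.0, 1.0, 0.0])
    value_loss, avg_logits = compute_value_loss(
        _DemoTrainConfig(), logits, scores, labels, _DemoTokenizer(),
        xpo_hyper=3.5, beta=1.0, loss_module=None,
    )
    print(f"value_loss={value_loss.item():.4f}")

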
def XPO_train(model, train_dataloader, eval_dataloader, tokenizer, optimizer, lr_scheduler, gradient_accumulation_steps,
              train_config, loss_module, fsdp_config=None, local_rank=None, rank=None, wandb_run=None):
    """
    Trains the model on the given dataloader with the XPO objective.

    Args:
        model: The model to be trained
        train_dataloader: The dataloader containing the training data
        eval_dataloader: The dataloader containing the eval data
        tokenizer: tokenizer used in the eval for decoding the predictions
        optimizer: The optimizer used for training
        lr_scheduler: The learning rate scheduler
        gradient_accumulation_steps: The number of steps to accumulate gradients before performing a backward/update operation
        train_config: The training configuration
        loss_module: ScaledValueLoss instance used when train_config.scaled_value_loss is set
        local_rank: The rank of the current node in a distributed setting

    Returns: results dictionary containing average training and validation perplexity and loss
    """
    # Create a gradient scaler for fp16
    if train_config.use_fp16 and train_config.enable_fsdp:
        scaler = ShardedGradScaler()
    elif train_config.use_fp16 and not train_config.enable_fsdp:
        scaler = torch.cuda.amp.GradScaler()
    if train_config.enable_fsdp:
        world_size = int(os.environ["WORLD_SIZE"])

    autocast = torch.cuda.amp.autocast if train_config.use_fp16 else nullcontext

    train_prep = []
    train_loss = []
    val_prep = []
    val_loss = []

    if train_config.save_metrics:
        if not os.path.exists(train_config.output_dir):
            os.makedirs(train_config.output_dir, exist_ok=True)
        metrics_filename = f"{train_config.output_dir}/metrics_data_{local_rank}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.json"
        train_step_perplexity = []
        train_step_loss = []
        val_step_loss = []
        val_step_perplexity = []

    epoch_times = []
    checkpoint_times = []
    results = {}
    best_val_loss = float("inf")
    total_train_steps = 0
    max_steps_reached = False  # Flag to indicate max training steps reached

    # Start the training loop
    for epoch in range(train_config.num_epochs):
        print(f"Starting epoch {epoch}/{train_config.num_epochs}")
        print(f"train_config.max_train_step: {train_config.max_train_step}")
        # stop when the maximum number of training steps is reached
        if max_steps_reached:
            break
        epoch_start_time = time.perf_counter()
        with MemoryTrace() as memtrace:  # track the memory usage
            model.train()
            total_loss = 0.0
            total_length = len(train_dataloader) // gradient_accumulation_steps
            pbar = tqdm(colour="blue", desc=f"Training Epoch: {epoch + 1}", total=total_length, dynamic_ncols=True)
            with profile(train_config, local_rank) as profile_context:
                for step, batch in enumerate(train_dataloader):
                    total_train_steps += 1
                    # stop when the maximum number of training steps is reached
                    if train_config.max_train_step > 0 and total_train_steps > train_config.max_train_step:
                        max_steps_reached = True
                        if not train_config.enable_fsdp or local_rank == 0:
                            print("max training steps reached, stopping training, total train steps finished: ",
                                  total_train_steps - 1)
                        break
                    for key in batch.keys():
                        if train_config.enable_fsdp:
                            if is_xpu_available():
                                batch[key] = batch[key].to(torch.device(f"xpu:{local_rank}"))
                            else:
                                batch[key] = batch[key].to(local_rank)
                        else:
                            if is_xpu_available():
                                batch[key] = batch[key].to('xpu:0')
                            elif torch.cuda.is_available():
                                batch[key] = batch[key].to('cuda:0')
                    with autocast():
                        # decoded_texts = [tokenizer.decode(input_ids, skip_special_tokens=True) for input_ids in batch['input_ids']]
                        # for text in decoded_texts:
                        #     print(text, flush=True)
                        # print(batch)
                        model_output = model(**batch)
                        loss = model_output.loss
                        logits = model_output.logits
                        scores = batch['scores']
                        labels = batch['labels']
                        xpo_hyper = train_config.xpo_hyper
                        alpha = train_config.alpha
                        beta = train_config.beta
                        gama = train_config.gama
                        if not train_config.listwise_loss:
                            value_loss, avg_logits = compute_value_loss(train_config, logits, scores, labels, tokenizer,
                                                                        xpo_hyper, beta, loss_module, train_config)
                        else:
                            value_loss, pearson, chose_nll_loss, reject_nll_loss, cpo_loss = compute_value_loss(
                                train_config, logits, scores, labels, tokenizer,
                                xpo_hyper, beta, loss_module, train_config)
                    total_loss += loss.detach().float()
                    acc_loss = loss / gradient_accumulation_steps
                    value_acc_loss = value_loss / gradient_accumulation_steps
                    if train_config.listwise_loss:
                        # list-wise: combine the chosen-candidate NLL, calibration and CPO terms
                        chose_nll_acc_loss = chose_nll_loss / gradient_accumulation_steps
                        cpo_acc_loss = cpo_loss / gradient_accumulation_steps
                        final_loss = alpha * chose_nll_acc_loss + beta * value_acc_loss + gama * cpo_acc_loss
                    elif train_config.preferred_finetune:
                        final_loss = acc_loss
                    else:
                        final_loss = alpha * acc_loss + value_acc_loss
                    if train_config.save_metrics:
                        train_step_loss.append(loss.detach().float().item())
                        train_step_perplexity.append(float(torch.exp(loss.detach().float())))
                    if train_config.use_fp16:
                        # if fp16 is enabled, use gradient scaler to handle gradient update
                        scaler.scale(final_loss).backward()
                        if (step + 1) % gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
                            if train_config.gradient_clipping and train_config.gradient_clipping_threshold > 0.0:
                                scaler.unscale_(optimizer)
                                if train_config.enable_fsdp:
                                    model.clip_grad_norm_(train_config.gradient_clipping_threshold)
                                else:
                                    torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                                   train_config.gradient_clipping_threshold)
                            scaler.step(optimizer)
                            scaler.update()
                            optimizer.zero_grad()
                            pbar.update(1)
                    else:
                        # regular backpropagation when fp16 is not used
                        final_loss.backward()
                        if (step + 1) % gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
                            if train_config.gradient_clipping and train_config.gradient_clipping_threshold > 0.0:
                                if train_config.enable_fsdp:
                                    model.clip_grad_norm_(train_config.gradient_clipping_threshold)
                                else:
                                    torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                                   train_config.gradient_clipping_threshold)
                            optimizer.step()
                            optimizer.zero_grad()
                            pbar.update(1)
                    if train_config.use_profiler or train_config.flop_counter:
                        profile_context.step()
                    if train_config.flop_counter and profile_context.is_done():
                        TFlops = profile_context.get_flops_per_sec() / 1e12
                    if wandb_run:
                        if not train_config.enable_fsdp or rank == 0:
                            if train_config.scaled_value_loss:
                                wandb_run.log({
                                    'train/epoch': epoch + 1,
                                    'train/loss': loss.detach().float(),
                                    'train/avg_logits': avg_logits.detach().float().mean().item(),
                                    'train/value_loss': value_loss.detach().float(),
                                    'train/xpo_hyper': loss_module.xpo_hyper.detach().float().item(),
                                    'train/beta': loss_module.beta.detach().float().item(),
                                })
                            elif train_config.listwise_loss:
                                wandb_run.log({
                                    'train/epoch': epoch + 1,
                                    'train/nll': loss.detach().float(),
                                    'train/chose_nll': chose_nll_loss.detach().float().mean().item(),
                                    'train/reject_nll': reject_nll_loss.detach().float().mean().item(),
                                    'train/value_loss': value_loss.detach().float(),
                                    'train/pearson': pearson.detach().float(),
                                })
                            else:
                                wandb_run.log({
                                    'train/epoch': epoch + 1,
                                    'train/loss': loss.detach().float(),
                                    'train/avg_logits': avg_logits.detach().float().mean().item(),
                                    'train/value_loss': value_loss.detach().float(),
                                })
                    pbar.set_description(
                        f"Training Epoch: {epoch + 1}/{train_config.num_epochs}, step {step}/{len(train_dataloader)}"
                        f" completed (loss: {loss.detach().float()}, value_loss: {value_loss.detach().float()})")
                    if train_config.save_metrics:
                        save_to_json(metrics_filename, train_step_loss, train_loss, train_step_perplexity, train_prep,
                                     val_step_loss, val_loss, val_step_perplexity, val_prep)
            pbar.close()

        epoch_end_time = time.perf_counter() - epoch_start_time
        epoch_times.append(epoch_end_time)
        # Reducing total_loss across all devices if there's more than one CUDA device
        if is_xpu_available() and (torch.xpu.device_count() > 1 and train_config.enable_fsdp):
            dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)
        elif torch.cuda.device_count() > 1 and train_config.enable_fsdp:
            dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)
        train_epoch_loss = total_loss / len(train_dataloader)
        if train_config.enable_fsdp:
            train_epoch_loss = train_epoch_loss / world_size
        train_perplexity = torch.exp(train_epoch_loss)
        train_prep.append(float(train_perplexity))
        train_loss.append(float(train_epoch_loss))

        if not train_config.enable_fsdp or rank == 0:
            memtrace.print_stats()

        # Update the learning rate as needed
        lr_scheduler.step()

        should_save_model = train_config.save_model
        if train_config.run_validation:
            eval_ppl, eval_epoch_loss, temp_val_loss, temp_step_perplexity = XPO_evaluation(model, train_config, loss_module,
                                                                                            eval_dataloader, local_rank,
                                                                                            tokenizer, wandb_run)
            if train_config.save_metrics:
                val_step_loss.extend(temp_val_loss)
                val_step_perplexity.extend(temp_step_perplexity)
            should_save_model = train_config.save_model and eval_epoch_loss < best_val_loss
        # TODO: currently overridden to save a checkpoint every epoch
        should_save_model = True

        checkpoint_start_time = time.perf_counter()
        if should_save_model:
            if train_config.enable_fsdp:
                dist.barrier()
            if train_config.use_peft:
                if train_config.enable_fsdp:
                    if rank == 0:
                        print("we are about to save the PEFT modules")
                else:
                    print("we are about to save the PEFT modules")
                save_peft_checkpoint(model, os.path.join(train_config.output_dir, str(epoch)))
                if train_config.enable_fsdp:
                    if rank == 0:
                        print(f"PEFT modules are saved in {train_config.output_dir} directory")
                else:
                    print(f"PEFT modules are saved in {train_config.output_dir} directory")
            else:
                if not train_config.enable_fsdp:
                    save_model_checkpoint(model, train_config.output_dir)
                elif fsdp_config.checkpoint_type == StateDictType.FULL_STATE_DICT:
                    print(" Saving the FSDP model checkpoint using FULL_STATE_DICT")
                    print("=====================================================")
                    save_fsdp_model_checkpoint_full(
                        model, optimizer, rank, train_config, epoch=epoch
                    )
                    if train_config.save_optimizer:
                        print(" Saving the FSDP optimizer using FULL_STATE_DICT")
                        print("=====================================================")
                        save_optimizer_checkpoint(
                            model, optimizer, rank, train_config, epoch=epoch
                        )
                elif fsdp_config.checkpoint_type == StateDictType.SHARDED_STATE_DICT:
                    if train_config.save_optimizer:
                        print(" Saving the FSDP model checkpoints and optimizer using SHARDED_STATE_DICT")
                        print("=====================================================")
                        save_model_and_optimizer_sharded(model, rank, train_config, optim=optimizer)
                    else:
                        print(" Saving the FSDP model checkpoints using SHARDED_STATE_DICT")
                        print("=====================================================")
                        save_model_and_optimizer_sharded(model, rank, train_config)
            if train_config.enable_fsdp:
                dist.barrier()
        checkpoint_end_time = time.perf_counter() - checkpoint_start_time
        checkpoint_times.append(checkpoint_end_time)

        if train_config.run_validation:
            if eval_epoch_loss < best_val_loss:
                best_val_loss = eval_epoch_loss
                if train_config.enable_fsdp:
                    if rank == 0:
                        print(f"best eval loss on epoch {epoch + 1} is {best_val_loss}")
                else:
                    print(f"best eval loss on epoch {epoch + 1} is {best_val_loss}")
            val_loss.append(float(eval_epoch_loss))
            val_prep.append(float(eval_ppl))
        if train_config.enable_fsdp:
            if rank == 0:
                print(
                    f"Epoch {epoch + 1}: train_perplexity={train_perplexity:.4f}, train_epoch_loss={train_epoch_loss:.4f}, epoch time {epoch_end_time}s")
        else:
            print(
                f"Epoch {epoch + 1}: train_perplexity={train_perplexity:.4f}, train_epoch_loss={train_epoch_loss:.4f}, epoch time {epoch_end_time}s")

        # Saving the results every epoch to plot later
        if train_config.save_metrics:
            save_to_json(metrics_filename, train_step_loss, train_loss, train_step_perplexity, train_prep,
                         val_step_loss, val_loss, val_step_perplexity, val_prep)
        # TODO: early stopping
        # if len(val_loss) == 1:
        #     continue
        # elif val_loss[-1] > val_loss[-2]:
        #     print("Performance drops from Epoch {}, early stop.")
        #     break

    avg_epoch_time = sum(epoch_times) / len(epoch_times)
    avg_checkpoint_time = sum(checkpoint_times) / len(checkpoint_times) if len(checkpoint_times) > 0 else 0
    avg_train_prep = sum(train_prep) / len(train_prep)
    avg_train_loss = sum(train_loss) / len(train_loss)
    if train_config.run_validation:
        avg_eval_prep = sum(val_prep) / len(val_prep)
        avg_eval_loss = sum(val_loss) / len(val_loss)

    results['avg_train_prep'] = avg_train_prep
    results['avg_train_loss'] = avg_train_loss
    if train_config.run_validation:
        results['avg_eval_prep'] = avg_eval_prep
        results['avg_eval_loss'] = avg_eval_loss
    results["avg_epoch_time"] = avg_epoch_time
    results["avg_checkpoint_time"] = avg_checkpoint_time
    if train_config.save_metrics:
        results["metrics_filename"] = metrics_filename
    if train_config.flop_counter:
        results["model_tflops"] = TFlops

    # saving the training params including fsdp setting for reference.
    if train_config.enable_fsdp and not train_config.use_peft and rank == 0:
        save_train_params(train_config, fsdp_config, rank)

    return results


def XPO_evaluation(model, train_config, loss_module, eval_dataloader, local_rank, tokenizer, wandb_run):
    """
    Evaluates the model on the given dataloader

    Args:
        model: The model to evaluate
        eval_dataloader: The dataloader containing the evaluation data
        local_rank: The rank of the current node in a distributed setting
        tokenizer: The tokenizer used to decode predictions

    Returns: eval_ppl, eval_epoch_loss, val_step_loss, val_step_perplexity
    """
    if train_config.enable_fsdp:
        world_size = int(os.environ["WORLD_SIZE"])
    model.eval()
    eval_preds = []
    val_step_loss = []
    val_step_perplexity = []
    eval_loss = 0.0  # Initialize evaluation loss
    total_eval_steps = 0
    with MemoryTrace() as memtrace:
        for step, batch in enumerate(tqdm(eval_dataloader, colour="green", desc="evaluating Epoch", dynamic_ncols=True)):
            total_eval_steps += 1
            # stop when the maximum number of eval steps is reached
            if train_config.max_eval_step > 0 and total_eval_steps > train_config.max_eval_step:
                if not train_config.enable_fsdp or local_rank == 0:
                    print("max eval steps reached, stopping evaluation, total_eval_steps: ", total_eval_steps - 1)
                break
            for key in batch.keys():
                if train_config.enable_fsdp:
                    batch[key] = batch[key].to(local_rank)
                else:
                    if is_xpu_available():
                        batch[key] = batch[key].to('xpu:0')
                    else:
                        batch[key] = batch[key].to('cuda:0')
            # Ensure no gradients are computed for this scope to save memory
            with torch.no_grad():
                # Forward pass and compute loss
                outputs = model(**batch)
                loss = outputs.loss
                if train_config.save_metrics:
                    val_step_loss.append(loss.detach().float().item())
                    val_step_perplexity.append(float(torch.exp(loss.detach().float())))
                eval_loss += loss.detach().float()
            # Decode predictions and add to evaluation predictions list
            preds = torch.argmax(outputs.logits, -1)
            eval_preds.extend(
                tokenizer.batch_decode(preds.detach().cpu().numpy(), skip_special_tokens=True)
            )
            # Get the eval value loss
            logits = outputs.logits
            scores = batch['scores']
            labels = batch['labels']
            xpo_hyper = train_config.xpo_hyper
            beta = train_config.beta
            if not train_config.listwise_loss:
                eval_value_loss, eval_avg_logits = compute_value_loss(train_config, logits, scores, labels, tokenizer,
                                                                      xpo_hyper, beta, loss_module, train_config)
            else:
                eval_value_loss, eval_pearson, eval_chose_nll, eval_reject_nll, eval_cpo_loss = compute_value_loss(
                    train_config, logits, scores, labels, tokenizer,
                    xpo_hyper, beta, loss_module, train_config)
            # The listwise metrics only exist on the listwise path, so guard the logging on it
            if wandb_run and train_config.listwise_loss:
                wandb_run.log({
                    'eval/step': total_eval_steps,
                    'eval/nll': loss.detach().float(),
                    'eval/chose_nll': eval_chose_nll.detach().float().mean().item(),
                    'eval/reject_nll': eval_reject_nll.detach().float().mean().item(),
                    'eval/value_loss': eval_value_loss.detach().float(),
                    'eval/pearson': eval_pearson.detach().float(),
                }, commit=False)
    # If there's more than one CUDA device, reduce evaluation loss across all devices
    if is_xpu_available() and (torch.xpu.device_count() > 1 and train_config.enable_fsdp):
        dist.all_reduce(eval_loss, op=dist.ReduceOp.SUM)
    if torch.cuda.device_count() > 1 and train_config.enable_fsdp:
        dist.all_reduce(eval_loss, op=dist.ReduceOp.SUM)

    # Compute average loss and perplexity
    eval_epoch_loss = eval_loss / len(eval_dataloader)
    if train_config.enable_fsdp:
        eval_epoch_loss = eval_epoch_loss / world_size
    eval_ppl = torch.exp(eval_epoch_loss)

    # Print evaluation metrics
    if train_config.enable_fsdp:
        if local_rank == 0:
            print(f" {eval_ppl=} {eval_epoch_loss=}")
    else:
        print(f" {eval_ppl=} {eval_epoch_loss=}")
    if wandb_run:
        wandb_run.log({
            'eval/perplexity': eval_ppl,
            'eval/loss': eval_epoch_loss,
        })
    return eval_ppl, eval_epoch_loss, val_step_loss, val_step_perplexity


def train(model, train_dataloader, eval_dataloader, tokenizer, optimizer, lr_scheduler, gradient_accumulation_steps,
          train_config, fsdp_config=None, local_rank=None, rank=None, wandb_run=None):
    """
    Trains the model on the given dataloader

    Args:
        model: The model to be trained
        train_dataloader: The dataloader containing the training data
        eval_dataloader: The dataloader containing the eval data
        tokenizer: tokenizer used in the eval for decoding the predictions
        optimizer: The optimizer used for training
        lr_scheduler: The learning rate scheduler
        gradient_accumulation_steps: The number of steps to accumulate gradients before performing a backward/update operation
        train_config: The training configuration
        local_rank: The rank of the current node in a distributed setting

    Returns: results dictionary containing average training and validation perplexity and loss
    """
    # Create a gradient scaler for fp16
    if train_config.use_fp16 and train_config.enable_fsdp:
        scaler = ShardedGradScaler()
    elif train_config.use_fp16 and not train_config.enable_fsdp:
        scaler = torch.cuda.amp.GradScaler()
    if train_config.enable_fsdp:
        world_size = int(os.environ["WORLD_SIZE"])

    autocast = torch.cuda.amp.autocast if train_config.use_fp16 else nullcontext

    train_prep = []
    train_loss = []
    val_prep = []
    val_loss = []

    if train_config.save_metrics:
        if not os.path.exists(train_config.output_dir):
            os.makedirs(train_config.output_dir, exist_ok=True)
        metrics_filename = f"{train_config.output_dir}/metrics_data_{local_rank}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.json"
        train_step_perplexity = []
        train_step_loss = []
        val_step_loss = []
        val_step_perplexity = []

    epoch_times = []
    checkpoint_times = []
    results = {}
    best_val_loss = float("inf")
    total_train_steps = 0
    max_steps_reached = False  # Flag to indicate max training steps reached

    # Start the training loop
    for epoch in range(train_config.num_epochs):
        print(f"Starting epoch {epoch}/{train_config.num_epochs}")
        print(f"train_config.max_train_step: {train_config.max_train_step}")
        # stop when the maximum number of training steps is reached
        if max_steps_reached:
            break
        epoch_start_time = time.perf_counter()
        with MemoryTrace() as memtrace:  # track the memory usage
            model.train()
            total_loss = 0.0
            total_length = len(train_dataloader) // gradient_accumulation_steps
            pbar = tqdm(colour="blue", desc=f"Training Epoch: {epoch + 1}", total=total_length, dynamic_ncols=True)
            with profile(train_config, local_rank) as profile_context:
                for step, batch in enumerate(train_dataloader):
                    total_train_steps += 1
                    # stop when the maximum number of training steps is reached
                    if train_config.max_train_step > 0 and total_train_steps > train_config.max_train_step:
                        max_steps_reached = True
                        if not train_config.enable_fsdp or local_rank == 0:
                            print("max training steps reached, stopping training, total train steps finished: ",
                                  total_train_steps - 1)
                        break
                    for key in batch.keys():
                        if train_config.enable_fsdp:
                            if is_xpu_available():
                                batch[key] = batch[key].to(torch.device(f"xpu:{local_rank}"))
                            else:
                                batch[key] = batch[key].to(local_rank)
                        else:
                            if is_xpu_available():
                                batch[key] = batch[key].to('xpu:0')
                            elif torch.cuda.is_available():
                                batch[key] = batch[key].to('cuda:0')
                    with autocast():
                        loss = model(**batch).loss
                    total_loss += loss.detach().float()
                    loss = loss / gradient_accumulation_steps
                    if train_config.save_metrics:
                        train_step_loss.append(loss.detach().float().item())
                        train_step_perplexity.append(float(torch.exp(loss.detach().float())))
                    if train_config.use_fp16:
                        # if fp16 is enabled, use gradient scaler to handle gradient update
                        scaler.scale(loss).backward()
                        if (step + 1) % gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
                            if train_config.gradient_clipping and train_config.gradient_clipping_threshold > 0.0:
                                scaler.unscale_(optimizer)
                                if train_config.enable_fsdp:
                                    model.clip_grad_norm_(train_config.gradient_clipping_threshold)
                                else:
                                    torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                                   train_config.gradient_clipping_threshold)
                            scaler.step(optimizer)
                            scaler.update()
                            optimizer.zero_grad()
                            pbar.update(1)
                    else:
                        # regular backpropagation when fp16 is not used
                        loss.backward()
                        if (step + 1) % gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
                            if train_config.gradient_clipping and train_config.gradient_clipping_threshold > 0.0:
                                if train_config.enable_fsdp:
                                    model.clip_grad_norm_(train_config.gradient_clipping_threshold)
                                else:
                                    torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                                   train_config.gradient_clipping_threshold)
                            optimizer.step()
                            optimizer.zero_grad()
                            pbar.update(1)
                    if train_config.use_profiler or train_config.flop_counter:
                        profile_context.step()
                    if train_config.flop_counter and profile_context.is_done():
                        TFlops = profile_context.get_flops_per_sec() / 1e12
                    if wandb_run:
                        if not train_config.enable_fsdp or rank == 0:
                            wandb_run.log({
                                'train/epoch': epoch + 1,
                                'train/step': epoch * len(train_dataloader) + step,
                                'train/loss': loss.detach().float(),
                            })
                    pbar.set_description(
                        f"Training Epoch: {epoch + 1}/{train_config.num_epochs}, step {step}/{len(train_dataloader)} completed (loss: {loss.detach().float()})")
                    if train_config.save_metrics:
                        save_to_json(metrics_filename, train_step_loss, train_loss, train_step_perplexity, train_prep,
                                     val_step_loss, val_loss, val_step_perplexity, val_prep)
            pbar.close()

        epoch_end_time = time.perf_counter() - epoch_start_time
        epoch_times.append(epoch_end_time)
        # Reducing total_loss across all devices if there's more than one CUDA device
        if is_xpu_available() and (torch.xpu.device_count() > 1 and train_config.enable_fsdp):
            dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)
        elif torch.cuda.device_count() > 1 and train_config.enable_fsdp:
            dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)
        train_epoch_loss = total_loss / len(train_dataloader)
        if train_config.enable_fsdp:
            train_epoch_loss = train_epoch_loss / world_size
        train_perplexity = torch.exp(train_epoch_loss)
        train_prep.append(float(train_perplexity))
        train_loss.append(float(train_epoch_loss))

        if not train_config.enable_fsdp or rank == 0:
            memtrace.print_stats()

        # Update the learning rate as needed
        lr_scheduler.step()

        should_save_model = train_config.save_model
        if train_config.run_validation:
            eval_ppl, eval_epoch_loss, temp_val_loss, temp_step_perplexity = evaluation(model, train_config,
                                                                                        eval_dataloader, local_rank,
                                                                                        tokenizer, wandb_run)
            if train_config.save_metrics:
                val_step_loss.extend(temp_val_loss)
                val_step_perplexity.extend(temp_step_perplexity)
            should_save_model = train_config.save_model and eval_epoch_loss < best_val_loss
        # TODO: currently overridden to save a checkpoint every epoch
        should_save_model = True

        checkpoint_start_time = time.perf_counter()
        if should_save_model:
            if train_config.enable_fsdp:
                dist.barrier()
            if train_config.use_peft:
                if train_config.enable_fsdp:
                    if rank == 0:
                        print("we are about to save the PEFT modules")
                else:
                    print("we are about to save the PEFT modules")
                save_peft_checkpoint(model, os.path.join(train_config.output_dir, str(epoch)))
                if train_config.enable_fsdp:
                    if rank == 0:
                        print(f"PEFT modules are saved in {train_config.output_dir} directory")
                else:
                    print(f"PEFT modules are saved in {train_config.output_dir} directory")
            else:
                if not train_config.enable_fsdp:
                    save_model_checkpoint(model, train_config.output_dir)
                elif fsdp_config.checkpoint_type == StateDictType.FULL_STATE_DICT:
                    print(" Saving the FSDP model checkpoint using FULL_STATE_DICT")
                    print("=====================================================")
                    save_fsdp_model_checkpoint_full(
                        model, optimizer, rank, train_config, epoch=epoch
                    )
                    if train_config.save_optimizer:
                        print(" Saving the FSDP optimizer using FULL_STATE_DICT")
                        print("=====================================================")
                        save_optimizer_checkpoint(
                            model, optimizer, rank, train_config, epoch=epoch
                        )
                elif fsdp_config.checkpoint_type == StateDictType.SHARDED_STATE_DICT:
                    if train_config.save_optimizer:
                        print(" Saving the FSDP model checkpoints and optimizer using SHARDED_STATE_DICT")
                        print("=====================================================")
                        save_model_and_optimizer_sharded(model, rank, train_config, optim=optimizer)
                    else:
                        print(" Saving the FSDP model checkpoints using SHARDED_STATE_DICT")
                        print("=====================================================")
                        save_model_and_optimizer_sharded(model, rank, train_config)
            if train_config.enable_fsdp:
                dist.barrier()
        checkpoint_end_time = time.perf_counter() - checkpoint_start_time
        checkpoint_times.append(checkpoint_end_time)

        if train_config.run_validation:
            if eval_epoch_loss < best_val_loss:
                best_val_loss = eval_epoch_loss
                if train_config.enable_fsdp:
                    if rank == 0:
                        print(f"best eval loss on epoch {epoch + 1} is {best_val_loss}")
                else:
                    print(f"best eval loss on epoch {epoch + 1} is {best_val_loss}")
            val_loss.append(float(eval_epoch_loss))
            val_prep.append(float(eval_ppl))
        if train_config.enable_fsdp:
            if rank == 0:
                print(
                    f"Epoch {epoch + 1}: train_perplexity={train_perplexity:.4f}, train_epoch_loss={train_epoch_loss:.4f}, epoch time {epoch_end_time}s")
        else:
            print(
                f"Epoch {epoch + 1}: train_perplexity={train_perplexity:.4f}, train_epoch_loss={train_epoch_loss:.4f}, epoch time {epoch_end_time}s")

        # Saving the results every epoch to plot later
        if train_config.save_metrics:
            save_to_json(metrics_filename, train_step_loss, train_loss, train_step_perplexity, train_prep,
                         val_step_loss, val_loss, val_step_perplexity, val_prep)
        # Early stopping (disabled)
        # if len(val_loss) == 1:
        #     continue
        # elif val_loss[-1] > val_loss[-2]:
        #     print("Performance drops from Epoch {}, early stop.")
        #     break

    avg_epoch_time = sum(epoch_times) / len(epoch_times)
    avg_checkpoint_time = sum(checkpoint_times) / len(checkpoint_times) if len(checkpoint_times) > 0 else 0
    avg_train_prep = sum(train_prep) / len(train_prep)
    avg_train_loss = sum(train_loss) / len(train_loss)
    if train_config.run_validation:
        avg_eval_prep = sum(val_prep) / len(val_prep)
        avg_eval_loss = sum(val_loss) / len(val_loss)

    results['avg_train_prep'] = avg_train_prep
    results['avg_train_loss'] = avg_train_loss
    if train_config.run_validation:
        results['avg_eval_prep'] = avg_eval_prep
        results['avg_eval_loss'] = avg_eval_loss
    results["avg_epoch_time"] = avg_epoch_time
    results["avg_checkpoint_time"] = avg_checkpoint_time
    if train_config.save_metrics:
        results["metrics_filename"] = metrics_filename
    if train_config.flop_counter:
        results["model_tflops"] = TFlops

    # saving the training params including fsdp setting for reference.
    if train_config.enable_fsdp and not train_config.use_peft and rank == 0:
        save_train_params(train_config, fsdp_config, rank)

    return results


def evaluation(model, train_config, eval_dataloader, local_rank, tokenizer, wandb_run):
    """
    Evaluates the model on the given dataloader

    Args:
        model: The model to evaluate
        eval_dataloader: The dataloader containing the evaluation data
        local_rank: The rank of the current node in a distributed setting
        tokenizer: The tokenizer used to decode predictions

    Returns: eval_ppl, eval_epoch_loss, val_step_loss, val_step_perplexity
    """
    if train_config.enable_fsdp:
        world_size = int(os.environ["WORLD_SIZE"])
    model.eval()
    eval_preds = []
    val_step_loss = []
    val_step_perplexity = []
    eval_loss = 0.0  # Initialize evaluation loss
    total_eval_steps = 0
    with MemoryTrace() as memtrace:
        for step, batch in enumerate(tqdm(eval_dataloader, colour="green", desc="evaluating Epoch", dynamic_ncols=True)):
            total_eval_steps += 1
            # stop when the maximum number of eval steps is reached
            if train_config.max_eval_step > 0 and total_eval_steps > train_config.max_eval_step:
                if not train_config.enable_fsdp or local_rank == 0:
                    print("max eval steps reached, stopping evaluation, total_eval_steps: ", total_eval_steps - 1)
                break
            for key in batch.keys():
                if train_config.enable_fsdp:
                    batch[key] = batch[key].to(local_rank)
                else:
                    if is_xpu_available():
                        batch[key] = batch[key].to('xpu:0')
                    else:
                        batch[key] = batch[key].to('cuda:0')
            # Ensure no gradients are computed for this scope to save memory
            with torch.no_grad():
                # Forward pass and compute loss
                outputs = model(**batch)
                loss = outputs.loss
                if train_config.save_metrics:
                    val_step_loss.append(loss.detach().float().item())
                    val_step_perplexity.append(float(torch.exp(loss.detach().float())))
                eval_loss += loss.detach().float()
            # Decode predictions and add to evaluation predictions list
            preds = torch.argmax(outputs.logits, -1)
            eval_preds.extend(
                tokenizer.batch_decode(preds.detach().cpu().numpy(), skip_special_tokens=True)
            )
    # If there's more than one CUDA device, reduce evaluation loss across all devices
    if is_xpu_available() and (torch.xpu.device_count() > 1 and train_config.enable_fsdp):
        dist.all_reduce(eval_loss, op=dist.ReduceOp.SUM)
    if torch.cuda.device_count() > 1 and train_config.enable_fsdp:
        dist.all_reduce(eval_loss, op=dist.ReduceOp.SUM)

    # Compute average loss and perplexity
    eval_epoch_loss = eval_loss / len(eval_dataloader)
    if train_config.enable_fsdp:
        eval_epoch_loss = eval_epoch_loss / world_size
    eval_ppl = torch.exp(eval_epoch_loss)

    # Print evaluation metrics
    if train_config.enable_fsdp:
        if local_rank == 0:
            print(f" {eval_ppl=} {eval_epoch_loss=}")
    else:
        print(f" {eval_ppl=} {eval_epoch_loss=}")
    if wandb_run:
        wandb_run.log({
            'eval/perplexity': eval_ppl,
            'eval/loss': eval_epoch_loss,
        }, commit=False)

    return eval_ppl, eval_epoch_loss, val_step_loss, val_step_perplexity


def freeze_transformer_layers(model, num_layer):
    for i, layer in enumerate(model.model.layers):
        if i < num_layer:
            for param in layer.parameters():
                param.requires_grad = False


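# Usage sketch for `freeze_transformer_layers` (illustrative; not part of the
# original file). `_DemoModel` is a hypothetical skeleton mirroring the HF
# Llama layout the function expects (`model.model.layers`).
def _demo_freeze_transformer_layers():
    class _DemoInner(nn.Module):
        def __init__(self):
            super().__init__()
            self.layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(4)])

    class _DemoModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.model = _DemoInner()

    model = _DemoModel()
    freeze_transformer_layers(model, num_layer=2)
    frozen = [all(not p.requires_grad for p in layer.parameters()) for layer in model.model.layers]
    print(frozen)  # expected: [True, True, False, False]

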
def check_frozen_layers_peft_model(model):
    for i, layer in enumerate(model.base_model.model.model.layers):
        for name, param in layer.named_parameters():
            print(f"Layer {i}, parameter {name}: requires_grad = {param.requires_grad}")


def setup():
    """Initialize the process group for distributed training"""
    if is_ccl_available():
        # distributed training on xpus
        dist.init_process_group("ccl")
    else:
        dist.init_process_group("nccl")


def setup_environ_flags(rank):
    """Set environment flags for debugging purposes"""
    os.environ["TORCH_SHOW_CPP_STACKTRACES"] = str(1)
    os.environ["NCCL_ASYNC_ERROR_HANDLING"] = str(1)
    # os.environ["TORCH_DISTRIBUTED_DEBUG"] = "DETAIL"
    # This flag helps with CUDA memory fragmentation that can lead to OOM in some cases.
    # Note this is only available in PyTorch Nightlies (as of July 30, 2023).
    # os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
    if rank == 0:
        print(f"--> Running with torch dist debug set to detail")


def cleanup():
    """Clean up the process group after training"""
    dist.destroy_process_group()


def clear_gpu_cache(rank=None):
    """Clear the GPU cache for all ranks"""
    if rank == 0:
        print(f"Clearing GPU cache for all ranks")
    if is_xpu_available():
        torch.xpu.empty_cache()
    else:
        torch.cuda.empty_cache()


def get_parameter_dtypes(model):
    """Get the data types of model parameters"""
    parameter_dtypes = {}
    for name, parameter in model.named_parameters():
        parameter_dtypes[name] = parameter.dtype
    return parameter_dtypes


def print_model_size(model, config, rank: int = 0) -> None:
    """
    Print the model name and the number of trainable parameters.

    Args:
        model: The PyTorch model.
        config: Config object providing `model_name`.
        rank (int, optional): Current process's rank. Defaults to 0.
    """
    if rank == 0:
        print(f"--> Model {config.model_name}")
        total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        print(f"\n--> {config.model_name} has {total_params / 1e6} Million params\n")


def get_policies(cfg, rank):
    """Get the policies for mixed precision and fsdp wrapping"""
    verify_bfloat_support = ((
        torch.version.cuda
        and torch.cuda.is_bf16_supported()
        and torch.version.cuda >= "11.0"
        and dist.is_nccl_available()
        and nccl.version() >= (2, 10)
    ) or (is_xpu_available()))

    mixed_precision_policy = None
    wrapping_policy = None

    # Mixed precision
    if cfg.mixed_precision:
        bf16_ready = verify_bfloat_support
        if bf16_ready and not cfg.use_fp16:
            mixed_precision_policy = bfSixteen
            if rank == 0:
                print(f"bFloat16 enabled for mixed precision - using bfSixteen policy")
        elif cfg.use_fp16:
            mixed_precision_policy = fpSixteen
            if rank == 0:
                print(f"FP16 enabled")
        else:
            print(f"bFloat16 support not present. Using FP32, and not mixed precision")
    wrapping_policy = get_llama_wrapper()
    return mixed_precision_policy, wrapping_policy


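# Usage sketch for `get_policies` (illustrative; not part of the original
# file). `_DemoFsdpCfg` is a hypothetical stub with the two flags the function
# reads. On a host without bf16-capable CUDA (and without XPU), the
# mixed-precision policy comes back as None and only the wrapping policy is
# usable.
def _demo_get_policies():
    class _DemoFsdpCfg:
        mixed_precision = True
        use_fp16 = False

    mixed_precision_policy, wrapping_policy = get_policies(_DemoFsdpCfg(), rank=0)
    print(f"mixed_precision_policy={mixed_precision_policy}, wrapping_policy={wrapping_policy}")

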
def save_train_params(train_config, fsdp_config, rank):
    """
    This function saves the train_config and FSDP config into a train_params.yaml.
    This will be used by the converter script in the inference folder to fetch the HF model name or path.
    It is also helpful as a log for future reference.
    """
    # Convert the train_config and fsdp_config objects to dictionaries,
    # converting all values to strings to ensure they can be serialized into a YAML file
    train_config_dict = {k: str(v) for k, v in vars(train_config).items() if not k.startswith('__')}
    fsdp_config_dict = {k: str(v) for k, v in vars(fsdp_config).items() if not k.startswith('__')}
    # Merge the two dictionaries into one
    train_params_dict = {**train_config_dict, **fsdp_config_dict}
    # Construct the folder name (following FSDP checkpointing style) using properties of the train_config object
    folder_name = (
        train_config.dist_checkpoint_root_folder
        + "/"
        + train_config.dist_checkpoint_folder
        + "-"
        + train_config.model_name
    )
    save_dir = Path.cwd() / folder_name
    # If the directory does not exist, create it
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # Convert the dictionary to a YAML string
    config_yaml = yaml.dump(train_params_dict, indent=4)
    file_name = os.path.join(save_dir, 'train_params.yaml')
    # Check if there's a directory with the same name as the file
    if os.path.isdir(file_name):
        print(f"Error: {file_name} is a directory, not a file.")
    else:
        # Write the YAML string to the file
        with open(file_name, 'w') as f:
            f.write(config_yaml)
        if rank == 0:
            print(f"training params are saved in {file_name}")


def save_to_json(output_filename, train_step_loss, train_epoch_loss, train_step_ppl, train_epoch_ppl, val_step_loss, val_epoch_loss, val_step_ppl, val_epoch_ppl):
    metrics_data = {
        "train_step_loss": train_step_loss,
        "train_epoch_loss": train_epoch_loss,
        "train_step_perplexity": train_step_ppl,
        "train_epoch_perplexity": train_epoch_ppl,
        "val_step_loss": val_step_loss,
        "val_epoch_loss": val_epoch_loss,
        "val_step_perplexity": val_step_ppl,
        "val_epoch_perplexity": val_epoch_ppl
    }
    with open(output_filename, "w") as f:
        json.dump(metrics_data, f)


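# Usage sketch for `save_to_json` (illustrative; not part of the original
# file): the step-level and epoch-level metric lists are written as a single
# JSON object, matching what the train loops above pass in. The output path is
# hypothetical.
def _demo_save_to_json():
    save_to_json(
        "metrics_demo.json",
        train_step_loss=[1.00, 0.90], train_epoch_loss=[0.95],
        train_step_ppl=[2.72, 2.46], train_epoch_ppl=[2.59],
        val_step_loss=[], val_epoch_loss=[],
        val_step_ppl=[], val_epoch_ppl=[],
    )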