feat: log LR in CLM fine-tune script
This commit is contained in:
parent
96b41dee60
commit
e99277ec52
|
@@ -600,6 +600,7 @@ def main():
                     train_perplexity = float("inf")
                 writer.add_scalar("Train/Perplexity (Step)", train_perplexity, global_step)
                 writer.add_scalar("Train/Loss (Step)", loss, global_step)
+                writer.add_scalar("Train/Learning Rate (Step)", lr_scheduler.get_last_lr()[-1], global_step)

             if args.output_dir is not None and args.checkpointing_steps is not None:
                 if args.checkpointing_steps != "epoch" and completed_steps % int(args.checkpointing_steps) == 0:
|
Loading…
Reference in New Issue