We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 6d12adb · commit 6ced3b8 (Copy full SHA for 6ced3b8)
verl/trainer/fsdp_sft_trainer.py
@@ -30,7 +30,7 @@
30
import hydra
31
import torch
32
import torch.distributed
33
-from omegaconf import DictConfig
+from omegaconf import DictConfig, OmegaConf
34
from peft import LoraConfig, TaskType, get_peft_model
35
from tensordict import TensorDict
36
from torch import nn, optim
@@ -688,6 +688,7 @@ def fit(self):
688
project_name=self.config.trainer.project_name,
689
experiment_name=self.config.trainer.experiment_name,
690
default_backend=self.config.trainer.logger,
691
+ config=OmegaConf.to_container(self.config, resolve=True),
692
)
693
694
global_step = self.resume_global_step # Start from resumed step
0 commit comments