verl/workers/actor/dp_actor.py (8 changes: 7 additions & 1 deletion)
@@ -386,6 +386,8 @@ def update_policy(self, data: DataProto):
         # See PPO paper for details. https://arxiv.org/abs/1707.06347
         mini_batches = data.split(self.config.ppo_mini_batch_size)
 
+        on_policy = len(mini_batches) == 1 and self.config.ppo_epochs == 1
+
         metrics = {}
         for _ in range(self.config.ppo_epochs):
             for batch_idx, mini_batch in enumerate(mini_batches):
@@ -405,7 +407,6 @@ def update_policy(self, data: DataProto):
                     micro_batch_metrics = {}
                     model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch}
                     response_mask = model_inputs["response_mask"]
-                    old_log_prob = model_inputs["old_log_probs"]
                     advantages = model_inputs["advantages"]
 
                     entropy_coeff = self.config.entropy_coeff
@@ -424,6 +425,11 @@
                         model_inputs, temperature=temperature, calculate_entropy=calculate_entropy
                     )
 
+                    if on_policy:
+                        old_log_prob = log_prob.detach()
+                    else:
+                        old_log_prob = model_inputs["old_log_probs"]
+
                     loss_mode = self.config.policy_loss.get("loss_mode", "vanilla")
                     # vanilla -> verl.trainer.ppo.core_algos.compute_policy_loss_vanilla
                     # gpg -> verl.trainer.ppo.core_algos.compute_policy_loss_gpg
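In effect, the change detects the strictly on-policy case: with a single mini-batch and `ppo_epochs == 1`, every micro-batch is trained on exactly the policy that generated it, so the stored `old_log_probs` should match the freshly recomputed `log_prob`. Reusing `log_prob.detach()` makes the importance ratio exactly 1 by construction, sidestepping any numerical mismatch between rollout-time and training-time log-probs, while the gradient still flows through `log_prob`. A minimal sketch of why this is sound, assuming the standard PPO ratio `exp(log_prob - old_log_prob)` (illustrative standalone code, not verl's actual loss implementation):

```python
import torch

# Hypothetical tensors; names are illustrative, not from verl.
log_prob = torch.randn(4, requires_grad=True)  # log-probs under the current policy
advantages = torch.randn(4)

# On-policy shortcut from the PR: reuse the detached current log-probs.
old_log_prob = log_prob.detach()

# Standard PPO importance ratio: exactly 1.0 in value here,
# but the gradient still flows through log_prob.
ratio = torch.exp(log_prob - old_log_prob)
loss = -(ratio * advantages).mean()
loss.backward()

# d(loss)/d(log_prob_i) = -advantages_i / N: the vanilla policy gradient.
assert torch.allclose(log_prob.grad, -advantages / 4)
```

Because the ratio is identically 1, PPO's clipping never triggers in this regime, and the update is exactly the on-policy policy gradient rather than one perturbed by drift between stored and recomputed log-probs.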