38 changes: 38 additions & 0 deletions libai/scheduler/lr_scheduler.py
@@ -98,6 +98,44 @@ def WarmupCosineAnnealingLR(
return warmup_cosine_annealing_lr


def WarmupStepLR(
optimizer: flow.optim.Optimizer,
max_iter: int,
warmup_factor: float,
warmup_iter: int,
step_size: int,
gamma: float = 0.1,
warmup_method: str = "linear",
):
"""Create a schedule with a learning rate that decreases following the values of the Step
function between the initial lr set in the optimizer to 0, after a warmup period during which
it increases linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (flow.optim.Optimizer): Wrapped optimizer.
max_iter (int): Total training iters.
warmup_factor (float): The warmup factor.
warmup_iter (int): The number of warmup steps.
step_size (int): Period of learning rate decay.
gamma (float, optional): Multiplicative factor of learning rate decay. Defaults to 0.1.
warmup_method (str, optional): The method of warmup, you can choose "linear" or "constant".
In linear mode, the multiplication factor starts with warmup_factor in the first
epoch and then inreases linearly to reach 1. Defaults to "linear".
"""
step_lr = flow.optim.lr_scheduler.StepLR(
optimizer, step_size=step_size, gamma=gamma
)
if warmup_iter == 0:
logger.warning("warmup iters equals to zero, return StepLR")
return step_lr
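    # Wrap the base schedule so the lr is scaled during the first warmup_iter
    # iterations: ramping from warmup_factor up to 1 with "linear" warmup, or
    # held at warmup_factor with "constant" warmup.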
warmup_step_lr = flow.optim.lr_scheduler.WarmUpLR(
step_lr,
warmup_factor=warmup_factor,
warmup_iters=warmup_iter,
warmup_method=warmup_method,
)
return warmup_step_lr
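
# A minimal usage sketch for WarmupStepLR, assuming a `model` and this module's
# `flow` (oneflow) import are in scope; all hyperparameter values here are
# hypothetical, not libai defaults. It warms the lr up linearly over the first
# 100 iterations, then decays it 10x every 300 iterations:
#
#     optimizer = flow.optim.SGD(model.parameters(), lr=0.1)
#     scheduler = WarmupStepLR(
#         optimizer,
#         max_iter=1000,
#         warmup_factor=0.001,
#         warmup_iter=100,
#         step_size=300,
#         gamma=0.1,
#     )
#     for _ in range(1000):
#         ...  # forward pass, backward pass, optimizer.step()
#         scheduler.step()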


def WarmupMultiStepLR(
optimizer: flow.optim.Optimizer,
max_iter: int,