Skip to content

Commit a5f65d5

Browse files
authored
hapi/model: step learning rate on batch end. (#27991)
* hapi/model step learning rate on batch end. test=develop
1 parent 5f04875 commit a5f65d5

File tree

2 files changed

+25
-5
lines changed

2 files changed

+25
-5
lines changed

python/paddle/hapi/model.py

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -453,6 +453,12 @@ def _run(self, inputs, labels=None):
453453
if len(name) > 0:
454454
rets.insert(i, feed[name])
455455

456+
# step learning rate scheduler on each batch end
457+
if self.model._optimizer and \
458+
isinstance(self.model._optimizer._learning_rate,
459+
paddle.optimizer.lr.LRScheduler):
460+
self.model._optimizer._learning_rate.step()
461+
456462
# LoDTensor cannot be fetch as numpy directly
457463
rets = [np.array(v) for v in rets]
458464
if self.mode == 'test':
@@ -652,6 +658,13 @@ def train_batch(self, inputs, labels=None):
652658

653659
self.model._optimizer.minimize(final_loss)
654660
self.model.network.clear_gradients()
661+
662+
# step learning rate scheduler on each batch end
663+
if self.model._optimizer and \
664+
isinstance(self.model._optimizer._learning_rate,
665+
paddle.optimizer.lr.LRScheduler):
666+
self.model._optimizer._learning_rate.step()
667+
655668
metrics = []
656669
for metric in self.model._metrics:
657670
metric_outs = metric.compute(*(to_list(outputs) + labels))
@@ -1461,11 +1474,6 @@ def fit(
14611474

14621475
cbks.on_end('eval', eval_logs)
14631476

1464-
# step learning rate scheduler on each epoch end
1465-
if isinstance(self._optimizer._learning_rate,
1466-
paddle.optimizer.lr.LRScheduler):
1467-
self._optimizer._learning_rate.step()
1468-
14691477
cbks.on_end('train', logs)
14701478
self._test_dataloader = None
14711479

python/paddle/tests/test_model.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -631,6 +631,7 @@ def make_optimizer(parameters=None):
631631
parameters=parameters)
632632
return optimizer
633633

634+
# dynamic test
634635
device = paddle.set_device('cpu')
635636
fluid.enable_dygraph(device)
636637
net = MyModel()
@@ -643,8 +644,19 @@ def make_optimizer(parameters=None):
643644
dataset = MyDataset()
644645
model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)
645646

647+
# static test
646648
paddle.enable_static()
647649

650+
net = MyModel()
651+
inputs = [InputSpec([None, 20], 'float32', 'x')]
652+
labels = [InputSpec([None, 1], 'int64', 'label')]
653+
optim = make_optimizer(net.parameters())
654+
model = Model(net, inputs, labels)
655+
model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
656+
657+
dataset = MyDataset()
658+
model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)
659+
648660

649661
class TestRaiseError(unittest.TestCase):
650662
def test_input_without_name(self):

0 commit comments

Comments
 (0)