From 0cd330770c5f2d0ccbe29eb3dfb32b748a866e6c Mon Sep 17 00:00:00 2001 From: MRXLT Date: Wed, 19 Aug 2020 16:56:59 +0800 Subject: [PATCH 01/21] for new optimizer --- doc/fluid/api/gen_doc.py | 3 ++- doc/fluid/api/optimizer.rst | 2 ++ doc/fluid/api/optimizer/Adadelta.rst | 5 ++--- doc/fluid/api/optimizer/Adagrad.rst | 5 ++--- doc/fluid/api/optimizer/Adam.rst | 5 ++--- doc/fluid/api/optimizer/AdamW.rst | 13 +++++++++++++ doc/fluid/api/optimizer/DecayedAdagrad.rst | 5 ++--- doc/fluid/api/optimizer/Dpsgd.rst | 5 ++--- .../api/optimizer/ExponentialMovingAverage.rst | 5 ++--- doc/fluid/api/optimizer/Ftrl.rst | 5 ++--- doc/fluid/api/optimizer/Lamb.rst | 13 +++++++++++++ doc/fluid/api/optimizer/LarsMomentum.rst | 5 ++--- doc/fluid/api/optimizer/LookaheadOptimizer.rst | 5 ++--- doc/fluid/api/optimizer/ModelAverage.rst | 5 ++--- doc/fluid/api/optimizer/Momentum.rst | 5 ++--- doc/fluid/api/optimizer/Optimizer.rst | 13 +++++++++++++ doc/fluid/api/optimizer/PipelineOptimizer.rst | 13 +++++++++++++ doc/fluid/api/optimizer/RMSProp.rst | 13 +++++++++++++ doc/fluid/api/optimizer/RecomputeOptimizer.rst | 5 ++--- doc/fluid/api/optimizer/SGD.rst | 5 ++--- 20 files changed, 95 insertions(+), 40 deletions(-) create mode 100644 doc/fluid/api/optimizer/AdamW.rst create mode 100644 doc/fluid/api/optimizer/Lamb.rst create mode 100644 doc/fluid/api/optimizer/Optimizer.rst create mode 100644 doc/fluid/api/optimizer/PipelineOptimizer.rst create mode 100644 doc/fluid/api/optimizer/RMSProp.rst diff --git a/doc/fluid/api/gen_doc.py b/doc/fluid/api/gen_doc.py index 65e92c908b4..83123214244 100644 --- a/doc/fluid/api/gen_doc.py +++ b/doc/fluid/api/gen_doc.py @@ -21,7 +21,8 @@ import paddle.fluid as fluid import paddle.tensor as tensor import paddle.nn as nn -import paddle.complex as complex +import paddle.optimizer as optimizer +#import paddle.complex as complex #import paddle.framework as framework def parse_arg(): diff --git a/doc/fluid/api/optimizer.rst b/doc/fluid/api/optimizer.rst index 06ccc695574..d99e3c39fd9 100644 --- a/doc/fluid/api/optimizer.rst +++ b/doc/fluid/api/optimizer.rst @@ -32,3 +32,5 @@ paddle.optimizer optimizer/RMSPropOptimizer.rst optimizer/SGD.rst optimizer/SGDOptimizer.rst + optimizer/AdamW.rst + optimizer/Optimizer.rst diff --git a/doc/fluid/api/optimizer/Adadelta.rst b/doc/fluid/api/optimizer/Adadelta.rst index cba6c6fc6f6..3d4d505c5a3 100644 --- a/doc/fluid/api/optimizer/Adadelta.rst +++ b/doc/fluid/api/optimizer/Adadelta.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Adadelta: +.. _api_optimizer_Adadelta: Adadelta -------- -.. autoclass:: paddle.fluid.optimizer.Adadelta +.. autoclass:: paddle.optimizer.Adadelta :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/Adagrad.rst b/doc/fluid/api/optimizer/Adagrad.rst index b955fa9f7df..deef2879fbe 100644 --- a/doc/fluid/api/optimizer/Adagrad.rst +++ b/doc/fluid/api/optimizer/Adagrad.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Adagrad: +.. _api_optimizer_Adagrad: Adagrad ------- -.. autoclass:: paddle.fluid.optimizer.Adagrad +.. 
autoclass:: paddle.optimizer.Adagrad :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/Adam.rst b/doc/fluid/api/optimizer/Adam.rst index 4c7f6fd062f..f9bbe07dc85 100644 --- a/doc/fluid/api/optimizer/Adam.rst +++ b/doc/fluid/api/optimizer/Adam.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Adam: +.. _api_optimizer_Adam: Adam ---- -.. autoclass:: paddle.fluid.optimizer.Adam +.. autoclass:: paddle.optimizer.Adam :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/AdamW.rst b/doc/fluid/api/optimizer/AdamW.rst new file mode 100644 index 00000000000..0f2b8f84f53 --- /dev/null +++ b/doc/fluid/api/optimizer/AdamW.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_adamw_AdamW: + +AdamW +----- + +.. autoclass:: paddle.optimizer.adamw.AdamW + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/optimizer/DecayedAdagrad.rst b/doc/fluid/api/optimizer/DecayedAdagrad.rst index f2b37dda5cf..e3f1c574d8b 100644 --- a/doc/fluid/api/optimizer/DecayedAdagrad.rst +++ b/doc/fluid/api/optimizer/DecayedAdagrad.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_DecayedAdagrad: +.. _api_optimizer_DecayedAdagrad: DecayedAdagrad -------------- -.. autoclass:: paddle.fluid.optimizer.DecayedAdagrad +.. autoclass:: paddle.optimizer.DecayedAdagrad :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/Dpsgd.rst b/doc/fluid/api/optimizer/Dpsgd.rst index 161606af2c5..f8fbfbf653a 100644 --- a/doc/fluid/api/optimizer/Dpsgd.rst +++ b/doc/fluid/api/optimizer/Dpsgd.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Dpsgd: +.. _api_optimizer_Dpsgd: Dpsgd ----- -.. autoclass:: paddle.fluid.optimizer.Dpsgd +.. autoclass:: paddle.optimizer.Dpsgd :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/ExponentialMovingAverage.rst b/doc/fluid/api/optimizer/ExponentialMovingAverage.rst index 41f2b39ae95..173608910d2 100644 --- a/doc/fluid/api/optimizer/ExponentialMovingAverage.rst +++ b/doc/fluid/api/optimizer/ExponentialMovingAverage.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_ExponentialMovingAverage: +.. _api_optimizer_ExponentialMovingAverage: ExponentialMovingAverage ------------------------ -.. autoclass:: paddle.fluid.optimizer.ExponentialMovingAverage +.. autoclass:: paddle.optimizer.ExponentialMovingAverage :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/Ftrl.rst b/doc/fluid/api/optimizer/Ftrl.rst index f8bcb617dbf..85a5ab6eee3 100644 --- a/doc/fluid/api/optimizer/Ftrl.rst +++ b/doc/fluid/api/optimizer/Ftrl.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Ftrl: +.. _api_optimizer_Ftrl: Ftrl ---- -.. autoclass:: paddle.fluid.optimizer.Ftrl +.. 
autoclass:: paddle.optimizer.Ftrl :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/Lamb.rst b/doc/fluid/api/optimizer/Lamb.rst new file mode 100644 index 00000000000..242eecd2ba3 --- /dev/null +++ b/doc/fluid/api/optimizer/Lamb.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_Lamb: + +Lamb +---- + +.. autoclass:: paddle.optimizer.Lamb + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/optimizer/LarsMomentum.rst b/doc/fluid/api/optimizer/LarsMomentum.rst index 396f93416f8..199afcd78c6 100644 --- a/doc/fluid/api/optimizer/LarsMomentum.rst +++ b/doc/fluid/api/optimizer/LarsMomentum.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_LarsMomentum: +.. _api_optimizer_LarsMomentum: LarsMomentum ------------ -.. autoclass:: paddle.fluid.optimizer.LarsMomentum +.. autoclass:: paddle.optimizer.LarsMomentum :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/LookaheadOptimizer.rst b/doc/fluid/api/optimizer/LookaheadOptimizer.rst index e87be3eefdf..663b5662cee 100644 --- a/doc/fluid/api/optimizer/LookaheadOptimizer.rst +++ b/doc/fluid/api/optimizer/LookaheadOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_LookaheadOptimizer: +.. _api_optimizer_LookaheadOptimizer: LookaheadOptimizer ------------------ -.. autoclass:: paddle.fluid.optimizer.LookaheadOptimizer +.. autoclass:: paddle.optimizer.LookaheadOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/ModelAverage.rst b/doc/fluid/api/optimizer/ModelAverage.rst index 86e458ef704..5a67fc1b195 100644 --- a/doc/fluid/api/optimizer/ModelAverage.rst +++ b/doc/fluid/api/optimizer/ModelAverage.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_ModelAverage: +.. _api_optimizer_ModelAverage: ModelAverage ------------ -.. autoclass:: paddle.fluid.optimizer.ModelAverage +.. autoclass:: paddle.optimizer.ModelAverage :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/Momentum.rst b/doc/fluid/api/optimizer/Momentum.rst index 7b54d7cc1fc..f0ef70be9ee 100644 --- a/doc/fluid/api/optimizer/Momentum.rst +++ b/doc/fluid/api/optimizer/Momentum.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Momentum: +.. _api_optimizer_Momentum: Momentum -------- -.. autoclass:: paddle.fluid.optimizer.Momentum +.. autoclass:: paddle.optimizer.Momentum :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/Optimizer.rst b/doc/fluid/api/optimizer/Optimizer.rst new file mode 100644 index 00000000000..1ef98eab55c --- /dev/null +++ b/doc/fluid/api/optimizer/Optimizer.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_Optimizer: + +Optimizer +--------- + +.. 
autoclass:: paddle.optimizer.Optimizer + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/optimizer/PipelineOptimizer.rst b/doc/fluid/api/optimizer/PipelineOptimizer.rst new file mode 100644 index 00000000000..7488bf8a3e0 --- /dev/null +++ b/doc/fluid/api/optimizer/PipelineOptimizer.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_PipelineOptimizer: + +PipelineOptimizer +----------------- + +.. autoclass:: paddle.optimizer.PipelineOptimizer + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/optimizer/RMSProp.rst b/doc/fluid/api/optimizer/RMSProp.rst new file mode 100644 index 00000000000..903acc26a33 --- /dev/null +++ b/doc/fluid/api/optimizer/RMSProp.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_RMSProp: + +RMSProp +------- + +.. autoclass:: paddle.optimizer.RMSProp + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/optimizer/RecomputeOptimizer.rst b/doc/fluid/api/optimizer/RecomputeOptimizer.rst index 479037eebbb..4891f58c146 100644 --- a/doc/fluid/api/optimizer/RecomputeOptimizer.rst +++ b/doc/fluid/api/optimizer/RecomputeOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_RecomputeOptimizer: +.. _api_optimizer_RecomputeOptimizer: RecomputeOptimizer ------------------ -.. autoclass:: paddle.fluid.optimizer.RecomputeOptimizer +.. autoclass:: paddle.optimizer.RecomputeOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/SGD.rst b/doc/fluid/api/optimizer/SGD.rst index fa18269ad75..9af1bc8d60c 100644 --- a/doc/fluid/api/optimizer/SGD.rst +++ b/doc/fluid/api/optimizer/SGD.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_SGD: +.. _api_optimizer_SGD: SGD --- -.. autoclass:: paddle.fluid.optimizer.SGD +.. 
autoclass:: paddle.optimizer.SGD :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: From c9dacb66f74d3adbbcec8b709c95adcd881ef1fd Mon Sep 17 00:00:00 2001 From: MRXLT Date: Thu, 20 Aug 2020 11:00:15 +0800 Subject: [PATCH 02/21] update en doc --- doc/fluid/api/optimizer/AdadeltaOptimizer.rst | 5 ++--- doc/fluid/api/optimizer/AdagradOptimizer.rst | 5 ++--- doc/fluid/api/optimizer/AdamOptimizer.rst | 14 -------------- doc/fluid/api/optimizer/AdamW.rst | 4 ++-- doc/fluid/api/optimizer/Adamax.rst | 5 ++--- doc/fluid/api/optimizer/AdamaxOptimizer.rst | 14 -------------- doc/fluid/api/optimizer/DGCMomentumOptimizer.rst | 5 ++--- .../api/optimizer/DecayedAdagradOptimizer.rst | 5 ++--- doc/fluid/api/optimizer/DpsgdOptimizer.rst | 5 ++--- doc/fluid/api/optimizer/FtrlOptimizer.rst | 5 ++--- doc/fluid/api/optimizer/LambOptimizer.rst | 5 ++--- doc/fluid/api/optimizer/LarsMomentumOptimizer.rst | 5 ++--- doc/fluid/api/optimizer/MomentumOptimizer.rst | 5 ++--- doc/fluid/api/optimizer/RMSPropOptimizer.rst | 14 -------------- doc/fluid/api/optimizer/SGDOptimizer.rst | 5 ++--- 15 files changed, 24 insertions(+), 77 deletions(-) delete mode 100644 doc/fluid/api/optimizer/AdamOptimizer.rst delete mode 100644 doc/fluid/api/optimizer/AdamaxOptimizer.rst delete mode 100644 doc/fluid/api/optimizer/RMSPropOptimizer.rst diff --git a/doc/fluid/api/optimizer/AdadeltaOptimizer.rst b/doc/fluid/api/optimizer/AdadeltaOptimizer.rst index 14692902f21..160a64b4d2d 100644 --- a/doc/fluid/api/optimizer/AdadeltaOptimizer.rst +++ b/doc/fluid/api/optimizer/AdadeltaOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_AdadeltaOptimizer: +.. _api_optimizer_AdadeltaOptimizer: AdadeltaOptimizer ----------------- -.. autoclass:: paddle.fluid.optimizer.AdadeltaOptimizer +.. autoclass:: paddle.optimizer.AdadeltaOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/AdagradOptimizer.rst b/doc/fluid/api/optimizer/AdagradOptimizer.rst index e52a9de102f..e125000739e 100644 --- a/doc/fluid/api/optimizer/AdagradOptimizer.rst +++ b/doc/fluid/api/optimizer/AdagradOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_AdagradOptimizer: +.. _api_optimizer_AdagradOptimizer: AdagradOptimizer ---------------- -.. autoclass:: paddle.fluid.optimizer.AdagradOptimizer +.. autoclass:: paddle.optimizer.AdagradOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/AdamOptimizer.rst b/doc/fluid/api/optimizer/AdamOptimizer.rst deleted file mode 100644 index 9a966f54c29..00000000000 --- a/doc/fluid/api/optimizer/AdamOptimizer.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_fluid_optimizer_AdamOptimizer: - -AdamOptimizer -------------- - -.. autoclass:: paddle.fluid.optimizer.AdamOptimizer - :members: - :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load - :noindex: - diff --git a/doc/fluid/api/optimizer/AdamW.rst b/doc/fluid/api/optimizer/AdamW.rst index 0f2b8f84f53..c76eb48cdf1 100644 --- a/doc/fluid/api/optimizer/AdamW.rst +++ b/doc/fluid/api/optimizer/AdamW.rst @@ -1,12 +1,12 @@ .. 
THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_optimizer_adamw_AdamW: +.. _api_optimizer_AdamW: AdamW ----- -.. autoclass:: paddle.optimizer.adamw.AdamW +.. autoclass:: paddle.optimizer.AdamW :members: :inherited-members: :noindex: diff --git a/doc/fluid/api/optimizer/Adamax.rst b/doc/fluid/api/optimizer/Adamax.rst index 7f0ed493551..36fb8509f0b 100644 --- a/doc/fluid/api/optimizer/Adamax.rst +++ b/doc/fluid/api/optimizer/Adamax.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Adamax: +.. _api_optimizer_Adamax: Adamax ------ -.. autoclass:: paddle.fluid.optimizer.Adamax +.. autoclass:: paddle.optimizer.Adamax :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/AdamaxOptimizer.rst b/doc/fluid/api/optimizer/AdamaxOptimizer.rst deleted file mode 100644 index b27b7aab44c..00000000000 --- a/doc/fluid/api/optimizer/AdamaxOptimizer.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_fluid_optimizer_AdamaxOptimizer: - -AdamaxOptimizer ---------------- - -.. autoclass:: paddle.fluid.optimizer.AdamaxOptimizer - :members: - :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load - :noindex: - diff --git a/doc/fluid/api/optimizer/DGCMomentumOptimizer.rst b/doc/fluid/api/optimizer/DGCMomentumOptimizer.rst index 2305e30ef77..aa7a3517c38 100644 --- a/doc/fluid/api/optimizer/DGCMomentumOptimizer.rst +++ b/doc/fluid/api/optimizer/DGCMomentumOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_DGCMomentumOptimizer: +.. _api_optimizer_DGCMomentumOptimizer: DGCMomentumOptimizer -------------------- -.. autoclass:: paddle.fluid.optimizer.DGCMomentumOptimizer +.. autoclass:: paddle.optimizer.DGCMomentumOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/DecayedAdagradOptimizer.rst b/doc/fluid/api/optimizer/DecayedAdagradOptimizer.rst index f0aa277010f..cf0d4452bec 100644 --- a/doc/fluid/api/optimizer/DecayedAdagradOptimizer.rst +++ b/doc/fluid/api/optimizer/DecayedAdagradOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_DecayedAdagradOptimizer: +.. _api_optimizer_DecayedAdagradOptimizer: DecayedAdagradOptimizer ----------------------- -.. autoclass:: paddle.fluid.optimizer.DecayedAdagradOptimizer +.. autoclass:: paddle.optimizer.DecayedAdagradOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/DpsgdOptimizer.rst b/doc/fluid/api/optimizer/DpsgdOptimizer.rst index d2462515a64..cbb1b8a9278 100644 --- a/doc/fluid/api/optimizer/DpsgdOptimizer.rst +++ b/doc/fluid/api/optimizer/DpsgdOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_DpsgdOptimizer: +.. _api_optimizer_DpsgdOptimizer: DpsgdOptimizer -------------- -.. autoclass:: paddle.fluid.optimizer.DpsgdOptimizer +.. 
autoclass:: paddle.optimizer.DpsgdOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/FtrlOptimizer.rst b/doc/fluid/api/optimizer/FtrlOptimizer.rst index 3875801869f..fcbbcc52eaf 100644 --- a/doc/fluid/api/optimizer/FtrlOptimizer.rst +++ b/doc/fluid/api/optimizer/FtrlOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_FtrlOptimizer: +.. _api_optimizer_FtrlOptimizer: FtrlOptimizer ------------- -.. autoclass:: paddle.fluid.optimizer.FtrlOptimizer +.. autoclass:: paddle.optimizer.FtrlOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/LambOptimizer.rst b/doc/fluid/api/optimizer/LambOptimizer.rst index db8fc7153c7..f661af2276b 100644 --- a/doc/fluid/api/optimizer/LambOptimizer.rst +++ b/doc/fluid/api/optimizer/LambOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_LambOptimizer: +.. _api_optimizer_LambOptimizer: LambOptimizer ------------- -.. autoclass:: paddle.fluid.optimizer.LambOptimizer +.. autoclass:: paddle.optimizer.LambOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/LarsMomentumOptimizer.rst b/doc/fluid/api/optimizer/LarsMomentumOptimizer.rst index daf1631c128..a19d0025d49 100644 --- a/doc/fluid/api/optimizer/LarsMomentumOptimizer.rst +++ b/doc/fluid/api/optimizer/LarsMomentumOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_LarsMomentumOptimizer: +.. _api_optimizer_LarsMomentumOptimizer: LarsMomentumOptimizer --------------------- -.. autoclass:: paddle.fluid.optimizer.LarsMomentumOptimizer +.. autoclass:: paddle.optimizer.LarsMomentumOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/MomentumOptimizer.rst b/doc/fluid/api/optimizer/MomentumOptimizer.rst index 1f0c6c63291..495c5fe91a0 100644 --- a/doc/fluid/api/optimizer/MomentumOptimizer.rst +++ b/doc/fluid/api/optimizer/MomentumOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_MomentumOptimizer: +.. _api_optimizer_MomentumOptimizer: MomentumOptimizer ----------------- -.. autoclass:: paddle.fluid.optimizer.MomentumOptimizer +.. autoclass:: paddle.optimizer.MomentumOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/RMSPropOptimizer.rst b/doc/fluid/api/optimizer/RMSPropOptimizer.rst deleted file mode 100644 index 237c4ea71e4..00000000000 --- a/doc/fluid/api/optimizer/RMSPropOptimizer.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_fluid_optimizer_RMSPropOptimizer: - -RMSPropOptimizer ----------------- - -.. 
autoclass:: paddle.fluid.optimizer.RMSPropOptimizer - :members: - :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load - :noindex: - diff --git a/doc/fluid/api/optimizer/SGDOptimizer.rst b/doc/fluid/api/optimizer/SGDOptimizer.rst index c6ec7ea6fd0..e36d63d41aa 100644 --- a/doc/fluid/api/optimizer/SGDOptimizer.rst +++ b/doc/fluid/api/optimizer/SGDOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_SGDOptimizer: +.. _api_optimizer_SGDOptimizer: SGDOptimizer ------------ -.. autoclass:: paddle.fluid.optimizer.SGDOptimizer +.. autoclass:: paddle.optimizer.SGDOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: From 5508d56a25777946316fd68060ca163033d2b1ee Mon Sep 17 00:00:00 2001 From: MRXLT Date: Thu, 20 Aug 2020 12:05:31 +0800 Subject: [PATCH 03/21] update en doc --- doc/fluid/api/optimizer/Lamb.rst | 13 ------------- doc/fluid/api/optimizer/PipelineOptimizer.rst | 13 ------------- 2 files changed, 26 deletions(-) delete mode 100644 doc/fluid/api/optimizer/Lamb.rst delete mode 100644 doc/fluid/api/optimizer/PipelineOptimizer.rst diff --git a/doc/fluid/api/optimizer/Lamb.rst b/doc/fluid/api/optimizer/Lamb.rst deleted file mode 100644 index 242eecd2ba3..00000000000 --- a/doc/fluid/api/optimizer/Lamb.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_optimizer_Lamb: - -Lamb ----- - -.. autoclass:: paddle.optimizer.Lamb - :members: - :inherited-members: - :noindex: - diff --git a/doc/fluid/api/optimizer/PipelineOptimizer.rst b/doc/fluid/api/optimizer/PipelineOptimizer.rst deleted file mode 100644 index 7488bf8a3e0..00000000000 --- a/doc/fluid/api/optimizer/PipelineOptimizer.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_optimizer_PipelineOptimizer: - -PipelineOptimizer ------------------ - -.. 
autoclass:: paddle.optimizer.PipelineOptimizer - :members: - :inherited-members: - :noindex: - From e86929d69b132a480c4269c048d2300820133d25 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Thu, 20 Aug 2020 12:08:24 +0800 Subject: [PATCH 04/21] update en doc --- doc/fluid/api/optimizer.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/doc/fluid/api/optimizer.rst b/doc/fluid/api/optimizer.rst index d99e3c39fd9..fa549464d16 100644 --- a/doc/fluid/api/optimizer.rst +++ b/doc/fluid/api/optimizer.rst @@ -11,8 +11,7 @@ paddle.optimizer optimizer/AdagradOptimizer.rst optimizer/Adam.rst optimizer/Adamax.rst - optimizer/AdamaxOptimizer.rst - optimizer/AdamOptimizer.rst + optimizer/AdamW.rst optimizer/DecayedAdagrad.rst optimizer/DecayedAdagradOptimizer.rst optimizer/DGCMomentumOptimizer.rst @@ -29,8 +28,7 @@ paddle.optimizer optimizer/Momentum.rst optimizer/MomentumOptimizer.rst optimizer/RecomputeOptimizer.rst - optimizer/RMSPropOptimizer.rst + optimizer/RMSProp.rst optimizer/SGD.rst optimizer/SGDOptimizer.rst - optimizer/AdamW.rst optimizer/Optimizer.rst From 2a37c8494d502b7c80d17a9413e05cec7408d4fb Mon Sep 17 00:00:00 2001 From: MRXLT Date: Thu, 20 Aug 2020 15:33:09 +0800 Subject: [PATCH 05/21] update cn doc --- doc/fluid/api/optimizer.rst | 4 +- doc/fluid/api_cn/optimizer_cn.rst | 6 +- .../api_cn/optimizer_cn/AdamOptimizer_cn.rst | 289 ------------------ doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 228 ++++++++++++++ doc/fluid/api_cn/optimizer_cn/Adam_cn.rst | 241 ++++++++++++++- .../optimizer_cn/AdamaxOptimizer_cn.rst | 229 -------------- doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst | 214 ++++++++++++- .../api_cn/optimizer_cn/Optimizer_cn.rst | 206 +++++++++++++ ...RMSPropOptimizer_cn.rst => RMSProp_cn.rst} | 185 ++++++----- 9 files changed, 976 insertions(+), 626 deletions(-) delete mode 100644 doc/fluid/api_cn/optimizer_cn/AdamOptimizer_cn.rst create mode 100644 doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst delete mode 100644 doc/fluid/api_cn/optimizer_cn/AdamaxOptimizer_cn.rst create mode 100644 doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst rename doc/fluid/api_cn/optimizer_cn/{RMSPropOptimizer_cn.rst => RMSProp_cn.rst} (56%) diff --git a/doc/fluid/api/optimizer.rst b/doc/fluid/api/optimizer.rst index fa549464d16..2d443a6de6d 100644 --- a/doc/fluid/api/optimizer.rst +++ b/doc/fluid/api/optimizer.rst @@ -1,6 +1,6 @@ -=============== +================ paddle.optimizer -=============== +================ .. 
toctree:: :maxdepth: 1 diff --git a/doc/fluid/api_cn/optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn.rst index 766c9d885ef..c22f667f10e 100644 --- a/doc/fluid/api_cn/optimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn.rst @@ -14,8 +14,7 @@ paddle.optimizer optimizer_cn/AdagradOptimizer_cn.rst optimizer_cn/Adam_cn.rst optimizer_cn/Adamax_cn.rst - optimizer_cn/AdamaxOptimizer_cn.rst - optimizer_cn/AdamOptimizer_cn.rst + optimizer_cn/AdamW_cn.rst optimizer_cn/DecayedAdagrad_cn.rst optimizer_cn/DecayedAdagradOptimizer_cn.rst optimizer_cn/DGCMomentumOptimizer_cn.rst @@ -32,6 +31,7 @@ paddle.optimizer optimizer_cn/Momentum_cn.rst optimizer_cn/MomentumOptimizer_cn.rst optimizer_cn/RecomputeOptimizer_cn.rst - optimizer_cn/RMSPropOptimizer_cn.rst + optimizer_cn/RMSProp_cn.rst optimizer_cn/SGD_cn.rst optimizer_cn/SGDOptimizer_cn.rst + optimizer_cn/Optimizer_cn.rst diff --git a/doc/fluid/api_cn/optimizer_cn/AdamOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamOptimizer_cn.rst deleted file mode 100644 index 8e07f80b205..00000000000 --- a/doc/fluid/api_cn/optimizer_cn/AdamOptimizer_cn.rst +++ /dev/null @@ -1,289 +0,0 @@ -.. _cn_api_fluid_optimizer_AdamOptimizer: - -AdamOptimizer -------------------------------- - -.. py:class:: paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameter_list=None, regularization=None, grad_clip=None, name=None, lazy_mode=False) - - - - -Adam优化器出自 `Adam论文 `_ 的第二节,能够利用梯度的一阶矩估计和二阶矩估计动态调整每个参数的学习率。 - -其参数更新的计算公式如下: - -.. math:: - \\t = t + 1 -.. math:: - moment\_1\_out=\beta_1∗moment\_1+(1−\beta_1)∗grad -.. math:: - moment\_2\_out=\beta_2∗moment\_2+(1−\beta_2)∗grad*grad -.. math:: - learning\_rate=learning\_rate*\frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} -.. math:: - param\_out=param-learning\_rate*\frac{moment\_1}{\sqrt{moment\_2}+\epsilon}\\ - -相关论文:`Adam: A Method for Stochastic Optimization `_ - -参数: - - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001 - - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **beta1** (float|Variable, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.9 - - **beta2** (float|Variable, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.999 - - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 - - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 - :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; - 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 - - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 - 默认值为None,此时将不进行梯度裁剪。 - - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None - - **lazy_mode** (bool, 可选) - 设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。 lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False - - -**代码示例** - -.. 
code-block:: python - - import paddle - import paddle.fluid as fluid - - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(cost) - adam_optimizer = fluid.optimizer.AdamOptimizer(0.01) - adam_optimizer.minimize(avg_cost) - - fetch_list = [avg_cost] - train_reader = paddle.batch( - paddle.dataset.uci_housing.train(), batch_size=1) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - for data in train_reader(): - exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) - -.. code-block:: python - - # Adam with beta1/beta2 as Variable - import paddle - import paddle.fluid as fluid - import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler - - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): - x = fluid.data(name='x', shape=[None, 13], dtype='float32') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(cost) - - # define beta decay variable - def get_decayed_betas(beta1_init, beta2_init, decay_steps, decay_rate): - global_step = lr_scheduler._decay_step_counter() - - beta1 = fluid.layers.create_global_var( - shape=[1], - value=float(beta1_init), - dtype='float32', - # set persistable for save checkpoints and resume - persistable=True, - name="beta1") - beta2 = fluid.layers.create_global_var( - shape=[1], - value=float(beta2_init), - dtype='float32', - # set persistable for save checkpoints and resume - persistable=True, - name="beta2") - - div_res = global_step / decay_steps - decayed_beta1 = beta1_init * (decay_rate**div_res) - decayed_beta2 = beta2_init * (decay_rate**div_res) - fluid.layers.assign(decayed_beta1, beta1) - fluid.layers.assign(decayed_beta2, beta2) - - return beta1, beta2 - - beta1, beta2 = get_decayed_betas(0.9, 0.99, 1e5, 0.9) - adam_optimizer = fluid.optimizer.AdamOptimizer( - learning_rate=0.01, - beta1=beta1, - beta2=beta2) - adam_optimizer.minimize(avg_cost) - - fetch_list = [avg_cost] - train_reader = paddle.batch( - paddle.dataset.uci_housing.train(), batch_size=1) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - for data in train_reader(): - exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) - - -.. 
py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) - -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 - -参数: - - **loss** (Variable) – 需要最小化的损失值变量 - - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` - - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None - -返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 - -返回类型: tuple - -**代码示例** - -.. code-block:: python - - import numpy - import paddle.fluid as fluid - - x = fluid.layers.data(name='X', shape=[13], dtype='float32') - y = fluid.layers.data(name='Y', shape=[1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - loss = fluid.layers.mean(cost) - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.2) - adam.minimize(loss) - - place = fluid.CPUPlace() # fluid.CUDAPlace(0) - exe = fluid.Executor(place) - - x = numpy.random.random(size=(10, 13)).astype('float32') - y = numpy.random.random(size=(10, 1)).astype('float32') - exe.run(fluid.default_startup_program()) - outs = exe.run(program=fluid.default_main_program(), - feed={'X': x, 'Y': y}, - fetch_list=[loss.name]) - - -.. py:method:: clear_gradients() - -**注意:** - - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - - -清除需要优化的参数的梯度。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - with fluid.dygraph.guard(): - value = np.arange(26).reshape(2, 13).astype("float32") - a = fluid.dygraph.to_variable(value) - linear = fluid.Linear(13, 5, dtype="float32") - optimizer = fluid.optimizer.Adam(learning_rate=0.02, - parameter_list=linear.parameters()) - out = linear(a) - out.backward() - optimizer.minimize(out) - optimizer.clear_gradients() - -.. py:method:: set_lr() - -**注意:** - - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - -手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 - -参数: - value (float|Variable) - 需要设置的学习率的值。 - -返回:无 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - - with fluid.dygraph.guard(): - linear = fluid.dygraph.nn.Linear(10, 10) - adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) - # 通过Python float数值手动设置学习率 - lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] - for i in range(5): - adam.set_lr(lr_list[i]) - print("current lr is {}".format(adam.current_step_lr())) - # 打印结果: - # current lr is 0.2 - # current lr is 0.3 - # current lr is 0.4 - # current lr is 0.5 - # current lr is 0.6 - - - # 通过 框架的Variable 设置学习率 - lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') - adam.set_lr(lr_var) - print("current lr is {}".format(adam.current_step_lr())) - # 打印结果: - # current lr is 0.7 - - - -.. py:method:: current_step_lr() - -**注意:** - - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - -获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 - -返回:当前步骤的学习率。 - -返回类型:float - -**代码示例** - -.. 
code-block:: python

    import paddle.fluid as fluid
    import numpy as np

    # example1: LearningRateDecay is not used, return value is all the same
    with fluid.dygraph.guard():
        emb = fluid.dygraph.Embedding([10, 10])
        adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters())
        lr = adam.current_step_lr()
        print(lr) # 0.001

    # example2: PiecewiseDecay is used, return the step learning rate
    with fluid.dygraph.guard():
        inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
        linear = fluid.dygraph.nn.Linear(10, 10)
        inp = fluid.dygraph.to_variable(inp)
        out = linear(inp)
        loss = fluid.layers.reduce_mean(out)

        bd = [2, 4, 6, 8]
        value = [0.2, 0.4, 0.6, 0.8, 1.0]
        adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0),
                                    parameter_list=linear.parameters())

        # first step: learning rate is 0.2
        np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True

        # learning rate for different steps
        ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
        for i in range(12):
            adam.minimize(loss)
            lr = adam.current_step_lr()
            np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True

diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst
new file mode 100644
index 00000000000..e8e635be072
--- /dev/null
+++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst
@@ -0,0 +1,228 @@
+.. _cn_api_fluid_optimizer_AdamW:
+
+AdamW
+-------------------------------
+
+.. py:class:: paddle.optimizer.AdamW(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=0.0, apply_decay_param_fun=None, grad_clip=None, name=None, lazy_mode=False)
+
+
+
+AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 <https://arxiv.org/abs/1711.05101>`_ ,用来解决Adam优化器中L2正则化失效的问题。
+
+其参数更新的计算公式如下:
+
+.. math::
+    \\t = t + 1
+.. math::
+    moment\_1\_out=\beta_1∗moment\_1+(1−\beta_1)∗grad
+.. math::
+    moment\_2\_out=\beta_2∗moment\_2+(1−\beta_2)∗grad*grad
+.. math::
+    learning\_rate=learning\_rate*\frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t}
+.. math::
+    param\_out=param-learning\_rate*\left(\frac{moment\_1}{\sqrt{moment\_2}+\epsilon} + \lambda * param\right)
+
+相关论文:`Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_
+
+参数:
+    - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001
+    - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。
+    - **beta1** (float|Variable, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.9
+    - **beta2** (float|Variable, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.999
+    - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08
+    - **weight_decay** (float|Tensor) - 权重衰减系数,是一个float类型或者shape为[1],数据类型为float32的Variable类型。默认值为0.0
+    - **apply_decay_param_fun** (function|None, 可选) - 传入函数时,只有可以使 apply_decay_param_fun(Tensor)==True 的Tensor才会进行权重衰减。只有在想要指定需要进行权重衰减的参数时使用。默认值为None
+    - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。默认值为None,此时将不进行梯度裁剪。
+    - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None
+    - **lazy_mode** (bool, 可选) - 设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。 lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False
+
+
+**代码示例**
+
+.. 
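+The decoupled weight decay above can be illustrated with a short NumPy sketch of a single update step. It is purely illustrative: ``adamw_step`` and its arguments simply mirror the symbols in the formulas and are not part of the Paddle API.
+
+.. code-block:: python
+
+    import numpy as np
+
+    # One AdamW update on a single parameter array, following the formulas
+    # above; every name here mirrors a math symbol and is illustrative only.
+    def adamw_step(param, grad, moment_1, moment_2, t, lr=0.001,
+                   beta1=0.9, beta2=0.999, epsilon=1e-08, weight_decay=0.01):
+        t = t + 1
+        moment_1 = beta1 * moment_1 + (1 - beta1) * grad
+        moment_2 = beta2 * moment_2 + (1 - beta2) * grad * grad
+        lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
+        # decoupled weight decay: weight_decay * param enters the update
+        # directly instead of being added to the gradient (plain L2 penalty)
+        param = param - lr_t * (moment_1 / (np.sqrt(moment_2) + epsilon)
+                                + weight_decay * param)
+        return param, moment_1, moment_2, t
+
+    param = np.ones(3, dtype=np.float32)
+    grad = np.full(3, 0.1, dtype=np.float32)
+    param, m1, m2, t = adamw_step(param, grad, np.zeros_like(param),
+                                  np.zeros_like(param), t=0)
+    print(param)  # slightly below 1.0 after a single step
+
+In ``paddle.optimizer.AdamW`` this update is applied by ``step()``, with ``weight_decay`` providing the :math:`\lambda` coefficient above.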
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.AdamW(weight_decay=0.01, learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + + +.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +返回类型: tuple + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + + place = fluid.CPUPlace() + main = fluid.Program() + with fluid.program_guard(main): + x = fluid.data(name='x', shape=[None, 13], dtype='float32') + y = fluid.data(name='y', shape=[None, 1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + + adam_optimizer = paddle.optimizer.AdamW(weight_decay=0.01, + learning_rate=0.01) + adam_optimizer.minimize(avg_cost) + + fetch_list = [avg_cost] + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for data in train_reader(): + exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5, dtype="float32") + optimizer = paddle.optimizer.AdamW(weight_decay=0.01, + learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_gradients() + +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Tensor) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. 
code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.AdamW(weight_decay=0.01, + learning_rate=0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.current_step_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # set learning rate manually by framework Tensor + lr_var = paddle.create_global_var( + shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + lr = adam.current_step_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.7 + + +.. py:method:: current_step_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:当前步骤的学习率。 + +返回类型:float + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + # example1: LearningRateDecay is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding([10, 10]) + adam = paddle.optimizer.AdamW(learning_rate=0.001, parameters = emb.parameters(),weight_decay=0.01) + lr = adam.current_step_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = paddle.optimizer.AdamW(paddle.PiecewiseDecay(bd, value, 0), + parameters=linear.parameters(), + weight_decay=0.01) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/Adam_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adam_cn.rst index 2c8f8e567ab..4f7a829bffc 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adam_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adam_cn.rst @@ -1,16 +1,249 @@ -.. _cn_api_fluid_optimizer_Adam: +.. _cn_api_fluid_optimizer_AdamOptimizer: -Adam +AdamOptimizer ------------------------------- -.. py:attribute:: paddle.fluid.optimizer.Adam +.. py:class:: paddle.optimizer.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None, lazy_mode=False) -``AdamOptimizer`` 的别名 +Adam优化器出自 `Adam论文 `_ 的第二节,能够利用梯度的一阶矩估计和二阶矩估计动态调整每个参数的学习率。 +其参数更新的计算公式如下: +.. math:: + \\t = t + 1 +.. math:: + moment\_1\_out=\beta_1∗moment\_1+(1−\beta_1)∗grad +.. math:: + moment\_2\_out=\beta_2∗moment\_2+(1−\beta_2)∗grad*grad +.. math:: + learning\_rate=learning\_rate*\frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} +.. 
math:: + param\_out=param-learning\_rate*\frac{moment\_1}{\sqrt{moment\_2}+\epsilon}\\ +相关论文:`Adam: A Method for Stochastic Optimization `_ +参数: + - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **beta1** (float|Variable, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.9 + - **beta2** (float|Variable, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.999 + - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + - **lazy_mode** (bool, 可选) - 设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。 lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + +.. code-block:: python + + # Adam with beta1/beta2 as Tensor and weight_decay as float + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + beta1=beta1, + beta2=beta2, + weight_decay=0.01) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +返回类型: tuple + +**代码示例** + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + + place = fluid.CPUPlace() + main = fluid.Program() + with fluid.program_guard(main): + x = fluid.data(name='x', shape=[None, 13], dtype='float32') + y = fluid.data(name='y', shape=[None, 1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + + adam_optimizer = paddle.optimizer.AdamOptimizer(0.01) + adam_optimizer.minimize(avg_cost) + + fetch_list = [avg_cost] + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for data in train_reader(): + exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5, dtype="float32") + optimizer = paddle.optimizer.Adam(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_gradients() + +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Tensor) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.current_step_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # set learning rate manually by framework Tensor + lr_var = paddle.create_global_var( + shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + lr = adam.current_step_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.7 + + +.. py:method:: current_step_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:当前步骤的学习率。 + +返回类型:float + +**代码示例** + +.. 
code-block:: python + + import numpy as np + import paddle + # example1: LearningRateDecay is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding([10, 10]) + adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) + lr = adam.current_step_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = paddle.optimizer.Adam(paddle.PiecewiseDecay(bd, value, 0), + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/AdamaxOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamaxOptimizer_cn.rst deleted file mode 100644 index 1260ec166e3..00000000000 --- a/doc/fluid/api_cn/optimizer_cn/AdamaxOptimizer_cn.rst +++ /dev/null @@ -1,229 +0,0 @@ -.. _cn_api_fluid_optimizer_AdamaxOptimizer: - -AdamaxOptimizer -------------------------------- - -.. py:class:: paddle.fluid.optimizer.AdamaxOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameter_list=None, regularization=None, grad_clip=None, name=None) - - - - -Adamax优化器是参考 `Adam论文 `_ 第7节Adamax优化相关内容所实现的。Adamax算法是基于无穷大范数的 `Adam `_ 算法的一个变种,使学习率更新的算法更加稳定和简单。 - -其参数更新的计算公式如下: - -.. math:: - \\t = t + 1 -.. math:: - moment\_out=\beta_1∗moment+(1−\beta_1)∗grad -.. math:: - inf\_norm\_out=\max{(\beta_2∗inf\_norm+\epsilon, \left|grad\right|)} -.. math:: - learning\_rate=\frac{learning\_rate}{1-\beta_1^t} -.. math:: - param\_out=param−learning\_rate*\frac{moment\_out}{inf\_norm\_out}\\ - -相关论文:`Adam: A Method for Stochastic Optimization `_ - -论文中没有 ``epsilon`` 参数。但是,为了保持数值稳定性, 避免除0错误, 此处增加了这个参数。 - -参数: - - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001 - - **beta1** (float, 可选) - 一阶矩估计的指数衰减率,默认值为0.9 - - **beta2** (float, 可选) - 二阶矩估计的指数衰减率,默认值为0.999 - - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 - - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 - :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; - 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 - - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 - 默认值为None,此时将不进行梯度裁剪。 - - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None - -.. note:: - 目前 ``AdamaxOptimizer`` 不支持 Sparse Parameter Optimization(稀疏参数优化)。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - import numpy - - # First create the Executor. 
- place = fluid.CPUPlace() # fluid.CUDAPlace(0) - exe = fluid.Executor(place) - - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): - data = fluid.layers.data(name='X', shape=[1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - adam = fluid.optimizer.AdamaxOptimizer(learning_rate=0.2) - adam.minimize(loss) - - # Run the startup program once and only once. - exe.run(startup_program) - - x = numpy.random.random(size=(10, 1)).astype('float32') - outs = exe.run(program=train_program, - feed={'X': x}, - fetch_list=[loss.name]) - -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) - -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 - -参数: - - **loss** (Variable) – 需要最小化的损失值变量 - - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` - - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None - -返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 - -**代码示例**: - -.. code-block:: python - - import numpy - import paddle.fluid as fluid - - data = fluid.layers.data(name='X', shape=[1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - adam = fluid.optimizer.Adamax(learning_rate=0.2) - adam.minimize(loss) - - place = fluid.CPUPlace() # fluid.CUDAPlace(0) - exe = fluid.Executor(place) - - x = numpy.random.random(size=(10, 1)).astype('float32') - exe.run(fluid.default_startup_program()) - outs = exe.run(program=fluid.default_main_program(), - feed={'X': x}, - fetch_list=[loss.name]) - - - -.. py:method:: clear_gradients() - -**注意:** - - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - - -清除需要优化的参数的梯度。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - with fluid.dygraph.guard(): - value = np.arange(26).reshape(2, 13).astype("float32") - a = fluid.dygraph.to_variable(value) - linear = fluid.Linear(13, 5, dtype="float32") - optimizer = fluid.optimizer.AdamaxOptimizer(learning_rate=0.2, - parameter_list=linear.parameters()) - out = linear(a) - out.backward() - optimizer.minimize(out) - optimizer.clear_gradients() - -.. py:method:: set_lr() - -**注意:** - - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - -手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 - -参数: - value (float|Variable) - 需要设置的学习率的值。 - -返回:无 - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - - with fluid.dygraph.guard(): - linear = fluid.dygraph.nn.Linear(10, 10) - adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) - # 通过Python float数值手动设置学习率 - lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] - for i in range(5): - adam.set_lr(lr_list[i]) - print("current lr is {}".format(adam.current_step_lr())) - # 打印结果: - # current lr is 0.2 - # current lr is 0.3 - # current lr is 0.4 - # current lr is 0.5 - # current lr is 0.6 - - - # 通过 框架的Variable 设置学习率 - lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') - adam.set_lr(lr_var) - print("current lr is {}".format(adam.current_step_lr())) - # 打印结果: - # current lr is 0.7 - - - -.. py:method:: current_step_lr() - -**注意:** - - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - -获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 - -返回:当前步骤的学习率。 - -返回类型:float - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - # example1: LearningRateDecay is not used, return value is all the same - with fluid.dygraph.guard(): - emb = fluid.dygraph.Embedding([10, 10]) - adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) - lr = adam.current_step_lr() - print(lr) # 0.001 - - # example2: PiecewiseDecay is used, return the step learning rate - with fluid.dygraph.guard(): - inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") - linear = fluid.dygraph.nn.Linear(10, 10) - inp = fluid.dygraph.to_variable(inp) - out = linear(inp) - loss = fluid.layers.reduce_mean(out) - - bd = [2, 4, 6, 8] - value = [0.2, 0.4, 0.6, 0.8, 1.0] - adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), - parameter_list=linear.parameters()) - - # first step: learning rate is 0.2 - np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True - - # learning rate for different steps - ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] - for i in range(12): - adam.minimize(loss) - lr = adam.current_step_lr() - np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True - diff --git a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst index 0413d5b1095..44de6c7df21 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst @@ -1,17 +1,223 @@ -.. _cn_api_fluid_optimizer_Adamax: +.. _cn_api_fluid_optimizer_AdamaxOptimizer: -Adamax +AdamaxOptimizer ------------------------------- -.. py:attribute:: paddle.fluid.optimizer.Adamax +.. py:class:: paddle.optimizer.Adamax(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None) -``AdamaxOptimizer`` 的别名 +Adamax优化器是参考 `Adam论文 `_ 第7节Adamax优化相关内容所实现的。Adamax算法是基于无穷大范数的 `Adam `_ 算法的一个变种,使学习率更新的算法更加稳定和简单。 +其参数更新的计算公式如下: +.. math:: + \\t = t + 1 +.. math:: + moment\_out=\beta_1∗moment+(1−\beta_1)∗grad +.. math:: + inf\_norm\_out=\max{(\beta_2∗inf\_norm+\epsilon, \left|grad\right|)} +.. math:: + learning\_rate=\frac{learning\_rate}{1-\beta_1^t} +.. 
math:: + param\_out=param−learning\_rate*\frac{moment\_out}{inf\_norm\_out}\\ +相关论文:`Adam: A Method for Stochastic Optimization `_ +论文中没有 ``epsilon`` 参数。但是,为了保持数值稳定性, 避免除0错误, 此处增加了这个参数。 +参数: + - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001 + - **beta1** (float, 可选) - 一阶矩估计的指数衰减率,默认值为0.9 + - **beta2** (float, 可选) - 二阶矩估计的指数衰减率,默认值为0.999 + - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None +.. note:: + 目前 ``AdamaxOptimizer`` 不支持 Sparse Parameter Optimization(稀疏参数优化)。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + + +.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +**代码示例** + +.. code-block:: python + + import numpy + import paddle.fluid as fluid + + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + adam = paddle.optimizer.Adamax(learning_rate=0.2) + adam.minimize(loss) + + place = fluid.CPUPlace() # fluid.CUDAPlace(0) + exe = fluid.Executor(place) + + x = numpy.random.random(size=(10, 1)).astype('float32') + exe.run(fluid.default_startup_program()) + outs = exe.run(program=fluid.default_main_program(), + feed={'X': x}, + fetch_list=[loss.name]) + + + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. 
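code-block:: python

+    # 编者补充的示意代码(并非 Adamax 的官方实现):用 NumPy 按上面的公式
+    # 手工迭代几步,观察 moment、inf_norm 和参数的变化;梯度取固定值仅作演示。
+    import numpy as np
+
+    beta1, beta2, epsilon, base_lr = 0.9, 0.999, 1e-8, 0.001
+    param = np.array([0.5, -0.3], dtype="float32")
+    grad = np.array([0.1, 0.2], dtype="float32")
+    moment = np.zeros_like(param)
+    inf_norm = np.zeros_like(param)
+
+    for t in range(1, 4):
+        moment = beta1 * moment + (1 - beta1) * grad
+        inf_norm = np.maximum(beta2 * inf_norm + epsilon, np.abs(grad))
+        lr_t = base_lr / (1 - beta1 ** t)
+        param = param - lr_t * moment / inf_norm
+        print(t, param)
+
+以上数值示意由编者添加,仅用于说明前述更新公式,并非 ``paddle.optimizer.Adamax`` 的实际实现;Paddle 中的标准用法见下面的代码示例。
+
+.. 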
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5, dtype="float32") + optimizer = paddle.optimizer.Adamax(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_gradients() + +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Tensor) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.Adamax(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.current_step_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # set learning rate manually by framework Tensor + lr_var = paddle.create_global_var( + shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + lr = adam.current_step_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.7 + + +.. py:method:: current_step_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:当前步骤的学习率。 + +返回类型:float + +**代码示例** + +.. code-block:: python + + + import numpy as np + import paddle + # example1: LearningRateDecay is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding([10, 10]) + adam = paddle.optimizer.Adamax(0.001, parameters = emb.parameters()) + lr = adam.current_step_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = paddle.optimizer.Adamax(paddle.PiecewiseDecay(bd, value, 0), + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst new file mode 100644 index 00000000000..2a580dfbfdb --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst @@ -0,0 +1,206 @@ +.. _cn_api_fluid_optimizer_AdamOptimizer: + +AdamOptimizer +------------------------------- + +.. 
py:class:: paddle.optimizer.Optimizer(learning_rate=0.001, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None) + + + +优化器的基类。 + +参数: + - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + - **lazy_mode** (bool, 可选) - 设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。 lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +返回类型: tuple + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + + place = fluid.CPUPlace() + main = fluid.Program() + with fluid.program_guard(main): + x = fluid.data(name='x', shape=[None, 13], dtype='float32') + y = fluid.data(name='y', shape=[None, 1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + + adam_optimizer = paddle.optimizer.AdamOptimizer(0.01) + adam_optimizer.minimize(avg_cost) + + fetch_list = [avg_cost] + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for data in train_reader(): + exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. 
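code-block:: python

+    # 编者补充的示意代码:在动态图训练循环中,每轮参数更新后调用
+    # clear_gradients(),以免上一轮 backward() 的梯度累积到下一轮(此处以子类 Adam 为例)。
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+    value = np.arange(26).reshape(2, 13).astype("float32")
+    a = paddle.to_tensor(value)
+    linear = paddle.nn.Linear(13, 5, dtype="float32")
+    adam = paddle.optimizer.Adam(learning_rate=0.01,
+                                 parameters=linear.parameters())
+    for i in range(3):
+        out = linear(a)
+        loss = paddle.mean(out)
+        loss.backward()
+        adam.step()
+        adam.clear_gradients()
+
+以上循环写法为编者添加的示意(假设多次 backward() 之间梯度会默认累积,故每轮更新后清空);单次调用的标准示例如下。
+
+.. 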
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5, dtype="float32") + optimizer = paddle.optimizer.Adam(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_gradients() + +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Tensor) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.current_step_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # set learning rate manually by framework Tensor + lr_var = paddle.create_global_var( + shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + lr = adam.current_step_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.7 + + +.. py:method:: current_step_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:当前步骤的学习率。 + +返回类型:float + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + # example1: LearningRateDecay is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding([10, 10]) + adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) + lr = adam.current_step_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = paddle.optimizer.Adam(paddle.PiecewiseDecay(bd, value, 0), + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/RMSPropOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst similarity index 56% rename from doc/fluid/api_cn/optimizer_cn/RMSPropOptimizer_cn.rst rename to doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index ac30efa7823..ecc81800c2c 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSPropOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -3,7 +3,7 @@ RMSPropOptimizer ------------------------------- -.. py:class:: paddle.fluid.optimizer.RMSPropOptimizer(learning_rate, rho=0.95, epsilon=1e-06, momentum=0.0, centered=False, parameter_list=None, regularization=None, grad_clip=None, name=None) +.. 
py:class:: paddle.optimizer.RMSPropOptimizer(learning_rate, rho=0.95, epsilon=1e-06, momentum=0.0, centered=False, parameters=None, weight_decay=None, grad_clip=None, name=None) @@ -33,12 +33,12 @@ RMSPropOptimizer 参数: - **learning_rate** (float) - 全局学习率。 - - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **rho** (float,可选) - rho是等式中的 :math:`rho` ,默认值0.95。 - **epsilon** (float,可选) - 等式中的epsilon是平滑项,避免被零除,默认值1e-6。 - **momentum** (float,可选) - 方程中的β是动量项,默认值0.0。 - **centered** (bool,可选) - 如果为True,则通过梯度的估计方差,对梯度进行归一化;如果False,则由未centered的第二个moment归一化。将此设置为True有助于模型训练,但会消耗额外计算和内存资源。默认为False。 - - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 @@ -53,40 +53,33 @@ RMSPropOptimizer .. code-block:: python import paddle - import paddle.fluid as fluid import numpy as np - - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(cost) - - rms_optimizer = fluid.optimizer.RMSProp(learning_rate=0.1) - rms_optimizer.minimize(avg_cost) - - fetch_list = [avg_cost] - train_reader = paddle.batch( - paddle.dataset.uci_housing.train(), batch_size=1) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - for data in train_reader(): - exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + adam = paddle.optimizer.RMSProp(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.step() + adam.clear_grad() -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) +.. 
py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 参数: - - **loss** (Variable) – 需要最小化的损失值变量 - - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` - - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None 返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 @@ -110,7 +103,7 @@ RMSPropOptimizer cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = fluid.layers.mean(cost) - rms_optimizer = fluid.optimizer.RMSProp(learning_rate=0.1) + rms_optimizer = paddle.optimizer.RMSProp(learning_rate=0.1) rms_optimizer.minimize(avg_cost) fetch_list = [avg_cost] @@ -137,19 +130,19 @@ RMSPropOptimizer .. code-block:: python - import paddle.fluid as fluid + import paddle import numpy as np - with fluid.dygraph.guard(): - value = np.arange(26).reshape(2, 13).astype("float32") - a = fluid.dygraph.to_variable(value) - linear = fluid.Linear(13, 5, dtype="float32") - optimizer = fluid.optimizer.RMSPropOptimizer(learning_rate=0.01, - parameter_list=linear.parameters()) - out = linear(a) - out.backward() - optimizer.minimize(out) - optimizer.clear_gradients() + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5, dtype="float32") + optimizer = paddle.optimizer.RMSProp(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_gradients() .. py:method:: set_lr() @@ -160,7 +153,7 @@ RMSPropOptimizer 手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 参数: - value (float|Variable) - 需要设置的学习率的值。 + value (float|Tensor) - 需要设置的学习率的值。 返回:无 @@ -168,32 +161,36 @@ RMSPropOptimizer .. 
code-block:: python - import paddle.fluid as fluid - - with fluid.dygraph.guard(): - linear = fluid.dygraph.nn.Linear(10, 10) - adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) - # 通过Python float数值手动设置学习率 - lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] - for i in range(5): - adam.set_lr(lr_list[i]) - print("current lr is {}".format(adam.current_step_lr())) - # 打印结果: - # current lr is 0.2 - # current lr is 0.3 - # current lr is 0.4 - # current lr is 0.5 - # current lr is 0.6 - - - # 通过 框架的Variable 设置学习率 - lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.RMSProp(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.current_step_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # set learning rate manually by framework Tensor + lr_var = paddle.create_global_var( + shape=[1], value=0.7, dtype='float32') adam.set_lr(lr_var) - print("current lr is {}".format(adam.current_step_lr())) - # 打印结果: + lr = adam.current_step_lr() + print("current lr is {}".format(lr)) + # Print: # current lr is 0.7 - .. py:method:: current_step_lr() **注意:** @@ -210,36 +207,34 @@ RMSPropOptimizer .. code-block:: python - import paddle.fluid as fluid import numpy as np - + import paddle # example1: LearningRateDecay is not used, return value is all the same - with fluid.dygraph.guard(): - emb = fluid.dygraph.Embedding([10, 10]) - adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) - lr = adam.current_step_lr() - print(lr) # 0.001 + paddle.disable_static() + emb = paddle.nn.Embedding([10, 10]) + adam = paddle.optimizer.RMSProp(0.001, parameters = emb.parameters()) + lr = adam.current_step_lr() + print(lr) # 0.001 # example2: PiecewiseDecay is used, return the step learning rate - with fluid.dygraph.guard(): - inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") - linear = fluid.dygraph.nn.Linear(10, 10) - inp = fluid.dygraph.to_variable(inp) - out = linear(inp) - loss = fluid.layers.reduce_mean(out) - - bd = [2, 4, 6, 8] - value = [0.2, 0.4, 0.6, 0.8, 1.0] - adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), - parameter_list=linear.parameters()) - - # first step: learning rate is 0.2 - np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True - - # learning rate for different steps - ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] - for i in range(12): - adam.minimize(loss) - lr = adam.current_step_lr() - np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True - + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = paddle.optimizer.RMSProp(paddle.PiecewiseDecay(bd, value, 0), + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, 
atol=0.0) # True From 3c2faeb32c3f2ec5a4b7d439a5df489de13d940e Mon Sep 17 00:00:00 2001 From: MRXLT Date: Thu, 20 Aug 2020 20:30:03 +0800 Subject: [PATCH 06/21] fix cn doc --- doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 4 ++-- doc/fluid/api_cn/optimizer_cn/Adam_cn.rst | 4 ++-- doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst | 4 ++-- doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst | 4 ++-- doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst | 6 +++--- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst index e8e635be072..fdea735b597 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -1,6 +1,6 @@ -.. _cn_api_fluid_optimizer_AdamOptimizer: +.. _cn_api_paddle_optimizer_AdamW: -AdamOptimizer +AdamW ------------------------------- .. py:class:: paddle.optimizer.AdamW(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=0.0, grad_clip=None, name=None, lazy_mode=False) diff --git a/doc/fluid/api_cn/optimizer_cn/Adam_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adam_cn.rst index 4f7a829bffc..f11b597adf9 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adam_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adam_cn.rst @@ -1,6 +1,6 @@ -.. _cn_api_fluid_optimizer_AdamOptimizer: +.. _cn_api_paddle_optimizer_Adam: -AdamOptimizer +Adam ------------------------------- .. py:class:: paddle.optimizer.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None, lazy_mode=False) diff --git a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst index 44de6c7df21..52c7b47b826 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst @@ -1,6 +1,6 @@ -.. _cn_api_fluid_optimizer_AdamaxOptimizer: +.. _cn_api_paddle_optimizer_Adamax: -AdamaxOptimizer +Adamax ------------------------------- .. py:class:: paddle.optimizer.Adamax(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None) diff --git a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst index 2a580dfbfdb..0f483b5151b 100644 --- a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst @@ -1,6 +1,6 @@ -.. _cn_api_fluid_optimizer_AdamOptimizer: +.. _cn_api_paddle_optimizer_Optimizer: -AdamOptimizer +Optimizer ------------------------------- .. py:class:: paddle.optimizer.Optimizer(learning_rate=0.001, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None) diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index ecc81800c2c..684849cf4d6 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -1,9 +1,9 @@ -.. _cn_api_fluid_optimizer_RMSPropOptimizer: +.. _cn_api_paddle_optimizer_RMSProp: -RMSPropOptimizer +RMSProp ------------------------------- -.. py:class:: paddle.optimizer.RMSPropOptimizer(learning_rate, rho=0.95, epsilon=1e-06, momentum=0.0, centered=False, parameters=None, weight_decay=None, grad_clip=None, name=None) +.. 
py:class:: paddle.optimizer.RMSProp(learning_rate, rho=0.95, epsilon=1e-06, momentum=0.0, centered=False, parameters=None, weight_decay=None, grad_clip=None, name=None) From 3b83b5918eca72699505bd0015c20aec45c48596 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Thu, 20 Aug 2020 21:30:36 +0800 Subject: [PATCH 07/21] update doc --- doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst index fdea735b597..31a24e3a38b 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -21,7 +21,7 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ From 2afc7de4ab69f549fc68e24bce7fc69778dde3f4 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Fri, 21 Aug 2020 14:10:01 +0800 Subject: [PATCH 08/21] fix code style --- doc/fluid/api/gen_doc.py | 53 +++++++++++++++++++++++++++------------- 1 file changed, 36 insertions(+), 17 deletions(-) diff --git a/doc/fluid/api/gen_doc.py b/doc/fluid/api/gen_doc.py index 83123214244..1f4f3ad4933 100644 --- a/doc/fluid/api/gen_doc.py +++ b/doc/fluid/api/gen_doc.py @@ -22,24 +22,37 @@ import paddle.tensor as tensor import paddle.nn as nn import paddle.optimizer as optimizer + #import paddle.complex as complex #import paddle.framework as framework + def parse_arg(): parser = argparse.ArgumentParser() parser.add_argument('--submodules', nargs="*") parser.add_argument( - '--module_name', type=str, help='Generate the documentation of which module') + '--module_name', + type=str, + help='Generate the documentation of which module') parser.add_argument( '--module_prefix', type=str, help='Generate the prefix of module') parser.add_argument( - '--output', type=str, help='Output file or output directory for output rst') + '--output', + type=str, + help='Output file or output directory for output rst') parser.add_argument( - '--output_name', type=str, help='Output file or output directory for output rst') + '--output_name', + type=str, + help='Output file or output directory for output rst') parser.add_argument( - '--output_dir', type=str, help='Output file or output directory for output rst') + '--output_dir', + type=str, + help='Output file or output directory for output rst') parser.add_argument( - '--to_multiple_files', type=bool, default=False, help='Whether to separate to multiple files') + '--to_multiple_files', + type=bool, + default=False, + help='Whether to separate to multiple files') return parser.parse_args() @@ -54,8 +67,9 @@ def print_item(self, name): else: pass + class DocGenerator(object): - def __init__(self, module_name=None, module_prefix=None): + def __init__(self, module_name=None, module_prefix=None): self.module_name = module_name self.module_prefix = module_prefix self.stream = None @@ -63,7 +77,7 @@ def __init__(self, module_name=None, module_prefix=None): @contextlib.contextmanager def guard(self, filename): assert self.stream is None, "stream must be None" - self.stream = open(filename, 'w') + self.stream = open(filename, 'w') yield self.stream.close() self.stream = None @@ -71,14 +85,15 @@ def guard(self, filename): def print_submodule(self, submodule_name): submodule = getattr(self.module, submodule_name) if submodule is None: - raise ValueError("Cannot find submodule {0}".format(submodule_name)) + raise ValueError( + "Cannot find submodule {0}".format(submodule_name)) self.print_section(submodule_name) - for item in sorted(submodule.__all__,key=str.lower): + for item 
in sorted(submodule.__all__, key=str.lower): self.print_item(item) def print_current_module(self): - for item in sorted(self.module.__all__,key=str.lower): + for item in sorted(self.module.__all__, key=str.lower): self.print_item(item) def print_section(self, name): @@ -92,7 +107,7 @@ def print_item(self, name, output_name): self.print_method(name) else: self.stream.close() - path = os.getcwd()+"/"+output_name+"/"+name+".rst" + path = os.getcwd() + "/" + output_name + "/" + name + ".rst" if name != "PipeReader": os.remove(path) @@ -150,7 +165,9 @@ def _print_ref_(self, name): self.stream.write(".. _api_{0}_{1}:\n\n".format("_".join( self.module_prefix.split(".")), name)) -def generate_doc(module_name, module_prefix, output, output_name, to_multiple_files, output_dir): + +def generate_doc(module_name, module_prefix, output, output_name, + to_multiple_files, output_dir): if module_name == "": module_name = None @@ -177,13 +194,14 @@ def generate_doc(module_name, module_prefix, output, output_name, to_multiple_fi else: gen.module_prefix = output_name + "." + module_prefix - dirname = output if to_multiple_files else os.path.dirname(output) + dirname = output if to_multiple_files else os.path.dirname(output) if output_dir != None: dirname = output_dir + "/" + dirname output = output_dir + "/" + output - if len(dirname) > 0 and (not os.path.exists(dirname) or not os.path.isdir(dirname)): + if len(dirname) > 0 and (not os.path.exists(dirname) or + not os.path.isdir(dirname)): os.makedirs(dirname) if not to_multiple_files: @@ -192,7 +210,7 @@ def generate_doc(module_name, module_prefix, output, output_name, to_multiple_fi prefix_len = len(gen.module_prefix) assert gen.module_prefix == gen.module_name[0:prefix_len], \ "module_prefix must be prefix of module_name" - diff_name = gen.module_name[prefix_len+1:] + diff_name = gen.module_name[prefix_len + 1:] if diff_name != "": header_name = diff_name else: @@ -204,7 +222,7 @@ def generate_doc(module_name, module_prefix, output, output_name, to_multiple_fi gen._print_header_(header_name, dot='=', is_title=True) gen.print_current_module() else: - apis = sorted(gen.module.__all__,key=str.lower) + apis = sorted(gen.module.__all__, key=str.lower) for api in apis: header_name = api with gen.guard(os.path.join(output, api + '.rst')): @@ -214,7 +232,8 @@ def generate_doc(module_name, module_prefix, output, output_name, to_multiple_fi def main(): args = parse_arg() - generate_doc(args.module_name, args.module_prefix, args.output, args.output_name, args.to_multiple_files, args.output_dir) + generate_doc(args.module_name, args.module_prefix, args.output, + args.output_name, args.to_multiple_files, args.output_dir) if __name__ == '__main__': From c61b91b3f75d3c0c53780e9e1da4c608656c5d79 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Fri, 21 Aug 2020 15:17:55 +0800 Subject: [PATCH 09/21] fix firmula of adamw --- doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst index 31a24e3a38b..0a10afefe08 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -21,7 +21,7 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ From bb7ddf8953cdea0d9a26e48e2099757caf680219 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Mon, 24 Aug 2020 13:12:04 +0800 Subject: [PATCH 10/21] update doc --- doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 52 ++++++++--- doc/fluid/api_cn/optimizer_cn/Adam_cn.rst 
| 48 ++++++++-- doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst | 52 ++++++++--- .../api_cn/optimizer_cn/Optimizer_cn.rst | 93 ++++++++++++------- doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst | 30 +++++- 5 files changed, 205 insertions(+), 70 deletions(-) diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst index 0a10afefe08..77c10ad382d 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -26,13 +26,13 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ 参数: - - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001 + - **learning_rate** (float|LearningrateDecay) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个LearningrateDecay类,默认值为0.001 - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **beta1** (float|Variable, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.9 - **beta2** (float|Variable, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.999 - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 - - **weight_decay** (float|Tensor) - 权重衰减系数,是一个float类型或者shape为[1] ,数据类型为float32的Variable类型。默认值为0.0 - - **apply_decay_param_fun** (function|None): 传入函数时,只有可以使 apply_decay_param_fun(Tensor)==True的Tensor会更新参数。只有在想要指定要更新的参数时使用。默认值为None + - **weight_decay** (float|Tensor, 可选) - 权重衰减系数,是一个float类型或者shape为[1] ,数据类型为float32的Variable类型。默认值为0.0 + - **apply_decay_param_fun** (function|None, 可选): 传入函数时,只有可以使 apply_decay_param_fun(Tensor)==True的Tensor会更新参数。只有在想要指定要更新的参数时使用。默认值为None - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 默认值为None,此时将不进行梯度裁剪。 - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None @@ -59,6 +59,34 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5, dtype="float32") + adam = paddle.optimizer.AdamW(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + .. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) 为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 @@ -69,7 +97,7 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ 的第二节 相关论文:`Adam: A Method for Stochastic Optimization `_ 参数: - - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001 + - **learning_rate** (float|LearningrateDecay) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个LearningrateDecay类,默认值为0.001 - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **beta1** (float|Variable, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.9 - **beta2** (float|Variable, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.999 @@ -84,6 +84,34 @@ Adam优化器出自 `Adam论文 `_ 的第二节 adam.step() adam.clear_grad() +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. 
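code-block:: python

+    # 编者补充的示意代码:按上文参数说明,beta1/beta2 除 float 外也可以传入
+    # shape 为 [1] 的 float32 Tensor,下面演示这种传法并执行一次 step()。
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+    value = np.arange(26).reshape(2, 13).astype("float32")
+    a = paddle.to_tensor(value)
+    linear = paddle.nn.Linear(13, 5, dtype="float32")
+    beta1 = paddle.to_tensor([0.9], dtype="float32")
+    beta2 = paddle.to_tensor([0.99], dtype="float32")
+    adam = paddle.optimizer.Adam(learning_rate=0.01,
+                                 beta1=beta1,
+                                 beta2=beta2,
+                                 parameters=linear.parameters())
+    out = linear(a)
+    loss = paddle.mean(out)
+    loss.backward()
+    adam.step()
+    adam.clear_grad()
+
+以上为编者添加的示意片段,演示以 Tensor 形式传入 ``beta1`` 、 ``beta2`` (具体行为以实际实现为准);``step()`` 的基本示例如下。
+
+.. 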
code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5, dtype="float32") + adam = paddle.optimizer.Adam(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + .. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) 为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 @@ -94,7 +122,7 @@ Adam优化器出自 `Adam论文 `_ 的第二节 - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None -返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 返回类型: tuple @@ -126,7 +154,7 @@ Adam优化器出自 `Adam论文 `_ 的第二节 for data in train_reader(): exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) -.. py:method:: clear_gradients() +.. py:method:: clear_grad() **注意:** @@ -151,7 +179,7 @@ Adam优化器出自 `Adam论文 `_ 的第二节 out = linear(a) out.backward() optimizer.step() - optimizer.clear_gradients() + optimizer.clear_grad() .. py:method:: set_lr() @@ -180,7 +208,7 @@ Adam优化器出自 `Adam论文 `_ 的第二节 lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] for i in range(5): adam.set_lr(lr_list[i]) - lr = adam.current_step_lr() + lr = adam.get_lr() print("current lr is {}".format(lr)) # Print: # current lr is 0.2 @@ -194,13 +222,13 @@ Adam优化器出自 `Adam论文 `_ 的第二节 lr_var = paddle.create_global_var( shape=[1], value=0.7, dtype='float32') adam.set_lr(lr_var) - lr = adam.current_step_lr() + lr = adam.get_lr() print("current lr is {}".format(lr)) # Print: # current lr is 0.7 -.. py:method:: current_step_lr() +.. 
py:method:: get_lr() **注意:** @@ -222,7 +250,7 @@ Adam优化器出自 `Adam论文 `_ 的第二节 paddle.disable_static() emb = paddle.nn.Embedding([10, 10]) adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) - lr = adam.current_step_lr() + lr = adam.get_lr() print(lr) # 0.001 # example2: PiecewiseDecay is used, return the step learning rate @@ -239,11 +267,11 @@ Adam优化器出自 `Adam论文 `_ 的第二节 parameters=linear.parameters()) # first step: learning rate is 0.2 - np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True # learning rate for different steps ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] for i in range(12): adam.step() - lr = adam.current_step_lr() + lr = adam.get_lr() np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst index 52c7b47b826..69e47932682 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst @@ -28,7 +28,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 论文中没有 ``epsilon`` 参数。但是,为了保持数值稳定性, 避免除0错误, 此处增加了这个参数。 参数: - - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001 + - **learning_rate** (float|LearningrateDecay) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个LearningrateDecay类,默认值为0.001 - **beta1** (float, 可选) - 一阶矩估计的指数衰减率,默认值为0.9 - **beta2** (float, 可选) - 二阶矩估计的指数衰减率,默认值为0.999 - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 @@ -41,7 +41,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None .. note:: - 目前 ``AdamaxOptimizer`` 不支持 Sparse Parameter Optimization(稀疏参数优化)。 + 目前 ``Adamax`` 不支持 Sparse Parameter Optimization(稀疏参数优化)。 **代码示例** @@ -56,13 +56,41 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 inp = paddle.to_tensor(inp) out = linear(inp) loss = paddle.mean(out) - adam = paddle.optimizer.Adam(learning_rate=0.1, + adam = paddle.optimizer.Adamax(learning_rate=0.1, parameters=linear.parameters()) out.backward() adam.step() adam.clear_grad() +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5, dtype="float32") + adam = paddle.optimizer.Adam(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + .. 
py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) 为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 @@ -73,7 +101,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None -返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 **代码示例** @@ -99,7 +127,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 -.. py:method:: clear_gradients() +.. py:method:: clear_grad() **注意:** @@ -124,7 +152,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 out = linear(a) out.backward() optimizer.step() - optimizer.clear_gradients() + optimizer.clear_grad() .. py:method:: set_lr() @@ -153,7 +181,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] for i in range(5): adam.set_lr(lr_list[i]) - lr = adam.current_step_lr() + lr = adam.get_lr() print("current lr is {}".format(lr)) # Print: # current lr is 0.2 @@ -167,13 +195,13 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 lr_var = paddle.create_global_var( shape=[1], value=0.7, dtype='float32') adam.set_lr(lr_var) - lr = adam.current_step_lr() + lr = adam.get_lr() print("current lr is {}".format(lr)) # Print: # current lr is 0.7 -.. py:method:: current_step_lr() +.. py:method:: get_lr() **注意:** @@ -196,7 +224,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 paddle.disable_static() emb = paddle.nn.Embedding([10, 10]) adam = paddle.optimizer.Adamax(0.001, parameters = emb.parameters()) - lr = adam.current_step_lr() + lr = adam.get_lr() print(lr) # 0.001 # example2: PiecewiseDecay is used, return the step learning rate @@ -213,11 +241,11 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 parameters=linear.parameters()) # first step: learning rate is 0.2 - np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True # learning rate for different steps ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] for i in range(12): adam.step() - lr = adam.current_step_lr() + lr = adam.get_lr() np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst index 0f483b5151b..13c3e9e55a8 100644 --- a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst @@ -10,7 +10,7 @@ Optimizer 优化器的基类。 参数: - - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001 + - **learning_rate** (float|LearningrateDecay) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个LearningrateDecay类,默认值为0.001 - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 @@ -19,13 +19,13 @@ Optimizer - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 
:ref:`cn_api_fluid_clip_GradientClipByValue` 。 默认值为None,此时将不进行梯度裁剪。 - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None - - **lazy_mode** (bool, 可选) - 设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。 lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False **代码示例** .. code-block:: python + #以子类Adam为例 import paddle import numpy as np @@ -41,6 +41,35 @@ Optimizer adam.step() adam.clear_grad() +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5, dtype="float32") + # This can be any optimizer supported by dygraph. + adam = paddle.optimizer.Adam(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + .. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) 为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 @@ -51,7 +80,7 @@ Optimizer - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None -返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 返回类型: tuple @@ -59,31 +88,24 @@ Optimizer .. code-block:: python - import paddle - import paddle.fluid as fluid - - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): - x = fluid.data(name='x', shape=[None, 13], dtype='float32') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(cost) - - adam_optimizer = paddle.optimizer.AdamOptimizer(0.01) - adam_optimizer.minimize(avg_cost) - - fetch_list = [avg_cost] - train_reader = paddle.batch( - paddle.dataset.uci_housing.train(), batch_size=1) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - for data in train_reader(): - exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) - -.. py:method:: clear_gradients() + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + +.. 
py:method:: clear_grad() **注意:** @@ -108,7 +130,7 @@ Optimizer out = linear(a) out.backward() optimizer.step() - optimizer.clear_gradients() + optimizer.clear_grad() .. py:method:: set_lr() @@ -137,7 +159,7 @@ Optimizer lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] for i in range(5): adam.set_lr(lr_list[i]) - lr = adam.current_step_lr() + lr = adam.get_lr() print("current lr is {}".format(lr)) # Print: # current lr is 0.2 @@ -151,13 +173,13 @@ Optimizer lr_var = paddle.create_global_var( shape=[1], value=0.7, dtype='float32') adam.set_lr(lr_var) - lr = adam.current_step_lr() + lr = adam.get_lr() print("current lr is {}".format(lr)) # Print: # current lr is 0.7 -.. py:method:: current_step_lr() +.. py:method:: get_lr() **注意:** @@ -179,7 +201,7 @@ Optimizer paddle.disable_static() emb = paddle.nn.Embedding([10, 10]) adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) - lr = adam.current_step_lr() + lr = adam.get_lr() print(lr) # 0.001 # example2: PiecewiseDecay is used, return the step learning rate @@ -196,11 +218,12 @@ Optimizer parameters=linear.parameters()) # first step: learning rate is 0.2 - np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True # learning rate for different steps ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] for i in range(12): adam.step() - lr = adam.current_step_lr() + lr = adam.get_lr() np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True + diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index 684849cf4d6..fdefc138a76 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -72,6 +72,34 @@ RMSProp adam.step() adam.clear_grad() +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5, dtype="float32") + adam = paddle.optimizer.RMSProp(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + .. 
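code-block:: python

+    # 编者补充的示意代码(并非框架内核实现):用 NumPy 粗略演示常见的 RMSProp
+    # 更新过程,并展示 centered=True 时分母的差异;变量名与取值仅作演示。
+    import numpy as np
+
+    rho, epsilon, momentum, lr = 0.95, 1e-6, 0.9, 0.01
+    grad = np.array([0.1, -0.2], dtype="float32")
+    param = np.array([0.5, 0.5], dtype="float32")
+    mean_square = np.zeros_like(grad)
+    mean_grad = np.zeros_like(grad)
+    velocity = np.zeros_like(grad)
+    centered = True
+
+    for t in range(3):
+        mean_square = rho * mean_square + (1 - rho) * grad * grad
+        if centered:
+            mean_grad = rho * mean_grad + (1 - rho) * grad
+            denom = mean_square - mean_grad * mean_grad
+        else:
+            denom = mean_square
+        velocity = momentum * velocity + lr * grad / np.sqrt(denom + epsilon)
+        param = param - velocity
+        print(t, param)
+
+以上数值示意由编者添加,按常见的 RMSProp 公式书写,系数与实现细节以框架源码为准,并非 ``paddle.optimizer.RMSProp`` 的官方示例。
+
.. 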
py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) 为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 @@ -82,7 +110,7 @@ RMSProp - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None -返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 返回类型: tuple From c9ba924cb67518f401b0d83df571e81253b6ea52 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Mon, 24 Aug 2020 15:03:53 +0800 Subject: [PATCH 11/21] update doc --- doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 8 ++-- doc/fluid/api_cn/optimizer_cn/Adam_cn.rst | 48 ++++++++----------- doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst | 4 +- .../api_cn/optimizer_cn/Optimizer_cn.rst | 2 +- doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst | 2 +- 5 files changed, 29 insertions(+), 35 deletions(-) diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst index 77c10ad382d..43e81e3ee55 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -28,10 +28,10 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ 的第二节 参数: - **learning_rate** (float|LearningrateDecay) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个LearningrateDecay类,默认值为0.001 - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **beta1** (float|Variable, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.9 - - **beta2** (float|Variable, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.999 + - **beta1** (float|Tensor, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.9 + - **beta2** (float|Tensor, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.999 - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; @@ -124,35 +124,29 @@ Adam优化器出自 `Adam论文 `_ 的第二节 返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 -返回类型: tuple **代码示例** .. 
code-block:: python - import paddle - import paddle.fluid as fluid - - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): - x = fluid.data(name='x', shape=[None, 13], dtype='float32') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(cost) - - adam_optimizer = paddle.optimizer.AdamOptimizer(0.01) - adam_optimizer.minimize(avg_cost) - - fetch_list = [avg_cost] - train_reader = paddle.batch( - paddle.dataset.uci_housing.train(), batch_size=1) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - for data in train_reader(): - exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) + import paddle + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() .. py:method:: clear_grad() @@ -181,7 +175,7 @@ Adam优化器出自 `Adam论文 `_ 的第二节 optimizer.step() optimizer.clear_grad() -.. py:method:: set_lr() +.. py:method:: set_lr(value) **注意:** diff --git a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst index 69e47932682..5e5dc213bb7 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst @@ -33,7 +33,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 - **beta2** (float, 可选) - 二阶矩估计的指数衰减率,默认值为0.999 - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 @@ -154,7 +154,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 optimizer.step() optimizer.clear_grad() -.. py:method:: set_lr() +.. py:method:: set_lr(value) **注意:** diff --git a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst index 13c3e9e55a8..9e3b5b2a892 100644 --- a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst @@ -132,7 +132,7 @@ Optimizer optimizer.step() optimizer.clear_grad() -.. py:method:: set_lr() +.. py:method:: set_lr(value) **注意:** diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index fdefc138a76..4c7662a4399 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -172,7 +172,7 @@ RMSProp optimizer.step() optimizer.clear_gradients() -.. py:method:: set_lr() +.. 
py:method:: set_lr(value) **注意:** From bfbe059afa19363664a74f0feced4bb18e9fb66a Mon Sep 17 00:00:00 2001 From: MRXLT Date: Mon, 24 Aug 2020 20:06:58 +0800 Subject: [PATCH 12/21] fix sample code --- doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst index 43e81e3ee55..01ec10cda13 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -3,7 +3,7 @@ AdamW ------------------------------- -.. py:class:: paddle.optimizer.AdamW(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=0.0, grad_clip=None, name=None, lazy_mode=False) +.. py:class:: paddle.optimizer.AdamW(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=0.01, grad_clip=None, name=None, lazy_mode=False) @@ -31,7 +31,7 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 Date: Tue, 25 Aug 2020 10:40:03 +0800 Subject: [PATCH 13/21] fix sample code --- doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 41 ++++++++----------- doc/fluid/api_cn/optimizer_cn/Adam_cn.rst | 37 +++++++++-------- doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst | 33 +++++++-------- .../api_cn/optimizer_cn/Optimizer_cn.rst | 3 ++ doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst | 37 +++++++---------- 5 files changed, 72 insertions(+), 79 deletions(-) diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst index 01ec10cda13..8d73bfea769 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -107,29 +107,24 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ 的第二节 .. code-block:: python - import paddle - - paddle.disable_static() - inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") - linear = paddle.nn.Linear(10, 10) - inp = paddle.to_tensor(inp) - out = linear(inp) - loss = paddle.mean(out) - - beta1 = paddle.to_tensor([0.9], dtype="float32") - beta2 = paddle.to_tensor([0.99], dtype="float32") - - adam = paddle.optimizer.Adam(learning_rate=0.1, - parameters=linear.parameters(), - weight_decay=0.01) - out.backward() - adam.minimize(loss) - adam.clear_grad() + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() .. py:method:: clear_grad() diff --git a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst index 5e5dc213bb7..a1a9d33454a 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst @@ -107,24 +107,25 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 .. 
code-block:: python - import numpy - import paddle.fluid as fluid - - data = fluid.layers.data(name='X', shape=[1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - adam = paddle.optimizer.Adamax(learning_rate=0.2) - adam.minimize(loss) + import paddle + import numpy as np - place = fluid.CPUPlace() # fluid.CUDAPlace(0) - exe = fluid.Executor(place) - - x = numpy.random.random(size=(10, 1)).astype('float32') - exe.run(fluid.default_startup_program()) - outs = exe.run(program=fluid.default_main_program(), - feed={'X': x}, - fetch_list=[loss.name]) + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + adam = paddle.optimizer.Adamax(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() .. py:method:: clear_grad() diff --git a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst index 9e3b5b2a892..14f5227e229 100644 --- a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst @@ -88,6 +88,9 @@ Optimizer .. code-block:: python + import paddle + import numpy as np + paddle.disable_static() inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index 4c7662a4399..9e091b63f7f 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -119,31 +119,24 @@ RMSProp .. code-block:: python import paddle - import paddle.fluid as fluid import numpy as np - - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(cost) - - rms_optimizer = paddle.optimizer.RMSProp(learning_rate=0.1) - rms_optimizer.minimize(avg_cost) - - fetch_list = [avg_cost] - train_reader = paddle.batch( - paddle.dataset.uci_housing.train(), batch_size=1) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - for data in train_reader(): - exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.RMSProp(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() .. 
py:method:: clear_gradients() From 44866f0332fdf22e8e9f29c62d619c81e4ded828 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Tue, 25 Aug 2020 15:50:00 +0800 Subject: [PATCH 14/21] fix rmsprop sample code --- doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index 9e091b63f7f..cd8ba08dee3 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -193,7 +193,7 @@ RMSProp lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] for i in range(5): adam.set_lr(lr_list[i]) - lr = adam.current_step_lr() + lr = adam.get_lr() print("current lr is {}".format(lr)) # Print: # current lr is 0.2 @@ -207,12 +207,12 @@ RMSProp lr_var = paddle.create_global_var( shape=[1], value=0.7, dtype='float32') adam.set_lr(lr_var) - lr = adam.current_step_lr() + lr = adam.get_lr() print("current lr is {}".format(lr)) # Print: # current lr is 0.7 -.. py:method:: current_step_lr() +.. py:method:: get_lr() **注意:** @@ -234,7 +234,7 @@ RMSProp paddle.disable_static() emb = paddle.nn.Embedding([10, 10]) adam = paddle.optimizer.RMSProp(0.001, parameters = emb.parameters()) - lr = adam.current_step_lr() + lr = adam.get_lr() print(lr) # 0.001 # example2: PiecewiseDecay is used, return the step learning rate @@ -251,11 +251,11 @@ RMSProp parameters=linear.parameters()) # first step: learning rate is 0.2 - np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True # learning rate for different steps ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] for i in range(12): adam.step() - lr = adam.current_step_lr() + lr = adam.get_lr() np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True From ea1fa07a126a866d7514ff7f872d43bdabaffe37 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Fri, 28 Aug 2020 15:02:27 +0800 Subject: [PATCH 15/21] change learningratedecay to lrscheduler --- doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 14 ++++++++------ doc/fluid/api_cn/optimizer_cn/Adam_cn.rst | 14 ++++++++------ doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst | 14 ++++++++------ doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst | 14 ++++++++------ doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst | 7 ++++--- 5 files changed, 36 insertions(+), 27 deletions(-) diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst index 8d73bfea769..11f617e46e5 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -26,7 +26,7 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ 参数: - - **learning_rate** (float|LearningrateDecay) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个LearningrateDecay类,默认值为0.001 + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **beta1** (float|Tensor, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.9 - **beta2** (float|Tensor, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.999 @@ -160,7 +160,7 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ **模式下生效** -手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 参数: value (float|Tensor) - 需要设置的学习率的值。 @@ -208,7 +208,7 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 
`_ **模式下生效** -获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 返回:当前步骤的学习率。 @@ -220,14 +220,14 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ 的第二节 相关论文:`Adam: A Method for Stochastic Optimization `_ 参数: - - **learning_rate** (float|LearningrateDecay) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个LearningrateDecay类,默认值为0.001 + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **beta1** (float|Tensor, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.9 - **beta2** (float|Tensor, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.999 @@ -182,7 +182,7 @@ Adam优化器出自 `Adam论文 `_ 的第二节 **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 参数: value (float|Tensor) - 需要设置的学习率的值。 @@ -229,7 +229,7 @@ Adam优化器出自 `Adam论文 `_ 的第二节 **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 返回:当前步骤的学习率。 @@ -241,14 +241,14 @@ Adam优化器出自 `Adam论文 `_ 的第二节 import numpy as np import paddle - # example1: LearningRateDecay is not used, return value is all the same + # example1: _LRScheduler is not used, return value is all the same paddle.disable_static() emb = paddle.nn.Embedding([10, 10]) adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) lr = adam.get_lr() print(lr) # 0.001 - # example2: PiecewiseDecay is used, return the step learning rate + # example2: PiecewiseLR is used, return the step learning rate paddle.disable_static() inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) @@ -258,7 +258,8 @@ Adam优化器出自 `Adam论文 `_ 的第二节 bd = [2, 4, 6, 8] value = [0.2, 0.4, 0.6, 0.8, 1.0] - adam = paddle.optimizer.Adam(paddle.PiecewiseDecay(bd, value, 0), + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.Adam(scheduler, parameters=linear.parameters()) # first step: learning rate is 0.2 @@ -269,4 +270,5 @@ Adam优化器出自 `Adam论文 `_ 的第二节 for i in range(12): adam.step() lr = adam.get_lr() + scheduler.step() np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst index a1a9d33454a..a0e28fcd913 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst @@ -28,7 +28,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 论文中没有 ``epsilon`` 参数。但是,为了保持数值稳定性, 避免除0错误, 此处增加了这个参数。 参数: - - **learning_rate** (float|LearningrateDecay) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个LearningrateDecay类,默认值为0.001 + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 - **beta1** (float, 可选) - 一阶矩估计的指数衰减率,默认值为0.9 - **beta2** (float, 可选) - 二阶矩估计的指数衰减率,默认值为0.999 - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 @@ -161,7 +161,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 参数: value (float|Tensor) - 需要设置的学习率的值。 @@ -208,7 +208,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 **1. 
该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 返回:当前步骤的学习率。 @@ -221,14 +221,14 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 import numpy as np import paddle - # example1: LearningRateDecay is not used, return value is all the same + # example1: _LRScheduler is not used, return value is all the same paddle.disable_static() emb = paddle.nn.Embedding([10, 10]) adam = paddle.optimizer.Adamax(0.001, parameters = emb.parameters()) lr = adam.get_lr() print(lr) # 0.001 - # example2: PiecewiseDecay is used, return the step learning rate + # example2: PiecewiseLR is used, return the step learning rate paddle.disable_static() inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) @@ -238,7 +238,8 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 bd = [2, 4, 6, 8] value = [0.2, 0.4, 0.6, 0.8, 1.0] - adam = paddle.optimizer.Adamax(paddle.PiecewiseDecay(bd, value, 0), + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.Adamax(scheduler, parameters=linear.parameters()) # first step: learning rate is 0.2 @@ -249,4 +250,5 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 for i in range(12): adam.step() lr = adam.get_lr() + scheduler.step() np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst index 14f5227e229..e8dde7991cd 100644 --- a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst @@ -10,7 +10,7 @@ Optimizer 优化器的基类。 参数: - - **learning_rate** (float|LearningrateDecay) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个LearningrateDecay类,默认值为0.001 + - **learning_rate** (float|_LRSeduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 @@ -141,7 +141,7 @@ Optimizer **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 参数: value (float|Tensor) - 需要设置的学习率的值。 @@ -188,7 +188,7 @@ Optimizer **1. 
该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 返回:当前步骤的学习率。 @@ -200,14 +200,14 @@ Optimizer import numpy as np import paddle - # example1: LearningRateDecay is not used, return value is all the same + # example1: _LRScheduler is not used, return value is all the same paddle.disable_static() emb = paddle.nn.Embedding([10, 10]) adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) lr = adam.get_lr() print(lr) # 0.001 - # example2: PiecewiseDecay is used, return the step learning rate + # example2: PiecewiseLR is used, return the step learning rate paddle.disable_static() inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) @@ -217,7 +217,8 @@ Optimizer bd = [2, 4, 6, 8] value = [0.2, 0.4, 0.6, 0.8, 1.0] - adam = paddle.optimizer.Adam(paddle.PiecewiseDecay(bd, value, 0), + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.Adam(scheduler, parameters=linear.parameters()) # first step: learning rate is 0.2 @@ -228,5 +229,6 @@ Optimizer for i in range(12): adam.step() lr = adam.get_lr() + scheduler.step() np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index cd8ba08dee3..094718fba28 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -171,7 +171,7 @@ RMSProp **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 参数: value (float|Tensor) - 需要设置的学习率的值。 @@ -218,7 +218,7 @@ RMSProp **1. 
该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 返回:当前步骤的学习率。 @@ -230,7 +230,7 @@ RMSProp import numpy as np import paddle - # example1: LearningRateDecay is not used, return value is all the same + # example1: _LRScheduler is not used, return value is all the same paddle.disable_static() emb = paddle.nn.Embedding([10, 10]) adam = paddle.optimizer.RMSProp(0.001, parameters = emb.parameters()) @@ -258,4 +258,5 @@ RMSProp for i in range(12): adam.step() lr = adam.get_lr() + scheduler.step() np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True From b19281bf9c71bfdd98370051750f681d97575a77 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Mon, 31 Aug 2020 11:35:36 +0800 Subject: [PATCH 16/21] fix doc --- doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 4 ++-- doc/fluid/api_cn/optimizer_cn/Adam_cn.rst | 4 ++-- doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst | 2 +- doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst | 3 +-- doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst | 4 ++-- 5 files changed, 8 insertions(+), 9 deletions(-) diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst index 11f617e46e5..96c46c19441 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -27,10 +27,10 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ 的第二节 参数: - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 - - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **beta1** (float|Tensor, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.9 - **beta2** (float|Tensor, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.999 - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 @@ -185,7 +185,7 @@ Adam优化器出自 `Adam论文 `_ 的第二节 手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 参数: - value (float|Tensor) - 需要设置的学习率的值。 + value (float) - 需要设置的学习率的值。 返回:无 diff --git a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst index a0e28fcd913..f9985a74645 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst @@ -164,7 +164,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 参数: - value (float|Tensor) - 需要设置的学习率的值。 + value (float) - 需要设置的学习率的值。 返回:无 diff --git a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst index e8dde7991cd..06e9100bba5 100644 --- a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst @@ -12,7 +12,6 @@ Optimizer 参数: - **learning_rate** (float|_LRSeduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: 
:ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 @@ -144,7 +143,7 @@ Optimizer 手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 参数: - value (float|Tensor) - 需要设置的学习率的值。 + value (float) - 需要设置的学习率的值。 返回:无 diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index 094718fba28..f51d6a04b75 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -33,11 +33,11 @@ RMSProp 参数: - **learning_rate** (float) - 全局学习率。 - - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **rho** (float,可选) - rho是等式中的 :math:`rho` ,默认值0.95。 - **epsilon** (float,可选) - 等式中的epsilon是平滑项,避免被零除,默认值1e-6。 - **momentum** (float,可选) - 方程中的β是动量项,默认值0.0。 - **centered** (bool,可选) - 如果为True,则通过梯度的估计方差,对梯度进行归一化;如果False,则由未centered的第二个moment归一化。将此设置为True有助于模型训练,但会消耗额外计算和内存资源。默认为False。 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 @@ -174,7 +174,7 @@ RMSProp 手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 参数: - value (float|Tensor) - 需要设置的学习率的值。 + value (float) - 需要设置的学习率的值。 返回:无 From bd4a33020c6e0b16117d92f13bdfadb36327b031 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Mon, 31 Aug 2020 19:38:59 +0800 Subject: [PATCH 17/21] fix return --- doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 8 +++----- doc/fluid/api_cn/optimizer_cn/Adam_cn.rst | 5 ++--- doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst | 5 ++--- doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst | 6 ++---- doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst | 6 ++---- 5 files changed, 11 insertions(+), 19 deletions(-) diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst index 96c46c19441..37235a7ce12 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -3,7 +3,7 @@ AdamW ------------------------------- -.. py:class:: paddle.optimizer.AdamW(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=0.01, grad_clip=None, name=None, lazy_mode=False) +.. 
py:class:: paddle.optimizer.AdamW(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=0.01, apply_decay_param_fun=None, grad_clip=None, name=None, lazy_mode=False) @@ -100,7 +100,6 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ 的第二节 参数: value (float) - 需要设置的学习率的值。 -返回:无 +返回:None **代码示例** @@ -231,9 +231,8 @@ Adam优化器出自 `Adam论文 `_ 的第二节 获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 -返回:当前步骤的学习率。 +返回:float,当前步骤的学习率。 -返回类型:float **代码示例** diff --git a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst index f9985a74645..db8af9f5b60 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst @@ -166,7 +166,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 参数: value (float) - 需要设置的学习率的值。 -返回:无 +返回:None **代码示例** @@ -210,9 +210,8 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 -返回:当前步骤的学习率。 +返回:float,当前步骤的学习率。 -返回类型:float **代码示例** diff --git a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst index 06e9100bba5..5a5b64b171a 100644 --- a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst @@ -81,7 +81,6 @@ Optimizer 返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 -返回类型: tuple **代码示例** @@ -145,7 +144,7 @@ Optimizer 参数: value (float) - 需要设置的学习率的值。 -返回:无 +返回:None **代码示例** @@ -189,9 +188,8 @@ Optimizer 获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 -返回:当前步骤的学习率。 +返回:float,当前步骤的学习率。 -返回类型:float **代码示例** diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index f51d6a04b75..ec5c65f3c1f 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -112,7 +112,6 @@ RMSProp 返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 -返回类型: tuple **示例代码** @@ -176,7 +175,7 @@ RMSProp 参数: value (float) - 需要设置的学习率的值。 -返回:无 +返回:None **代码示例** @@ -220,9 +219,8 @@ RMSProp 获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 -返回:当前步骤的学习率。 +返回:float,当前步骤的学习率。 -返回类型:float **代码示例** From 715ecdbb92291a3322759db3a62d37b01e141279 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Tue, 1 Sep 2020 13:12:32 +0800 Subject: [PATCH 18/21] fix sample code --- doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 4 ++-- doc/fluid/api_cn/optimizer_cn/Adam_cn.rst | 4 ++-- doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst | 4 ++-- doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst | 4 ++-- doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst index 37235a7ce12..5b6173b5428 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -79,7 +79,7 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ 的第二节 paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) - linear = paddle.nn.Linear(13, 5, dtype="float32") + 
linear = paddle.nn.Linear(13, 5) adam = paddle.optimizer.Adam(learning_rate = 0.01, parameters = linear.parameters()) out = linear(a) @@ -168,7 +168,7 @@ Adam优化器出自 `Adam论文 `_ 的第二节 paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) - linear = paddle.nn.Linear(13, 5, dtype="float32") + linear = paddle.nn.Linear(13, 5) optimizer = paddle.optimizer.Adam(learning_rate=0.02, parameters=linear.parameters()) out = linear(a) diff --git a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst index db8af9f5b60..eeb183809c1 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst @@ -83,7 +83,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) - linear = paddle.nn.Linear(13, 5, dtype="float32") + linear = paddle.nn.Linear(13, 5) adam = paddle.optimizer.Adam(learning_rate = 0.01, parameters = linear.parameters()) out = linear(a) @@ -147,7 +147,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) - linear = paddle.nn.Linear(13, 5, dtype="float32") + linear = paddle.nn.Linear(13, 5) optimizer = paddle.optimizer.Adamax(learning_rate=0.02, parameters=linear.parameters()) out = linear(a) diff --git a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst index 5a5b64b171a..83506295849 100644 --- a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst @@ -60,7 +60,7 @@ Optimizer paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) - linear = paddle.nn.Linear(13, 5, dtype="float32") + linear = paddle.nn.Linear(13, 5) # This can be any optimizer supported by dygraph. 
adam = paddle.optimizer.Adam(learning_rate = 0.01, parameters = linear.parameters()) @@ -125,7 +125,7 @@ Optimizer paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) - linear = paddle.nn.Linear(13, 5, dtype="float32") + linear = paddle.nn.Linear(13, 5) optimizer = paddle.optimizer.Adam(learning_rate=0.02, parameters=linear.parameters()) out = linear(a) diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index ec5c65f3c1f..4dfa66bc7eb 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -92,7 +92,7 @@ RMSProp paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) - linear = paddle.nn.Linear(13, 5, dtype="float32") + linear = paddle.nn.Linear(13, 5) adam = paddle.optimizer.RMSProp(learning_rate = 0.01, parameters = linear.parameters()) out = linear(a) @@ -156,7 +156,7 @@ RMSProp paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) - linear = paddle.nn.Linear(13, 5, dtype="float32") + linear = paddle.nn.Linear(13, 5) optimizer = paddle.optimizer.RMSProp(learning_rate=0.02, parameters=linear.parameters()) out = linear(a) From 5180d0d20900dcb1d1df654dcddd706a3f4be531 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Tue, 1 Sep 2020 15:34:13 +0800 Subject: [PATCH 19/21] fix sample code --- doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst | 13 +------------ doc/fluid/api_cn/optimizer_cn/Adam_cn.rst | 13 +------------ doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst | 13 +------------ doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst | 13 +------------ doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst | 12 +----------- 5 files changed, 5 insertions(+), 59 deletions(-) diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst index 5b6173b5428..352cfcf892d 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -190,17 +190,6 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `_ 的第二节 # current lr is 0.5 # current lr is 0.6 - - # set learning rate manually by framework Tensor - lr_var = paddle.create_global_var( - shape=[1], value=0.7, dtype='float32') - adam.set_lr(lr_var) - lr = adam.get_lr() - print("current lr is {}".format(lr)) - # Print: - # current lr is 0.7 - - .. py:method:: get_lr() **注意:** @@ -242,7 +231,7 @@ Adam优化器出自 `Adam论文 `_ 的第二节 import paddle # example1: _LRScheduler is not used, return value is all the same paddle.disable_static() - emb = paddle.nn.Embedding([10, 10]) + emb = paddle.nn.Embedding(10, 10, sparse=False) adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) lr = adam.get_lr() print(lr) # 0.001 diff --git a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst index eeb183809c1..b38c4465712 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst @@ -191,17 +191,6 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 # current lr is 0.5 # current lr is 0.6 - - # set learning rate manually by framework Tensor - lr_var = paddle.create_global_var( - shape=[1], value=0.7, dtype='float32') - adam.set_lr(lr_var) - lr = adam.get_lr() - print("current lr is {}".format(lr)) - # Print: - # current lr is 0.7 - - .. 
py:method:: get_lr() **注意:** @@ -222,7 +211,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 import paddle # example1: _LRScheduler is not used, return value is all the same paddle.disable_static() - emb = paddle.nn.Embedding([10, 10]) + emb = paddle.nn.Embedding(10, 10, sparse=False) adam = paddle.optimizer.Adamax(0.001, parameters = emb.parameters()) lr = adam.get_lr() print(lr) # 0.001 diff --git a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst index 83506295849..5aa31be2965 100644 --- a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst @@ -169,17 +169,6 @@ Optimizer # current lr is 0.5 # current lr is 0.6 - - # set learning rate manually by framework Tensor - lr_var = paddle.create_global_var( - shape=[1], value=0.7, dtype='float32') - adam.set_lr(lr_var) - lr = adam.get_lr() - print("current lr is {}".format(lr)) - # Print: - # current lr is 0.7 - - .. py:method:: get_lr() **注意:** @@ -199,7 +188,7 @@ Optimizer import paddle # example1: _LRScheduler is not used, return value is all the same paddle.disable_static() - emb = paddle.nn.Embedding([10, 10]) + emb = paddle.nn.Embedding(10, 10, sparse=False) adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) lr = adam.get_lr() print(lr) # 0.001 diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index 4dfa66bc7eb..cd745a3e3a3 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -201,16 +201,6 @@ RMSProp # current lr is 0.5 # current lr is 0.6 - - # set learning rate manually by framework Tensor - lr_var = paddle.create_global_var( - shape=[1], value=0.7, dtype='float32') - adam.set_lr(lr_var) - lr = adam.get_lr() - print("current lr is {}".format(lr)) - # Print: - # current lr is 0.7 - .. 
py:method:: get_lr() **注意:** @@ -230,7 +220,7 @@ RMSProp import paddle # example1: _LRScheduler is not used, return value is all the same paddle.disable_static() - emb = paddle.nn.Embedding([10, 10]) + emb = paddle.nn.Embedding(10, 10, sparse=False) adam = paddle.optimizer.RMSProp(0.001, parameters = emb.parameters()) lr = adam.get_lr() print(lr) # 0.001 From 7bd76fc8c5b983bcf1206366c8063fbeb9c24e5a Mon Sep 17 00:00:00 2001 From: MRXLT Date: Tue, 1 Sep 2020 18:09:14 +0800 Subject: [PATCH 20/21] fix sample code --- doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index cd745a3e3a3..501c1687a6e 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -235,7 +235,7 @@ RMSProp bd = [2, 4, 6, 8] value = [0.2, 0.4, 0.6, 0.8, 1.0] - adam = paddle.optimizer.RMSProp(paddle.PiecewiseDecay(bd, value, 0), + adam = paddle.optimizer.RMSProp(paddle.optimizer.PiecewiseLR(bd, value, 0), parameters=linear.parameters()) # first step: learning rate is 0.2 From 342fa9648e02527e7de4b7f044622eb668ef5637 Mon Sep 17 00:00:00 2001 From: MRXLT Date: Tue, 1 Sep 2020 20:26:18 +0800 Subject: [PATCH 21/21] fix sample code --- doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst index 501c1687a6e..2439a95494d 100644 --- a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -235,7 +235,8 @@ RMSProp bd = [2, 4, 6, 8] value = [0.2, 0.4, 0.6, 0.8, 1.0] - adam = paddle.optimizer.RMSProp(paddle.optimizer.PiecewiseLR(bd, value, 0), + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.RMSProp(scheduler, parameters=linear.parameters()) # first step: learning rate is 0.2