@@ -315,6 +315,8 @@ def fc(input,
         .. code-block:: python

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
             # when input is single tensor
             data = fluid.data(name="data", shape=[-1, 32], dtype="float32")
             fc = fluid.layers.fc(input=data, size=1000, act="tanh")
@@ -468,6 +470,9 @@ def embedding(input,

             import paddle.fluid as fluid
             import numpy as np
+            import paddle
+            paddle.enable_static()
+
             data = fluid.data(name='x', shape=[None, 1], dtype='int64')

             # example 1
@@ -731,6 +736,8 @@ def linear_chain_crf(input, label, param_attr=None, length=None):

             import paddle.fluid as fluid
             import numpy as np
+            import paddle
+            paddle.enable_static()

             # define net structure, using LoDTensor
             train_program = fluid.Program()
@@ -855,6 +862,8 @@ def crf_decoding(input, param_attr, label=None, length=None):
         .. code-block:: python

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()

             # LoDTensor-based example
             num_labels = 10
@@ -1458,6 +1467,9 @@ def conv2d(input,
         .. code-block:: python

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
+
             data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
             conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
     """
@@ -1728,6 +1740,8 @@ def conv3d(input,
         .. code-block:: python

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
             data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
             conv3d = fluid.layers.conv3d(input=data, num_filters=2, filter_size=3, act="relu")
     """
@@ -2377,6 +2391,7 @@ def adaptive_pool2d(input,
             # output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
             #
             import paddle
+            paddle.enable_static()
             data = paddle.rand(shape=[1,3,32,32])
             pool_out = paddle.fluid.layers.adaptive_pool2d(
                 input=data,
@@ -2531,6 +2546,7 @@ def adaptive_pool3d(input,
             #

             import paddle
+            paddle.enable_static()
             data = paddle.rand(shape=[1,3,32,32,32])
             pool_out = paddle.fluid.layers.adaptive_pool3d(
                 input=data,
@@ -2726,6 +2742,8 @@ def batch_norm(input,
         .. code-block:: python

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
             x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
             hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
             hidden2 = fluid.layers.batch_norm(input=hidden1)
@@ -2735,6 +2753,8 @@ def batch_norm(input,
             # batch_norm with momentum as Variable
             import paddle.fluid as fluid
             import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler
+            import paddle
+            paddle.enable_static()

             def get_decay_momentum(momentum_init, decay_steps, decay_rate):
                 global_step = lr_scheduler._decay_step_counter()
@@ -3134,6 +3154,8 @@ def instance_norm(input,
         .. code-block:: python

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
             x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
             hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
             hidden2 = fluid.layers.instance_norm(input=hidden1)
@@ -3269,6 +3291,7 @@ def data_norm(input,
         .. code-block:: python

             import paddle
+            paddle.enable_static()

             x = paddle.randn(shape=[32,100])
             hidden2 = paddle.static.nn.data_norm(input=x)
@@ -3451,6 +3474,8 @@ def layer_norm(input,

             import paddle.fluid as fluid
             import numpy as np
+            import paddle
+            paddle.enable_static()
             x = fluid.data(name='x', shape=[-1, 32, 32], dtype='float32')
             hidden1 = fluid.layers.layer_norm(input=x, begin_norm_axis=1)
             place = fluid.CPUPlace()
@@ -3566,6 +3591,9 @@ def group_norm(input,
         .. code-block:: python

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
+
             data = fluid.data(name='data', shape=[None, 8, 32, 32], dtype='float32')
             x = fluid.layers.group_norm(input=data, groups=4)
     """
@@ -3887,6 +3915,8 @@ def conv2d_transpose(input,
         .. code-block:: python

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
             data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
             conv2d_transpose = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3)
     """
@@ -4177,6 +4207,8 @@ def conv3d_transpose(input,
         .. code-block:: python

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
             data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
             conv3d_transpose = fluid.layers.conv3d_transpose(input=data, num_filters=2, filter_size=3)
     """
@@ -4659,7 +4691,7 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
     This OP computes the ``logical and`` of tensor elements over the given dimension, and outputs the result.

     Args:
-        input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
+        input (Tensor): the input tensor, its data type should be `bool`.
         dim (list|int, optional): The dimension along which the logical and is computed.
             If :attr:`None`, compute the logical and over all elements of
             :attr:`input` and return a Tensor variable with a single element,
@@ -4672,27 +4704,28 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
             will be named automatically. The default value is None.

     Returns:
-        Variable, the output data type is bool: the reduced tensor variable with ``logical and`` in given dims.
+        Tensor, the output data type is bool: the reduced tensor with ``logical and`` in given dims.

     Examples:
         .. code-block:: python

+            import paddle
             import paddle.fluid as fluid
             import paddle.fluid.layers as layers
             import numpy as np

             # x is a bool Tensor variable with following elements:
             #    [[True, False]
             #     [True, True]]
-            x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
-            x = layers.cast(x, 'bool')
+            x = paddle.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
+            x = paddle.cast(x, 'bool')

-            out = layers.reduce_all(x)          # False
-            out = layers.reduce_all(x, dim=0)   # [True, False]
-            out = layers.reduce_all(x, dim=-1)  # [False, True]
+            out = paddle.reduce_all(x)          # False
+            out = paddle.reduce_all(x, dim=0)   # [True, False]
+            out = paddle.reduce_all(x, dim=-1)  # [False, True]
             # keep_dim=False, x.shape=(2,2), out.shape=(2,)

-            out = layers.reduce_all(x, dim=1, keep_dim=True)  # [[False], [True]]
+            out = paddle.reduce_all(x, dim=1, keep_dim=True)  # [[False], [True]]
             # keep_dim=True, x.shape=(2,2), out.shape=(2,1)

     """
@@ -4719,7 +4752,7 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
     This OP computes the ``logical or`` of tensor elements over the given dimension, and outputs the result.

     Args:
-        input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
+        input (Tensor): the input tensor, its data type should be `bool`.
         dim (list|int, optional): The dimension along which the logical or is computed.
             If :attr:`None`, compute the logical or over all elements of
             :attr:`input` and return a Tensor variable with a single element,
@@ -4728,30 +4761,31 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
         keep_dim (bool): Whether to reserve the reduced dimension in the
             output Tensor. The result tensor will have one fewer dimension
             than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
-        name(str|None): A name for this layer (optional). If set None, the layer
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

     Returns:
-        Variable, the output data type is bool: the reduced tensor variable with ``logical or`` in given dims.
+        Tensor, the output data type is bool: the reduced tensor with ``logical or`` in given dims.

     Examples:
         .. code-block:: python

+            import paddle
             import paddle.fluid as fluid
             import paddle.fluid.layers as layers
             import numpy as np

             # x is a bool Tensor variable with following elements:
             #    [[True, False]
             #     [False, False]]
-            x = layers.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
-            x = layers.cast(x, 'bool')
+            x = paddle.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
+            x = paddle.cast(x, 'bool')

-            out = layers.reduce_any(x)          # True
-            out = layers.reduce_any(x, dim=0)   # [True, False]
-            out = layers.reduce_any(x, dim=-1)  # [True, False]
+            out = paddle.reduce_any(x)          # True
+            out = paddle.reduce_any(x, dim=0)   # [True, False]
+            out = paddle.reduce_any(x, dim=-1)  # [True, False]
             # keep_dim=False, x.shape=(2,2), out.shape=(2,)

-            out = layers.reduce_any(x, dim=1,
+            out = paddle.reduce_any(x, dim=1,
                                     keep_dim=True)  # [[True], [False]]
             # keep_dim=True, x.shape=(2,2), out.shape=(2,1)

@@ -5613,6 +5647,8 @@ def im2sequence(input,
         .. code-block:: python

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
             data = fluid.data(name='data', shape=[None, 3, 32, 32],
                               dtype='float32')
             output = fluid.layers.im2sequence(
@@ -5669,6 +5705,8 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
     Examples:
         >>> # for LoDTensor inputs
         >>> import paddle.fluid as fluid
+        >>> import paddle
+        >>> paddle.enable_static()
         >>> x = fluid.data(name='x', shape=[9, 16],
         >>>                dtype='float32', lod_level=1)
         >>> out = fluid.layers.row_conv(input=x, future_context_size=2)
@@ -5982,6 +6020,8 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
         .. code-block:: python

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
             global_step = fluid.layers.autoincreased_step_counter(
                 counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
     """
@@ -9730,6 +9770,8 @@ def prelu(x, mode, param_attr=None, name=None):
         .. code-block:: python

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
             from paddle.fluid.param_attr import ParamAttr
             x = fluid.data(name="x", shape=[None,5,10,10], dtype="float32")
             mode = 'channel'
@@ -14307,6 +14349,9 @@ def deformable_conv(input,
             # deformable conv v2:

             import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
+
             C_in, H_in, W_in = 3, 32, 32
             filter_size, deformable_groups = 3, 1
             data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
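Background for the change: Paddle 2.0 makes dynamic-graph (imperative) mode the default, so docstring examples that build a static graph with `fluid.data` no longer run as written; each example therefore now calls `paddle.enable_static()` before touching any static-graph API. Below is a minimal sketch of the pattern the updated examples follow, shown end to end with an executor; the layer choice, sizes, and batch of 4 are arbitrary illustration, not part of this diff.

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    # Paddle 2.0 starts in dynamic mode; the static-graph examples in this
    # file assume static mode, so it must be re-enabled explicitly.
    paddle.enable_static()

    # Declare a placeholder and a fully connected layer, as in the fc example.
    data = fluid.data(name="data", shape=[-1, 32], dtype="float32")
    fc = fluid.layers.fc(input=data, size=64, act="tanh")

    # Initialize parameters, then run the graph on a random batch.
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    out, = exe.run(feed={"data": np.random.rand(4, 32).astype("float32")},
                   fetch_list=[fc])
    print(out.shape)  # (4, 64)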