@@ -364,8 +364,7 @@ def dynamic_lstm(input,
         cell_activation(str): The activation for cell output. Choices = ["sigmoid",
             "tanh", "relu", "identity"], default "tanh".
         candidate_activation(str): The activation for candidate hidden state.
-            Choices = ["sigmoid", "tanh",
-            "relu", "identity"],
+            Choices = ["sigmoid", "tanh", "relu", "identity"],
             default "tanh".
         dtype(str): Data type. Choices = ["float32", "float64"], default "float32".
         name(str|None): A name for this layer(optional). If set None, the layer
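
For reference while reading this hunk, here is a minimal sketch of how the documented `dynamic_lstm` arguments fit together; the variable names and sizes are illustrative and not part of the patch:

.. code-block:: python

    import paddle.fluid as fluid

    dict_dim, emb_dim, hidden_dim = 128, 64, 512
    data = fluid.layers.data(name='sequence', shape=[1],
                             dtype='int32', lod_level=1)
    emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
    # dynamic_lstm expects its input width to be 4 * hidden size.
    fc_out = fluid.layers.fc(input=emb, size=hidden_dim * 4, act=None)
    hidden, cell = fluid.layers.dynamic_lstm(
        input=fc_out, size=hidden_dim * 4,
        candidate_activation='tanh', dtype='float32')
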
@@ -540,27 +539,31 @@ def dynamic_lstmp(input,
         cell_activation(str): The activation for cell output. Choices = ["sigmoid",
             "tanh", "relu", "identity"], default "tanh".
         candidate_activation(str): The activation for candidate hidden state.
-            Choices = ["sigmoid", "tanh",
-            "relu", "identity"],
+            Choices = ["sigmoid", "tanh", "relu", "identity"],
             default "tanh".
         proj_activation(str): The activation for projection output.
-            Choices = ["sigmoid", "tanh",
-            "relu", "identity"],
+            Choices = ["sigmoid", "tanh", "relu", "identity"],
             default "tanh".
         dtype(str): Data type. Choices = ["float32", "float64"], default "float32".
         name(str|None): A name for this layer(optional). If set None, the layer
             will be named automatically.
 
     Returns:
-        tuple: The projection of hidden state, and cell state of LSTMP. The \
-            shape of projection is (T x P), for the cell state which is \
-            (T x D), and both LoD is the same with the `input`.
+        tuple: A tuple of two output variables: the projection of the hidden \
+            state and the cell state of the LSTMP. The shape of the \
+            projection is (T x P), the shape of the cell state is (T x D), \
+            and both have the same LoD as the `input`.
 
     Examples:
+
         .. code-block:: python
 
+            dict_dim, emb_dim = 128, 64
+            data = fluid.layers.data(name='sequence', shape=[1],
+                                     dtype='int32', lod_level=1)
+            emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
             hidden_dim, proj_dim = 512, 256
-            fc_out = fluid.layers.fc(input=input_seq, size=hidden_dim * 4,
+            fc_out = fluid.layers.fc(input=emb, size=hidden_dim * 4,
                                      act=None, bias_attr=None)
             proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out,
                                                      size=hidden_dim * 4,
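
The `dynamic_lstmp` call in the example above is truncated by the diff context. A plausible completion under the documented interface, using the projection arguments this hunk describes (the argument values are illustrative, not taken from the patch):

.. code-block:: python

    proj_out, cell = fluid.layers.dynamic_lstmp(
        input=fc_out,
        size=hidden_dim * 4,       # 4 * D, matching the fc output width
        proj_size=proj_dim,        # P: width of the projected hidden state
        candidate_activation='tanh',
        proj_activation='tanh')
    # proj_out has shape (T x P); cell has shape (T x D),
    # and both share the input's LoD.
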
@@ -626,10 +629,10 @@ def dynamic_gru(input,
                 candidate_activation='tanh',
                 h_0=None):
     """
-    **Dynamic GRU Layer**
+    **Gated Recurrent Unit (GRU) Layer**
 
     Refer to `Empirical Evaluation of Gated Recurrent Neural Networks on
-    Sequence Modeling <https://arxiv.org/abs/1412.3555>`_
+    Sequence Modeling <https://arxiv.org/abs/1412.3555>`_.
 
     The formula is as follows:
 
@@ -676,17 +679,25 @@ def dynamic_gru(input,
             Choices = ["sigmoid", "tanh", "relu", "identity"], default "sigmoid".
         candidate_activation(str): The activation for candidate hidden state.
             Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh".
-        h_0 (Variable): The hidden output of the first time step.
+        h_0 (Variable): The initial hidden state. If not set, it defaults to
+            zero. This is a tensor with shape (N x D), where N is the batch
+            size (the number of sequences in the input mini-batch) and D is
+            the hidden size.
 
     Returns:
         Variable: The hidden state of GRU. The shape is :math:`(T \\times D)`, \
-            and lod is the same with the input.
+            and the sequence length is the same as that of the input.
 
     Examples:
+
         .. code-block:: python
 
+            dict_dim, emb_dim = 128, 64
+            data = fluid.layers.data(name='sequence', shape=[1],
+                                     dtype='int32', lod_level=1)
+            emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
             hidden_dim = 512
-            x = fluid.layers.fc(input=data, size=hidden_dim * 3)
+            x = fluid.layers.fc(input=emb, size=hidden_dim * 3)
             hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim)
     """
 
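
Tying the hunk together, a minimal sketch of a `dynamic_gru` call with the documented activation choices; note that the `fc` output width must be 3 * hidden size (names and sizes here are illustrative):

.. code-block:: python

    dict_dim, emb_dim, hidden_dim = 128, 64, 512
    data = fluid.layers.data(name='sequence', shape=[1],
                             dtype='int32', lod_level=1)
    emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
    # dynamic_gru expects an input of width 3 * hidden size.
    x = fluid.layers.fc(input=emb, size=hidden_dim * 3)
    hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim,
                                      gate_activation='sigmoid',
                                      candidate_activation='tanh')
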
@@ -924,13 +935,13 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
 
     Drop or keep each element of `x` independently. Dropout is a regularization
     technique for reducing overfitting by preventing neuron co-adaptation during
-    training. The dropout operator randomly set (according to the given dropout
+    training. The dropout operator randomly sets (according to the given dropout
     probability) the outputs of some units to zero, while the others remain
     unchanged.
 
     Args:
-        x (Variable): The input tensor.
-        dropout_prob (float): Probability of setting units to zero.
+        x (Variable): The input tensor variable.
+        dropout_prob (float): Probability of setting units to zero.
         is_test (bool): A flag indicating whether it is in test phase or not.
         seed (int): A Python integer used to create random seeds. If this
             parameter is set to None, a random seed is used.
@@ -940,13 +951,14 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
             will be named automatically.
 
     Returns:
-        Variable: A tensor variable.
+        Variable: A tensor variable with the same shape as `x`.
 
     Examples:
+
         .. code-block:: python
 
-            x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
-            droped = fluid.layers.dropout(input=x, dropout_rate=0.5)
+            x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
+            dropped = fluid.layers.dropout(x, dropout_prob=0.5)
     """
 
     helper = LayerHelper('dropout', **locals())
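
Since the docstring documents the `is_test` flag, a short sketch of the train/inference distinction it controls may help (a hypothetical snippet, not part of the patch):

.. code-block:: python

    x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
    # Training: roughly dropout_prob of the activations are zeroed out.
    train_out = fluid.layers.dropout(x, dropout_prob=0.5, is_test=False)
    # Inference: pass is_test=True so that no elements are dropped.
    test_out = fluid.layers.dropout(x, dropout_prob=0.5, is_test=True)
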
@@ -2990,32 +3002,33 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
     norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes
 
     .. math::
-        y = \frac{x}{ \sqrt{\sum {x^2} + epsion }}
+
+        y = \\frac{x}{\sqrt{\sum {x^2} + epsilon}}
 
     For `x` with more dimensions, this layer independently normalizes each 1-D
     slice along dimension `axis`.
 
     Args:
         x(Variable|list): The input tensor to l2_normalize layer.
-        axis(int): The axis on which to apply normalization. If `axis < 0`,
+        axis(int): The axis on which to apply normalization. If `axis < 0`, \
             the dimension to normalize is rank(X) + axis. -1 is the
             last dimension.
-        epsilon(float): The epsilon value is used to avoid division by zero,
+        epsilon(float): The epsilon value is used to avoid division by zero, \
             the default value is 1e-12.
-        name(str|None): A name for this layer(optional). If set None, the layer
+        name(str|None): A name for this layer(optional). If set None, the layer \
             will be named automatically.
 
-
     Returns:
-        Variable: The output tensor variable.
+        Variable: The output tensor variable, with the same shape as `x`.
 
     Examples:
+
         .. code-block:: python
 
-            data = fluid.layers.data(name="data",
-                                     shape=(3, 17, 13),
-                                     dtype="float32")
-            normed = fluid.layers.l2_normalize(x=data, axis=1)
+            data = fluid.layers.data(name="data",
+                                     shape=(3, 17, 13),
+                                     dtype="float32")
+            normed = fluid.layers.l2_normalize(x=data, axis=1)
     """
 
     if len(x.shape) == 1:
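
To make the formula concrete, a small NumPy sketch of the computation `l2_normalize` performs along `axis` (independent of the patch, assuming the epsilon shown in the signature):

.. code-block:: python

    import numpy as np

    x = np.random.rand(3, 17, 13).astype("float32")
    epsilon = 1e-12
    # y = x / sqrt(sum(x^2) + epsilon), taken along axis 1.
    norm = np.sqrt(np.sum(np.square(x), axis=1, keepdims=True) + epsilon)
    y = x / norm
    # Each 1-D slice along axis 1 now has (approximately) unit L2 norm.
    assert np.allclose(np.sum(np.square(y), axis=1), 1.0, atol=1e-4)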