@@ -312,8 +312,7 @@ def dynamic_lstm(input,
312312 cell_activation(str): The activation for cell output. Choices = ["sigmoid",
313313 "tanh", "relu", "identity"], default "tanh".
314314 candidate_activation(str): The activation for candidate hidden state.
315- Choices = ["sigmoid", "tanh",
316- "relu", "identity"],
315+ Choices = ["sigmoid", "tanh", "relu", "identity"],
317316 default "tanh".
318317 dtype(str): Data type. Choices = ["float32", "float64"], default "float32".
319318 name(str|None): A name for this layer(optional). If set None, the layer
@@ -488,35 +487,38 @@ def dynamic_lstmp(input,
488487 cell_activation(str): The activation for cell output. Choices = ["sigmoid",
489488 "tanh", "relu", "identity"], default "tanh".
490489 candidate_activation(str): The activation for candidate hidden state.
491- Choices = ["sigmoid", "tanh",
492- "relu", "identity"],
490+ Choices = ["sigmoid", "tanh", "relu", "identity"],
493491 default "tanh".
494492 proj_activation(str): The activation for projection output.
495- Choices = ["sigmoid", "tanh",
496- "relu", "identity"],
493+ Choices = ["sigmoid", "tanh", "relu", "identity"],
497494 default "tanh".
498495 dtype(str): Data type. Choices = ["float32", "float64"], default "float32".
499496 name(str|None): A name for this layer(optional). If set None, the layer
500497 will be named automatically.
501498
502499 Returns:
503- tuple: The projection of hidden state, and cell state of LSTMP. The \
504- shape of projection is (T x P), for the cell state which is \
505- (T x D), and both LoD is the same with the `input`.
500+ tuple: A tuple of two output variables: the projection of hidden \
501+ state, and the cell state of LSTMP. The shape of the projection \
502+ is (T x P), that of the cell state is (T x D), and both have \
503+ the same LoD as the `input`.
506504
507505 Examples:
508506 .. code-block:: python
509507
510- hidden_dim, proj_dim = 512, 256
511- fc_out = fluid.layers.fc(input=input_seq, size=hidden_dim * 4,
512- act=None, bias_attr=None)
513- proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out,
514- size=hidden_dim * 4,
515- proj_size=proj_dim,
516- use_peepholes=False,
517- is_reverse=True,
518- cell_activation="tanh",
519- proj_activation="tanh")
508+ dict_dim, emb_dim = 128, 64
509+ data = fluid.layers.data(name='sequence', shape=[128],
510+ dtype='int32', lod_level=1)
511+ emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
512+ hidden_dim, proj_dim = 512, 256
513+ fc_out = fluid.layers.fc(input=emb, size=hidden_dim * 4,
514+ act=None, bias_attr=None)
515+ proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out,
516+ size=hidden_dim * 4,
517+ proj_size=proj_dim,
518+ use_peepholes=False,
519+ is_reverse=True,
520+ cell_activation="tanh",
521+ proj_activation="tanh")
520522 """
521523
522524 helper = LayerHelper('lstmp', **locals())
@@ -574,10 +576,10 @@ def dynamic_gru(input,
574576 candidate_activation = 'tanh' ,
575577 h_0 = None ):
576578 """
577- **Dynamic GRU Layer**
579+ **Gated Recurrent Unit (GRU) Layer**
578580
579581 Refer to `Empirical Evaluation of Gated Recurrent Neural Networks on
580- Sequence Modeling <https://arxiv.org/abs/1412.3555>`_
582+ Sequence Modeling <https://arxiv.org/abs/1412.3555>`_.
581583
582584 The formula is as follows:
583585
@@ -624,17 +626,24 @@ def dynamic_gru(input,
624626 Choices = ["sigmoid", "tanh", "relu", "identity"], default "sigmoid".
625627 candidate_activation(str): The activation for candidate hidden state.
626628 Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh".
627- h_0 (Variable): The hidden output of the first time step.
629+ h_0 (Variable): The initial hidden state. If not set, it defaults to
630+ zero. This is a tensor with shape (N x D), where N is the number of
631+ total time steps of input mini-batch feature and D is the hidden
632+ size.
628633
629634 Returns:
630635 Variable: The hidden state of GRU. The shape is :math:`(T \\times D)`, \
631- and lod is the same with the input.
636+ and the sequence length is the same as that of the input.
632637
633638 Examples:
634639 .. code-block:: python
635640
641+ dict_dim, emb_dim = 128, 64
642+ data = fluid.layers.data(name='sequence', shape=[128],
643+ dtype='int32', lod_level=1)
644+ emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
636645 hidden_dim = 512
637- x = fluid.layers.fc(input=data , size=hidden_dim * 3)
646+ x = fluid.layers.fc(input=emb , size=hidden_dim * 3)
638647 hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim)
639648 """
640649
@@ -872,13 +881,13 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
872881
873882 Drop or keep each element of `x` independently. Dropout is a regularization
874883 technique for reducing overfitting by preventing neuron co-adaption during
875- training. The dropout operator randomly set (according to the given dropout
884+ training. The dropout operator randomly sets (according to the given dropout
876885 probability) the outputs of some units to zero, while the others remain
877886 unchanged.
878887
879888 Args:
880- x (Variable): The input tensor.
881- dropout_prob (float): Probability of setting units to zero.
889+ x (Variable): The input tensor variable.
890+ dropout_prob (float): Probability of setting units to zero.
882891 is_test (bool): A flag indicating whether it is in test phrase or not.
883892 seed (int): A Python integer used to create random seeds. If this
884893 parameter is set to None, a random seed is used.
@@ -888,13 +897,13 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
888897 will be named automatically.
889898
890899 Returns:
891- Variable: A tensor variable.
900+ Variable: A tensor variable with the same shape as `x`.
892901
893902 Examples:
894903 .. code-block:: python
895904
896- x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
897- droped = fluid.layers.dropout(input=x, dropout_rate=0.5)
905+ x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
906+ dropped = fluid.layers.dropout(x=x, dropout_prob=0.5)
898907 """
899908
900909 helper = LayerHelper('dropout', **locals())
@@ -2547,17 +2556,16 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
25472556 name(str|None): A name for this layer(optional). If set None, the layer
25482557 will be named automatically.
25492558
2550-
25512559 Returns:
2552- Variable: The output tensor variable.
2560+ Variable: The output tensor variable, with the same shape as `x`.
25532561
25542562 Examples:
25552563 .. code-block:: python
25562564
2557- data = fluid.layers.data(name="data",
2558- shape=(3, 17, 13),
2559- dtype="float32")
2560- normed = fluid.layers.l2_normalize(x=data, axis=1)
2565+ data = fluid.layers.data(name="data",
2566+ shape=(3, 17, 13),
2567+ dtype="float32")
2568+ normed = fluid.layers.l2_normalize(x=data, axis=1)
25612569 """
25622570
25632571 if len(x.shape) == 1:
0 commit comments