Skip to content

Commit ea4d445

Browse files
committed
Update some doc about layers' API.
1 parent d516ace commit ea4d445

File tree

4 files changed

+76
-50
lines changed

4 files changed

+76
-50
lines changed

paddle/fluid/operators/gaussian_random_batch_size_like_op.cc

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -36,11 +36,12 @@ class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
3636
void Apply() override {
3737
AddAttr<float>("mean",
3838
"(float, default 0.0) "
39-
"mean of random tensor.")
39+
"The mean (or center) of the gaussian distribution.")
4040
.SetDefault(.0f);
4141
AddAttr<float>("std",
4242
"(float, default 1.0) "
43-
"std of random tensor.")
43+
"The standard deviation (std, or spread) of the "
44+
"gaussian distribution.")
4445
.SetDefault(1.0f);
4546
AddAttr<int>("seed",
4647
"(int, default 0) "
@@ -55,9 +56,11 @@ class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
5556
.SetDefault(framework::proto::VarType::FP32);
5657

5758
AddComment(R"DOC(
58-
GaussianRandom Operator.
5959
6060
Used to initialize tensors with gaussian random generator.
61+
The default mean of the distribution is 0.0 and the default standard
62+
deviation (std) of the distribution is 1.0. Users can set the mean and std
63+
by input arguments.
6164
)DOC");
6265
}
6366
};

python/paddle/fluid/layers/io.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -386,16 +386,16 @@ def random_data_generator(low, high, shapes, lod_levels, for_parallel=True):
386386
Variable: A Reader Variable from which we can get random data.
387387
388388
Examples:
389-
.. code-block:: python
390-
391-
reader = fluid.layers.io.random_data_generator(
392-
low=0.0,
393-
high=1.0,
394-
shapes=[(3,224,224), (1)],
395-
lod_levels=[0, 0])
389+
.. code-block:: python
396390
397-
# Via the reader, we can use 'read_file' layer to get data:
398-
image, label = fluid.layers.io.read_file(reader)
391+
import paddle.fluid as fluid
392+
reader = fluid.layers.io.random_data_generator(
393+
low=0.0,
394+
high=1.0,
395+
shapes=[(3,224,224), (1)],
396+
lod_levels=[0, 0])
397+
# Via the reader, we can use 'read_file' layer to get data:
398+
image, label = fluid.layers.io.read_file(reader)
399399
"""
400400
dtypes = [core.VarDesc.VarType.FP32] * len(shapes)
401401
shape_concat = []

python/paddle/fluid/layers/nn.py

Lines changed: 44 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -312,8 +312,7 @@ def dynamic_lstm(input,
312312
cell_activation(str): The activation for cell output. Choices = ["sigmoid",
313313
"tanh", "relu", "identity"], default "tanh".
314314
candidate_activation(str): The activation for candidate hidden state.
315-
Choices = ["sigmoid", "tanh",
316-
"relu", "identity"],
315+
Choices = ["sigmoid", "tanh", "relu", "identity"],
317316
default "tanh".
318317
dtype(str): Data type. Choices = ["float32", "float64"], default "float32".
319318
name(str|None): A name for this layer(optional). If set None, the layer
@@ -488,35 +487,38 @@ def dynamic_lstmp(input,
488487
cell_activation(str): The activation for cell output. Choices = ["sigmoid",
489488
"tanh", "relu", "identity"], default "tanh".
490489
candidate_activation(str): The activation for candidate hidden state.
491-
Choices = ["sigmoid", "tanh",
492-
"relu", "identity"],
490+
Choices = ["sigmoid", "tanh", "relu", "identity"],
493491
default "tanh".
494492
proj_activation(str): The activation for projection output.
495-
Choices = ["sigmoid", "tanh",
496-
"relu", "identity"],
493+
Choices = ["sigmoid", "tanh", "relu", "identity"],
497494
default "tanh".
498495
dtype(str): Data type. Choices = ["float32", "float64"], default "float32".
499496
name(str|None): A name for this layer(optional). If set None, the layer
500497
will be named automatically.
501498
502499
Returns:
503-
tuple: The projection of hidden state, and cell state of LSTMP. The \
504-
shape of projection is (T x P), for the cell state which is \
505-
(T x D), and both LoD is the same with the `input`.
500+
tuple: A tuple of two output variables: the projection of hidden state, \
501+
and cell state of LSTMP. The shape of the projection is (T x P), \
502+
for the cell state, which is (T x D); both LoDs are the same \
503+
as the `input`'s.
506504
507505
Examples:
508506
.. code-block:: python
509507
510-
hidden_dim, proj_dim = 512, 256
511-
fc_out = fluid.layers.fc(input=input_seq, size=hidden_dim * 4,
512-
act=None, bias_attr=None)
513-
proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out,
514-
size=hidden_dim * 4,
515-
proj_size=proj_dim,
516-
use_peepholes=False,
517-
is_reverse=True,
518-
cell_activation="tanh",
519-
proj_activation="tanh")
508+
dict_dim, emb_dim = 128, 64
509+
data = fluid.layers.data(name='sequence', shape=(128),
510+
dtype='int32', lod_level=1)
511+
emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
512+
hidden_dim, proj_dim = 512, 256
513+
fc_out = fluid.layers.fc(input=emb, size=hidden_dim * 4,
514+
act=None, bias_attr=None)
515+
proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out,
516+
size=hidden_dim * 4,
517+
proj_size=proj_dim,
518+
use_peepholes=False,
519+
is_reverse=True,
520+
cell_activation="tanh",
521+
proj_activation="tanh")
520522
"""
521523

522524
helper = LayerHelper('lstmp', **locals())
@@ -574,10 +576,10 @@ def dynamic_gru(input,
574576
candidate_activation='tanh',
575577
h_0=None):
576578
"""
577-
**Dynamic GRU Layer**
579+
**Gated Recurrent Unit (GRU) Layer**
578580
579581
Refer to `Empirical Evaluation of Gated Recurrent Neural Networks on
580-
Sequence Modeling <https://arxiv.org/abs/1412.3555>`_
582+
Sequence Modeling <https://arxiv.org/abs/1412.3555>`_ .
581583
582584
The formula is as follows:
583585
@@ -624,17 +626,24 @@ def dynamic_gru(input,
624626
Choices = ["sigmoid", "tanh", "relu", "identity"], default "sigmoid".
625627
candidate_activation(str): The activation for candidate hidden state.
626628
Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh".
627-
h_0 (Variable): The hidden output of the first time step.
629+
h_0 (Variable): This is the initial hidden state. If not set, the default is
630+
zero. This is a tensor with shape (N x D), where N is the number of
631+
total time steps of input mini-batch feature and D is the hidden
632+
size.
628633
629634
Returns:
630635
Variable: The hidden state of GRU. The shape is :math:`(T \\times D)`, \
631-
and lod is the same with the input.
636+
and the sequence length is the same as the input's.
632637
633638
Examples:
634639
.. code-block:: python
635640
641+
dict_dim, emb_dim = 128, 64
642+
data = fluid.layers.data(name='sequence', shape=(128),
643+
dtype='int32', lod_level=1)
644+
emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
636645
hidden_dim = 512
637-
x = fluid.layers.fc(input=data, size=hidden_dim * 3)
646+
x = fluid.layers.fc(input=emb, size=hidden_dim * 3)
638647
hidden = fluid.layers.dynamic_gru(input=x, dim=hidden_dim)
639648
"""
640649

@@ -872,13 +881,13 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
872881
873882
Drop or keep each element of `x` independently. Dropout is a regularization
874883
technique for reducing overfitting by preventing neuron co-adaption during
875-
training. The dropout operator randomly set (according to the given dropout
884+
training. The dropout operator randomly sets (according to the given dropout
876885
probability) the outputs of some units to zero, while others remain
877886
unchanged.
878887
879888
Args:
880-
x (Variable): The input tensor.
881-
dropout_prob (float): Probability of setting units to zero.
889+
x (Variable): The input tensor variable.
890+
dropout_prob (float): Probability of setting units to zero.
882891
is_test (bool): A flag indicating whether it is in test phase or not.
883892
seed (int): A Python integer used to create random seeds. If this
884893
parameter is set to None, a random seed is used.
@@ -888,13 +897,13 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
888897
will be named automatically.
889898
890899
Returns:
891-
Variable: A tensor variable.
900+
Variable: A tensor variable with the same shape as `x`.
892901
893902
Examples:
894903
.. code-block:: python
895904
896-
x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
897-
droped = fluid.layers.dropout(input=x, dropout_rate=0.5)
905+
x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
906+
dropped = fluid.layers.dropout(x=x, dropout_prob=0.5)
898907
"""
899908

900909
helper = LayerHelper('dropout', **locals())
@@ -2547,17 +2556,16 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
25472556
name(str|None): A name for this layer(optional). If set None, the layer
25482557
will be named automatically.
25492558
2550-
25512559
Returns:
2552-
Variable: The output tensor variable.
2560+
Variable: The output tensor variable, which has the same shape as `x`.
25532561
25542562
Examples:
25552563
.. code-block:: python
25562564
2557-
data = fluid.layers.data(name="data",
2558-
shape=(3, 17, 13),
2559-
dtype="float32")
2560-
normed = fluid.layers.l2_normalize(x=data, axis=1)
2565+
data = fluid.layers.data(name="data",
2566+
shape=(3, 17, 13),
2567+
dtype="float32")
2568+
normed = fluid.layers.l2_normalize(x=data, axis=1)
25612569
"""
25622570

25632571
if len(x.shape) == 1:

python/paddle/fluid/layers/tensor.py

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -486,11 +486,26 @@ def save_combine(x, file_path, overwrite=True):
486486
Saves a list of variables into a single file.
487487
488488
Args:
489-
x(list): A list of Tensor/LoDTensor to be saved together in a single file.
489+
x(list): A list of Tensor/LoDTensor variables to be saved together in
490+
a single file.
490491
file_path(str): The file path where variables will be saved.
491-
overwrite(bool): Whether or not cover the given file when it has already
492+
overwrite(bool): Whether or not to overwrite the given file when it already
492493
exists. If it is set to 'False' and the file already exists, a runtime
493494
error will be thrown.
495+
496+
Returns:
497+
There is no return value.
498+
499+
Examples:
500+
.. code-block:: python
501+
502+
v1 = fluid.layers.data(name="data",
503+
shape=(4, 6),
504+
dtype="float32")
505+
v2 = fluid.layers.data(name="data",
506+
shape=(6, 8, 4),
507+
dtype="float32")
508+
normed = fluid.layers.save_combine([v1, v2], file_path="output")
494509
"""
495510
helper = LayerHelper("save_combine", **locals())
496511
helper.append_op(

0 commit comments

Comments
 (0)