Skip to content

Commit 29ddf6c

Browse files
committed
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into dev_add_doc
2 parents b00fbd9 + fd77126 commit 29ddf6c

27 files changed

+836
-384
lines changed

paddle/fluid/inference/tensorrt/convert/op_converter.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,8 @@ class OpConverter {
6464
(*it)(op, scope, test_mode);
6565
}
6666

67-
// convert fluid block to tensorrt network
67+
// Convert a fluid block to tensorrt network, NOTE it just converts operators,
68+
// the INetwork's inputs and outputs should be specified in some other modules.
6869
void ConvertBlock(const framework::proto::BlockDesc& block,
6970
const std::unordered_set<std::string>& parameters,
7071
const framework::Scope& scope, TensorRTEngine* engine) {

paddle/fluid/inference/tensorrt/engine.h

Lines changed: 23 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -51,11 +51,12 @@ class TensorRTEngine : public EngineBase {
5151
nvinfer1::Weights w_;
5252
};
5353

54-
TensorRTEngine(int max_batch, int max_workspace, cudaStream_t* stream,
54+
TensorRTEngine(int max_batch, int max_workspace,
55+
cudaStream_t* stream = nullptr,
5556
nvinfer1::ILogger& logger = NaiveLogger::Global())
5657
: max_batch_(max_batch),
5758
max_workspace_(max_workspace),
58-
stream_(stream),
59+
stream_(stream ? stream : &default_stream_),
5960
logger_(logger) {}
6061

6162
virtual ~TensorRTEngine();
@@ -121,6 +122,8 @@ class TensorRTEngine : public EngineBase {
121122
// the max memory size the engine uses
122123
int max_workspace_;
123124
cudaStream_t* stream_;
125+
// If stream_ is not set from outside, hold its own stream.
126+
cudaStream_t default_stream_;
124127
nvinfer1::ILogger& logger_;
125128

126129
std::vector<Buffer> buffers_;
@@ -165,20 +168,31 @@ class TensorRTEngine : public EngineBase {
165168
*/
166169
class TRT_EngineManager {
167170
public:
168-
TensorRTEngine* Create(int max_batch, int max_workspace,
169-
cudaStream_t* stream) {
170-
engines_.emplace_back(new TensorRTEngine(max_batch, max_workspace, stream));
171-
return engines_.back().get();
171+
bool HasEngine(const std::string& name) const {
172+
return engines_.count(name) != 0;
173+
}
174+
175+
// Get an engine called `name`.
176+
TensorRTEngine* Get(const std::string& name) const {
177+
return engines_.at(name).get();
178+
}
179+
180+
// Create or get an engine called `name`
181+
TensorRTEngine* Create(int max_batch, int max_workspace, cudaStream_t* stream,
182+
const std::string& name) {
183+
auto* p = new TensorRTEngine(max_batch, max_workspace, stream);
184+
engines_[name].reset(p);
185+
return p;
172186
}
173187

174188
void DeleteALl() {
175-
for (auto& ptr : engines_) {
176-
ptr.reset(nullptr);
189+
for (auto& item : engines_) {
190+
item.second.reset(nullptr);
177191
}
178192
}
179193

180194
private:
181-
std::vector<std::unique_ptr<TensorRTEngine>> engines_;
195+
std::unordered_map<std::string, std::unique_ptr<TensorRTEngine>> engines_;
182196
};
183197

184198
} // namespace tensorrt

paddle/fluid/operators/activation_op.cc

Lines changed: 24 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -252,15 +252,14 @@ class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
252252
AddOutput("Out", "Output of Softshrink operator");
253253
AddAttr<float>("lambda", "non-negative offset").SetDefault(0.5f);
254254
AddComment(R"DOC(
255-
Softshrink Activation Operator.
255+
:strong:`Softshrink Activation Operator`
256256
257-
$$
258-
out = \begin{cases}
259-
x - \lambda, \text{if } x > \lambda \\
260-
x + \lambda, \text{if } x < -\lambda \\
261-
0, \text{otherwise}
262-
\end{cases}
263-
$$
257+
.. math::
258+
out = \begin{cases}
259+
x - \lambda, \text{if } x > \lambda \\
260+
x + \lambda, \text{if } x < -\lambda \\
261+
0, \text{otherwise}
262+
\end{cases}
264263
265264
)DOC");
266265
}
@@ -271,18 +270,18 @@ class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
271270
void Make() override {
272271
AddInput("X", "Input of HardShrink operator");
273272
AddOutput("Out", "Output of HardShrink operator");
274-
AddAttr<float>("threshold", "The value of threshold for HardShrink")
273+
AddAttr<float>("threshold",
274+
"The value of threshold for HardShrink. [default: 0.5]")
275275
.SetDefault(0.5f);
276276
AddComment(R"DOC(
277-
HardShrink Activation Operator.
277+
:strong:`HardShrink activation operator`
278278
279-
$$
280-
out = \begin{cases}
281-
x, \text{if } x > \lambda \\
282-
x, \text{if } x < -\lambda \\
283-
0, \text{otherwise}
284-
\end{cases}
285-
$$
279+
.. math::
280+
out = \begin{cases}
281+
x, \text{if } x > \lambda \\
282+
x, \text{if } x < -\lambda \\
283+
0, \text{otherwise}
284+
\end{cases}
286285
287286
)DOC");
288287
}
@@ -394,18 +393,18 @@ class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
394393
void Make() override {
395394
AddInput("X", "Input of ThresholdedRelu operator");
396395
AddOutput("Out", "Output of ThresholdedRelu operator");
397-
AddAttr<float>("threshold", "The threshold location of activation")
396+
AddAttr<float>("threshold",
397+
"The threshold location of activation. [default 1.0].")
398398
.SetDefault(1.0f);
399399
AddComment(R"DOC(
400-
ThresholdedRelu Activation Operator.
400+
:strong:`ThresholdedRelu activation operator`
401401
402-
$$
403-
out = \begin{cases}
404-
x, \text{if } x > threshold \\
405-
0, \text{otherwise}
406-
\end{cases}
407-
$$
402+
.. math::
408403
404+
out = \begin{cases}
405+
x, \text{if } x > threshold \\
406+
0, \text{otherwise}
407+
\end{cases}
409408
)DOC");
410409
}
411410
};

paddle/fluid/operators/compare_op.cc

Lines changed: 15 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -23,30 +23,26 @@ class CompareOpProtoMaker : public framework::OpProtoAndCheckerMaker {
2323
public:
2424
void Make() override {
2525
OpComment comment;
26-
AddInput("X",
27-
string::Sprintf("(LoDTensor) the left hand operand of %s operator",
28-
comment.type));
29-
AddInput("Y", string::Sprintf(
30-
"(LoDTensor) the right hand operand of %s operator",
31-
comment.type));
26+
AddInput("X", string::Sprintf("the left hand operand of %s operator",
27+
comment.type));
28+
AddInput("Y", string::Sprintf("the right hand operand of %s operator",
29+
comment.type));
3230
AddAttr<bool>("force_cpu",
33-
"(bool, default false) Force fill output variable to cpu "
31+
"Force fill output variable to cpu "
3432
"memory. Otherwise, fill output variable to the running "
35-
"device")
36-
.SetDefault(false);
37-
AddOutput("Out", string::Sprintf(
38-
"(LoDTensor) n-dim bool tensor. Each element is %s",
39-
comment.equation));
40-
AddComment(string::Sprintf(R"DOC(%s Operator
41-
33+
"device [default true].")
34+
.SetDefault(true);
35+
AddOutput("Out", string::Sprintf("n-dim bool tensor. Each element is %s",
36+
comment.equation));
37+
AddComment(string::Sprintf(R"DOC(
4238
It operates element-wise on X and Y, and returns the Out. Each of them is a
4339
N-dim tensor. X and Y could be any type. The each element of the Out tensor is
44-
calculated by %s
40+
calculated by $%s$
4541
)DOC",
46-
comment.type, comment.equation));
47-
AddAttr<int>("axis",
48-
"(int, default -1). The start dimension index "
49-
"for broadcasting Y onto X.")
42+
comment.equation));
43+
AddAttr<int>(
44+
"axis",
45+
"The start dimension index for broadcasting Y onto X. [default -1]")
5046
.SetDefault(-1)
5147
.EqualGreaterThan(-1);
5248
}

paddle/fluid/operators/cumsum_op.cc

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -30,19 +30,19 @@ class CumOp : public framework::OperatorWithKernel {
3030
class CumsumOpMaker : public framework::OpProtoAndCheckerMaker {
3131
public:
3232
void Make() override {
33-
AddInput("X", "Input of Cumsum operator");
34-
AddOutput("Out", "Output of Cumsum operator");
33+
AddInput("X", "Input of cumsum operator");
34+
AddOutput("Out", "Output of cumsum operator");
3535
AddAttr<int>("axis",
36-
"(int, default -1). The dimenstion to accumulate along. "
37-
"-1 means the last dimenstion")
36+
"The dimension to accumulate along. -1 means the last "
37+
"dimension [default -1].")
3838
.SetDefault(-1)
3939
.EqualGreaterThan(-1);
4040
AddAttr<bool>("exclusive",
41-
"bool, default false). Whether to perform exclusive cumsum")
41+
"Whether to perform exclusive cumsum. [default false].")
4242
.SetDefault(false);
4343
AddAttr<bool>("reverse",
44-
"bool, default false). If true, the cumsum is performed in "
45-
"the reversed direction")
44+
"If true, the cumsum is performed in the reversed direction. "
45+
"[default false].")
4646
.SetDefault(false);
4747
AddComment(R"DOC(
4848
The cumulative sum of the elements along a given axis.

paddle/fluid/operators/layer_norm_op.cc

Lines changed: 17 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -62,47 +62,48 @@ class LayerNormOp : public framework::OperatorWithKernel {
6262
class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker {
6363
public:
6464
void Make() override {
65-
AddInput("X", "(LoDTensor) The input tensor.");
65+
AddInput("X", "The input tensor.");
6666
AddInput("Scale",
67-
"(Tensor, optional) Scale is a 1-dimensional tensor of size "
67+
"(optional) Scale is a 1-dimensional tensor of size "
6868
"H(`begin_norm_axis` splits the tensor(`X`) to a matrix [N,H])."
6969
"It is applied to the output.")
7070
.AsDispensable();
7171
AddInput("Bias",
72-
"(Tensor, optional) Bias is a 1-dimensional tensor of size "
72+
"(optional) Bias is a 1-dimensional tensor of size "
7373
"H(`begin_norm_axis` splits the tensor(`X`) to a matrix [N,H])."
7474
"It is applied to the output.")
7575
.AsDispensable();
76-
AddOutput("Y", "(LoDTensor) Result after normalization.");
77-
AddOutput("Mean", "(Tensor) Mean of the current mini batch.")
78-
.AsIntermediate();
79-
AddOutput("Variance", "(Tensor) Variance of the current mini batch.")
76+
AddOutput("Y", "Result after normalization.");
77+
AddOutput("Mean", "Mean of the current mini batch.").AsIntermediate();
78+
AddOutput("Variance", "Variance of the current mini batch.")
8079
.AsIntermediate();
8180

8281
AddAttr<float>("epsilon",
83-
"(float, default 1e-5) Constant for "
84-
"numerical stability")
82+
"Constant for numerical stability [default 1e-5].")
8583
.SetDefault(1e-5)
8684
.AddCustomChecker([](const float &epsilon) {
8785
PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 0.001f,
8886
"'epsilon' should be between 0.0 and 0.001.");
8987
});
9088
AddAttr<int>("begin_norm_axis",
91-
"(int default:1), the "
92-
"axis of `begin_norm_axis ... Rank(X) - 1` will be "
89+
"the axis of `begin_norm_axis ... Rank(X) - 1` will be "
9390
"normalized. `begin_norm_axis` splits the tensor(`X`) to a "
94-
"matrix [N,H].")
91+
"matrix [N,H]. [default 1].")
9592
.SetDefault(1)
9693
.AddCustomChecker([](const int &begin_norm_axis) {
9794
PADDLE_ENFORCE_GT(begin_norm_axis, 0,
9895
"'begin_norm_axis' should be greater than zero.");
9996
});
10097

10198
AddComment(R"DOC(
102-
Layer Normalization.
103-
Layer Norm has been implemented as discussed in the paper:
104-
https://arxiv.org/abs/1607.06450
105-
...
99+
Assume feature vectors exist on dimensions
100+
:attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics
101+
along these dimensions for each feature vector :math:`a` with size
102+
:math:`H`, then normalize each feature vector using the corresponding
103+
statistics. After that, apply learnable gain and bias on the normalized
104+
tensor to scale and shift if :attr:`scale` and :attr:`shift` are set.
105+
106+
Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
106107
)DOC");
107108
}
108109
};

paddle/fluid/operators/listen_and_serv_op.cc

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -348,7 +348,8 @@ class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker {
348348
};
349349

350350
void SignalHandler::StopAndExit(int signal_num) {
351-
VLOG(3) << "Catch interrupt signal: " << signal_num << ", program will exit";
351+
// Do not use VLOG here, for the device used for printing may already be released.
352+
// exit will release internal allocated resources.
352353
exit(0);
353354
}
354355

paddle/fluid/operators/mean_op.cc

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -33,12 +33,10 @@ class MeanOp : public framework::OperatorWithKernel {
3333
class MeanOpMaker : public framework::OpProtoAndCheckerMaker {
3434
public:
3535
void Make() override {
36-
AddInput("X", "The input of mean op");
37-
AddOutput("Out", "The output of mean op").Reuse("X");
36+
AddInput("X", "(Tensor) The input of mean op");
37+
AddOutput("Out", "(Tensor) The output of mean op").Reuse("X");
3838
AddComment(R"DOC(
39-
Mean Operator.
40-
41-
Out is a scalar which is the mean of all elements in X.
39+
Mean Operator calculates the mean of all elements in X.
4240
4341
)DOC");
4442
}

paddle/fluid/operators/multiplex_op.cc

Lines changed: 32 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -62,26 +62,46 @@ class MultiplexOp : public framework::OperatorWithKernel {
6262
class MultiplexOpMaker : public framework::OpProtoAndCheckerMaker {
6363
public:
6464
void Make() override {
65-
AddInput("Ids", "The index tensor of multiplex operator.");
66-
AddInput("X", "The candidate tensors of multiplex operator.")
65+
AddInput("Ids",
66+
"Tensor<int32>, index variable which is a 2-D tensor with shape "
67+
"[M, 1] where M is the batch size.");
68+
AddInput("X",
69+
"A list of variables to gather from. All variables have the same "
70+
"shape and the rank is at least 2.")
6771
.AsDuplicable();
6872
AddOutput("Out", "The output tensor of multiplex operator.");
6973
AddComment(R"DOC(
70-
Multiplex Operator.
71-
72-
Multiplex multiple tensors according to the index provided by the index tensor.
73-
74-
Ids: the index tensor.
75-
X[0 : N - 1]: the candidate tensors for output (N >= 2).
76-
For each index i from 0 to batchSize - 1, the output is the i-th row of the
74+
Referring to the given index variable, this layer selects rows from the
75+
input variables to construct a multiplex variable. Assuming that there are
76+
:math:`m` input variables and :math:`I_i` represents the i-th input
77+
variable and :math:`i` is in [0, :math:`m`). All input variables are
78+
tensors with same shape [:math:`d_0`, :math:`d_1`, ..., :math:`d_R`].
79+
Please note that rank of the input tensor should be at least 2. Each input
80+
variable will be treated as a 2-D matrix with shape [:math:`M`, :math:`N`]
81+
where :math:`M` for :math:`d_0` and :math:`N` for :math:`d_1` * :math:`d_2`
82+
* ... * :math:`d_R`. Let :math:`I_i[j]` be the j-th row of the i-th input
83+
variable. The given index variable should be a 2-D tensor with shape
84+
[:math:`M`, 1]. Let `ID[i]` be the i-th index value of the index variable.
85+
Then the output variable will be a tensor with shape [:math:`d_0`,
86+
:math:`d_1`, ..., :math:`d_R`]. If we treat the output tensor as a 2-D
87+
matrix with shape [:math:`M`, :math:`N`] and let :math:`O[i]` be the i-th
88+
row of the matrix, then `O[i]` is equal to :math:`I_{ID[i]}[i]`.
89+
90+
* Ids: the index tensor.
91+
92+
* X[0 : N - 1]: the candidate tensors for output (N >= 2).
93+
94+
* For each index i from 0 to batchSize - 1, the output is the i-th row of the
7795
the (Ids[i])-th tensor.
7896
7997
For i-th row of the output tensor:
8098
81-
$$y[i] = x_{k}[i]$$
99+
$$
100+
y[i] = x_{k}[i]
101+
$$
82102
83-
where `y` is the output tensor, `x_{k}` is the k-th input tensor,
84-
and `k = Ids[i]`.
103+
where $y$ is the output tensor, $x_{k}$ is the k-th input tensor,
104+
and $k = Ids[i]$.
85105
86106
)DOC");
87107
}

paddle/fluid/operators/reader/create_recordio_file_reader_op.cc

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -78,11 +78,15 @@ class CreateRecordIOReaderOp : public framework::OperatorBase {
7878
class CreateRecordIOReaderOpMaker : public FileReaderMakerBase {
7979
protected:
8080
void Apply() override {
81-
AddAttr<std::string>("filename", "The filename of record io reader");
81+
AddAttr<std::string>(
82+
"filename",
83+
"The filename of the record file. This file will be given to the reader.");
8284
AddComment(R"DOC(
83-
CreateRecordIOReader Operator
85+
Open a recordio file and return the reader object. The returned reader object
86+
is thread-safe.
8487
85-
Create a reader from a record io file
88+
NOTE: This is a very low-level API. It is used for debugging data files or
89+
training. Please use `open_files` instead of this API for production usage.
8690
)DOC");
8791
}
8892
};

0 commit comments

Comments
 (0)