Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion paddle/fluid/operators/attention_lstm_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -247,7 +247,7 @@ void AttentionLSTMOpMaker::Make() {
" - Weight = {W_forget, W_input, W_output, W_cell}");
AddInput("LSTMBias",
"(phi::DenseTensor) the combined bias of LSTM, shape (1x4D)."
"Note: we should add the bias of hidden and context accorindg to "
"Note: we should add the bias of hidden and context according to "
"the same gate: "
"{B_forget, B_input, B_output, B_cell}");
AddOutput(
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -101,8 +101,8 @@ class SequenceExpandAsKernel : public framework::OpKernel<T> {
out->mutable_data<T>(context.GetPlace());

auto &dev_ctx = context.template device_context<DeviceContext>();
SequenceExpandAsFunctor<DeviceContext, T> seq_espand_functor;
seq_espand_functor(dev_ctx, *x, y_lod[0], out);
SequenceExpandAsFunctor<DeviceContext, T> seq_expand_functor;
seq_expand_functor(dev_ctx, *x, y_lod[0], out);
}
};

Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/operators/sequence_ops/sequence_pad_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
X.data = [a, b, c, d, e]
and Input(PadValue):
PadValue.data = [0]
and attribite 'padded_length' = 4,
and attribute 'padded_length' = 4,
then we get phi::DenseTensor:
Out.data = [[a, b, 0, 0],
[c, d, e, 0]]
Expand All @@ -201,7 +201,7 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
X.data = [[a1, a2], [b1, b2], [c1, c2], [d1, d2], [e1, e2]]
and Input(PadValue):
PadValue.data = [0]
and attribite 'padded_length' = -1, which mean using the length
and attribute 'padded_length' = -1, which means using the length
of longest input sequence(3 in this case),
then we get phi::DenseTensor:
Out.data = [[[a1, a2], [b1, b2], [0, 0]],
Expand All @@ -215,7 +215,7 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
X.data = [[a1, a2], [b1, b2], [c1, c2], [d1, d2], [e1, e2]]
and Input(PadValue):
PadValue.data = [p1, p2]
and attribite 'padded_length' = -1, which mean using the length
and attribute 'padded_length' = -1, which means using the length
of longest input sequence(3 in this case),
then we get phi::DenseTensor:
Out.data = [[[a1, a2], [b1, b2], [p1, p2]],
Expand Down
9 changes: 5 additions & 4 deletions paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -99,17 +99,18 @@ class SequenceUnpadOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput("X",
"(LoDTensor, default LoDTensor<float>) Input tensor which "
"contains the padded sequences with equal length.");
AddInput("Length",
"(LoDTensor) The input tensor which specifies the actual ength of "
"sequences after unpadding.");
AddInput(
"Length",
"(LoDTensor) The input tensor which specifies the actual length of "
"sequences after unpadding.");
AddOutput(
"Out",
"(LoDTensor) The output tensor which contains unpadded sequences.");
AddComment(R"DOC(
Sequence Unpad Operator

This operator removes the padding data in the input sequences and converts
them into sequences with actual length as output, identitied by lod
them into sequences with actual length as output, identified by lod
information.

Example:
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/string/faster_tokenizer_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -365,7 +365,7 @@ int BertTokenizer::Encode(
vector<int64_t> token_type_ids;
CreateTokenTypeIdsFromSequences(&token_type_ids, ids, pair_ids);

// Build output dictionnary
// Build output dictionary
encoded_inputs->emplace("input_ids", sequence);
encoded_inputs->emplace("token_type_ids", token_type_ids);
// Check lengths
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/tensorrt/tensorrt_engine_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ class TensorRTEngineOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<int>("max_batch_size", "the maximum batch size.");
AddAttr<int64_t>("workspace_size", "the workspace size.").AsExtra();
AddAttr<framework::BlockDesc *>("sub_block", "the trt block");
AddAttr<bool>("enable_int8", "whether swith to int8 mode");
AddAttr<bool>("enable_int8", "whether to switch to int8 mode");
AddComment("TensorRT engine operator.");
}
};
Expand Down
8 changes: 4 additions & 4 deletions paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ class TensorRTEngineOp : public framework::OperatorBase {
auto t_shape = common::vectorize<int32_t>(t.dims());
runtime_input_shape.insert(std::make_pair(name, t_shape));
// We need collect value range for shape tensor for Paddle-TRT's use.
// To be noticed, this method to identify all inputd/outputs is shape
// To be noticed, this method to identify all inputs/outputs is shape
// tensors; After, TRT Engine gets whether it is a real shape tensor.
auto is_shape_tensor = true;
if (trt_engine->engine()) {
Expand Down Expand Up @@ -512,7 +512,7 @@ class TensorRTEngineOp : public framework::OperatorBase {
int binding_offset = 0;
nvinfer1::IExecutionContext *trt_context = nullptr;
if (engine->with_dynamic_shape()) {
// Initilize context and get offset by profile index
// Initialize context and get offset by profile index
trt_context = engine->context();
binding_offset = engine->GetBindingsOffset();
}
Expand Down Expand Up @@ -606,7 +606,7 @@ class TensorRTEngineOp : public framework::OperatorBase {
"by "
"setting min_subgraph_size using EnableTensorrtEngine "
"interface.\n"
"\tThe min_subgraph_size shouble to be greater than the "
"\tThe min_subgraph_size should be greater than the "
"number "
"of "
"nodes in the inconsistent subgraph.\n"));
Expand Down Expand Up @@ -828,7 +828,7 @@ class TensorRTEngineOp : public framework::OperatorBase {
"by "
"setting min_subgraph_size using EnableTensorrtEngine "
"interface.\n"
"\tThe min_subgraph_size shouble to be greater than the number "
"\tThe min_subgraph_size should be greater than the number "
"of "
"nodes in the inconsistent subgraph.\n",
runtime_batch,
Expand Down