diff --git a/paddle/fluid/operators/attention_lstm_op.cc b/paddle/fluid/operators/attention_lstm_op.cc index 7986bc8499427a..9624f752b780fd 100644 --- a/paddle/fluid/operators/attention_lstm_op.cc +++ b/paddle/fluid/operators/attention_lstm_op.cc @@ -247,7 +247,7 @@ void AttentionLSTMOpMaker::Make() { " - Weight = {W_forget, W_input, W_output, W_cell}"); AddInput("LSTMBias", "(phi::DenseTensor) the combined bias of LSTM, shape (1x4D)." - "Note: we should add the bias of hidden and context accorindg to " + "Note: we should add the bias of hidden and context according to " "the same gate: " "{B_forget, B_input, B_output, B_cell}"); AddOutput( diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h index 26f428b165256b..d9a1d419f5a9e7 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h @@ -101,8 +101,8 @@ class SequenceExpandAsKernel : public framework::OpKernel { out->mutable_data(context.GetPlace()); auto &dev_ctx = context.template device_context(); - SequenceExpandAsFunctor seq_espand_functor; - seq_espand_functor(dev_ctx, *x, y_lod[0], out); + SequenceExpandAsFunctor seq_expand_functor; + seq_expand_functor(dev_ctx, *x, y_lod[0], out); } }; diff --git a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc index 1c31293e33c858..d033ac210c7c8e 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc @@ -188,7 +188,7 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker { X.data = [a, b, c, d, e] and Input(PadValue): PadValue.data = [0] - and attribite 'padded_length' = 4, + and attribute 'padded_length' = 4, then we get phi::DenseTensor: Out.data = [[a, b, 0, 0], [c, d, e, 0]] @@ -201,7 +201,7 @@ class SequencePadOpMaker : public 
framework::OpProtoAndCheckerMaker { X.data = [[a1, a2], [b1, b2], [c1, c2], [d1, d2], [e1, e2]] and Input(PadValue): PadValue.data = [0] - and attribite 'padded_length' = -1, which mean using the length + and attribute 'padded_length' = -1, which means using the length of longest input sequence(3 in this case), then we get phi::DenseTensor: Out.data = [[[a1, a2], [b1, b2], [0, 0]], @@ -215,7 +215,7 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker { X.data = [[a1, a2], [b1, b2], [c1, c2], [d1, d2], [e1, e2]] and Input(PadValue): PadValue.data = [p1, p2] - and attribite 'padded_length' = -1, which mean using the length + and attribute 'padded_length' = -1, which means using the length of longest input sequence(3 in this case), then we get phi::DenseTensor: Out.data = [[[a1, a2], [b1, b2], [p1, p2]], diff --git a/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc index 6088b8181646ba..4b19faea335bf9 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc @@ -99,9 +99,10 @@ class SequenceUnpadOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("X", "(LoDTensor, default LoDTensor) Input tensor which " "contains the padded sequences with equal length."); - AddInput("Length", - "(LoDTensor) The input tensor which specifies the actual ength of " - "sequences after unpadding."); + AddInput( + "Length", + "(LoDTensor) The input tensor which specifies the actual length of " + "sequences after unpadding."); AddOutput( "Out", "(LoDTensor) The output tensor which contains unpadded sequences."); @@ -109,7 +110,7 @@ class SequenceUnpadOpMaker : public framework::OpProtoAndCheckerMaker { Sequence Unpad Operator This operator removes the padding data in the input sequences and convert - them into sequences with actual length as output, identitied by lod + them into sequences with actual length as output, 
identified by lod information. Example: diff --git a/paddle/fluid/operators/string/faster_tokenizer_op.cc b/paddle/fluid/operators/string/faster_tokenizer_op.cc index 68126e187b4e58..019fb0422580fb 100644 --- a/paddle/fluid/operators/string/faster_tokenizer_op.cc +++ b/paddle/fluid/operators/string/faster_tokenizer_op.cc @@ -365,7 +365,7 @@ int BertTokenizer::Encode( vector token_type_ids; CreateTokenTypeIdsFromSequences(&token_type_ids, ids, pair_ids); - // Build output dictionnary + // Build output dictionary encoded_inputs->emplace("input_ids", sequence); encoded_inputs->emplace("token_type_ids", token_type_ids); // Check lengths diff --git a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.cc b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.cc index b229c4aed79b21..6008cd642828d9 100644 --- a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.cc +++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.cc @@ -36,7 +36,7 @@ class TensorRTEngineOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("max_batch_size", "the maximum batch size."); AddAttr("workspace_size", "the workspace size.").AsExtra(); AddAttr("sub_block", "the trt block"); - AddAttr("enable_int8", "whether swith to int8 mode"); + AddAttr("enable_int8", "whether to switch to int8 mode"); AddComment("TensorRT engine operator."); } }; diff --git a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h index d60ecd33792025..28dcaf3d43e316 100644 --- a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h +++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h @@ -291,7 +291,7 @@ class TensorRTEngineOp : public framework::OperatorBase { auto t_shape = common::vectorize(t.dims()); runtime_input_shape.insert(std::make_pair(name, t_shape)); // We need collect value range for shape tensor for Paddle-TRT's use. 
- // To be noticed, this method to identify all inputd/outputs is shape + // To be noticed, this method identifies whether all inputs/outputs are shape // tensors; After, TRT Engine gets whether it is a real shape tensor. auto is_shape_tensor = true; if (trt_engine->engine()) { @@ -512,7 +512,7 @@ class TensorRTEngineOp : public framework::OperatorBase { int binding_offset = 0; nvinfer1::IExecutionContext *trt_context = nullptr; if (engine->with_dynamic_shape()) { - // Initilize context and get offset by profile index + // Initialize context and get offset by profile index trt_context = engine->context(); binding_offset = engine->GetBindingsOffset(); } @@ -606,7 +606,7 @@ class TensorRTEngineOp : public framework::OperatorBase { "by " "setting min_subgraph_size using EnableTensorrtEngine " "interface.\n" - "\tThe min_subgraph_size shouble to be greater than the " + "\tThe min_subgraph_size should be greater than the " "number " "of " "nodes in the inconsistent subgraph.\n")); @@ -828,7 +828,7 @@ class TensorRTEngineOp : public framework::OperatorBase { "by " "setting min_subgraph_size using EnableTensorrtEngine " "interface.\n" - "\tThe min_subgraph_size shouble to be greater than the number " + "\tThe min_subgraph_size should be greater than the number " "of " "nodes in the inconsistent subgraph.\n", runtime_batch,