diff --git a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc deleted file mode 100644 index f65dc988bfebda..00000000000000 --- a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc +++ /dev/null @@ -1,305 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/sequence_ops/sequence_pad_op.h" - -#include -#include - -namespace paddle { -namespace operators { - -class SequencePadOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE_EQ( - ctx->HasInput("X"), - true, - phi::errors::NotFound("Input(X) of SequencePadOp should not be null.")); - PADDLE_ENFORCE_EQ( - ctx->HasInput("PadValue"), - true, - phi::errors::NotFound( - "Input(PadValue) of SequencePadOp should not be null.")); - PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), - true, - phi::errors::NotFound( - "Output(Out) of SequencePadOp should not be null.")); - PADDLE_ENFORCE_EQ( - ctx->HasOutput("Length"), - true, - phi::errors::NotFound( - "Output(Length) of SequencePadOp should not be null.")); - - auto x_dims = ctx->GetInputDim("X"); - PADDLE_ENFORCE_GE(x_dims.size(), - 2, - phi::errors::InvalidArgument( - "The rank of SequencePadOp Input(X) can't be less " - "than 2. But the rank we received is %d", - x_dims.size())); - auto time_step_dims = common::slice_ddim(x_dims, 1, x_dims.size()); - auto pad_value_dims = ctx->GetInputDim("PadValue"); - PADDLE_ENFORCE_EQ( - pad_value_dims == common::make_ddim({1}) || - pad_value_dims == common::make_ddim({}) || - pad_value_dims == time_step_dims, - true, - phi::errors::InvalidArgument( - "The SequencePadOp Input(PadValue) must be a scalar or a tensor " - "whose shape equals to time steps in sequences")); - - int out_dim_0 = -1; - - int padded_length = ctx->Attrs().Get("padded_length"); - if (ctx->IsRuntime()) { - // run time - framework::Variable* x_var = - PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]); - const auto& x_lod = x_var->Get().lod(); - PADDLE_ENFORCE_EQ(x_lod.empty(), - false, - phi::errors::NotFound( - "The SequencePadOp Input(X) must hold lod info.")); - const auto& x_lod_0 = x_lod[0]; - PADDLE_ENFORCE_GE( - x_lod_0.size(), - 2, - phi::errors::InvalidArgument( - "The size of SequencePadOp Input(X)'s lod info can't be less " - "than 2. But the size we received is %d", - x_lod_0.size())); - PADDLE_ENFORCE_EQ(x_dims[0], - static_cast(x_lod_0.back()), - phi::errors::InvalidArgument( - "The SequencePadOp Input(X)'s lod info mismatches " - "the actual tensor shape. 
The 1st dimension of " - "Input(X)'s lod info is %d, the 1st dimension of " - "actual tensor shape is %d", - x_dims[0], - static_cast<int64_t>(x_lod_0.back()))); - - int seq_num = static_cast<int>(x_lod_0.size() - 1); - int max_seq_len = - static_cast<int>(phi::funcs::MaximumSequenceLength(x_lod_0)); - if (padded_length == -1) { - padded_length = max_seq_len; - } - PADDLE_ENFORCE_GE( - padded_length, - max_seq_len, - phi::errors::InvalidArgument( - "The SequencePadOp Attr(padded_length) should be greater than or " - "equal to the " - "length of the longest original sequence. But the padded_length " - "we received is %d, the length of the longest original sequence " - "is %d", - padded_length, - max_seq_len)); - out_dim_0 = seq_num; - } else { - // compile time - if (padded_length == -1) { - padded_length = 1; - } - PADDLE_ENFORCE_GT( - ctx->GetLoDLevel("X"), - 0, - phi::errors::InvalidArgument( - "The LoD level of SequencePadOp Input(X) should be " - "larger than 0. But the LoD level we received is %d", - ctx->GetLoDLevel("X"))); - } - - std::vector<int> out_dims_vec{out_dim_0, padded_length}; - std::vector<int> len_dims_vec{out_dim_0}; - auto time_step_dims_vec = common::vectorize(time_step_dims); - out_dims_vec.insert(out_dims_vec.end(), - time_step_dims_vec.begin(), - time_step_dims_vec.end()); - ctx->SetOutputDim("Out", common::make_ddim(out_dims_vec)); - ctx->SetOutputDim("Length", common::make_ddim(len_dims_vec)); - } - - protected: - phi::KernelKey GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X"); - return phi::KernelKey(data_type, ctx.GetPlace()); - } -}; - -class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker { - public: - void Make() override { - AddInput("X", - "(phi::DenseTensor, default phi::DenseTensor) Input " - "variable which " - "should contain lod information."); - AddInput("PadValue", - "(phi::DenseTensor), this phi::DenseTensor holds values that will " - "be filled into " - "padded steps. It can be a scalar or a tensor whose shape equals " - "the time steps in sequences. If it's a scalar, it will be " - "automatically broadcast to the shape of the time step."); - AddOutput("Out", - "(phi::DenseTensor) The output variable, which contains padded " - "sequences."); - AddOutput("Length", - "(phi::DenseTensor) The output variable, which contains the " - "actual lengths of " - "sequences before padding."); - AddAttr<int>( - "padded_length", - "The length of padded sequences. It can be set to -1 or " - "any positive int. When it is -1, all sequences will be padded up to " - "the length of the longest one among them; when it is a certain " - "positive value, it must be no less than the length of the longest " - "original sequence.") - .SetDefault(-1); - AddComment(R"DOC( - Sequence Pad Operator - - This operator pads sequences in the same batch to a consistent length. - The length is specified by attribute 'padded_length'. New elements, - whose values are specified by input 'PadValue', will be appended to - the end of each sequence to make their final lengths consistent.
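The padding rule being deleted here can be sketched in a few lines of NumPy. This is a minimal illustration of the behavior the DOC block describes, not the operator's actual kernel (which lives in phi::funcs::PaddingLoDTensorFunctor); the helper name `pad_sequences` and its signature are made up for this sketch:

```python
import numpy as np

def pad_sequences(x, seq_lens, pad_value, padded_length=-1):
    # x:         packed input of shape (sum(seq_lens), *time_step_dims)
    # seq_lens:  length of each sequence, e.g. [2, 3] for lod [[0, 2, 5]]
    # pad_value: scalar, or an array broadcastable to time_step_dims
    if padded_length == -1:
        padded_length = max(seq_lens)  # pad up to the longest sequence
    assert padded_length >= max(seq_lens), "padded_length too small"
    step_shape = x.shape[1:]
    pad = np.broadcast_to(np.asarray(pad_value, dtype=x.dtype), step_shape)
    out = np.empty((len(seq_lens), padded_length) + step_shape, dtype=x.dtype)
    start = 0
    for i, n in enumerate(seq_lens):
        out[i, :n] = x[start:start + n]  # copy the real time steps
        out[i, n:] = pad                 # fill the tail with PadValue
        start += n
    return out, np.asarray(seq_lens, dtype=np.int64)  # (Out, Length)
```

With `x = np.array([1., 2., 3., 4., 5.])`, `seq_lens = [2, 3]`, `pad_value = 0.0`, and `padded_length = 4`, this reproduces Case 1 below.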
- - Following are cases to better explain how this works: - - Case 1: - - Given a 1-level phi::DenseTensor input(X): - X.lod = [[0, 2, 5]] - X.data = [a, b, c, d, e] - and Input(PadValue): - PadValue.data = [0] - and attribute 'padded_length' = 4, - then we get phi::DenseTensor: - Out.data = [[a, b, 0, 0], - [c, d, e, 0]] - Length.data = [2, 3] - - Case 2: - - Given a 1-level phi::DenseTensor input(X): - X.lod = [[0, 2, 5]] - X.data = [[a1, a2], [b1, b2], [c1, c2], [d1, d2], [e1, e2]] - and Input(PadValue): - PadValue.data = [0] - and attribute 'padded_length' = -1, which mean using the length - of longest input sequence(3 in this case), - then we get phi::DenseTensor: - Out.data = [[[a1, a2], [b1, b2], [0, 0]], - [[c1, c2], [d1, d2], [e1, e2]]] - Length.data = [2, 3] - - Case 3: - - Given a 1-level phi::DenseTensor input(X): - X.lod = [[0, 2, 5]] - X.data = [[a1, a2], [b1, b2], [c1, c2], [d1, d2], [e1, e2]] - and Input(PadValue): - PadValue.data = [p1, p2] - and attribute 'padded_length' = -1, which mean using the length - of longest input sequence(3 in this case), - then we get phi::DenseTensor: - Out.data = [[[a1, a2], [b1, b2], [p1, p2]], - [[c1, c2], [d1, d2], [e1, e2]]] - Length.data = [2, 3] - - )DOC"); - } -}; - -class SequencePadGradOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE_EQ(ctx->HasInput("X"), - true, - phi::errors::NotFound( - "Input(X) of SequencePadGradOp should not be null.")); - PADDLE_ENFORCE_EQ( - ctx->HasInput(framework::GradVarName("Out")), - true, - phi::errors::NotFound( - "Input(Out@GRAD) of SequencePadGradOp should not be null.")); - - if (ctx->HasOutput(framework::GradVarName("X"))) { - ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); - ctx->ShareLoD("X", /*->*/ framework::GradVarName("X")); - } - } - - protected: - phi::KernelKey GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - auto data_type = OperatorWithKernel::IndicateVarDataType( - ctx, framework::GradVarName("Out")); - return phi::KernelKey(data_type, ctx.GetPlace()); - } -}; - -template -class SequencePadGradOpMaker : public framework::SingleGradOpMaker { - public: - using framework::SingleGradOpMaker::SingleGradOpMaker; - - protected: - void Apply(GradOpPtr op) const override { - op->SetType("sequence_pad_grad"); - op->SetAttrMap(this->Attrs()); - op->SetInput("X", this->Input("X")); - op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); - op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); - } -}; - -DECLARE_NO_NEED_BUFFER_VARS_INFERER(SequencePadGradOpNoNeedBufferVarsInferer, - "X"); - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -REGISTER_OPERATOR(sequence_pad, - ops::SequencePadOp, - ops::SequencePadOpMaker, - ops::SequencePadGradOpMaker, - ops::SequencePadGradOpMaker); -REGISTER_OPERATOR(sequence_pad_grad, - ops::SequencePadGradOp, - ops::SequencePadGradOpNoNeedBufferVarsInferer); - -PD_REGISTER_STRUCT_KERNEL(sequence_pad, - CPU, - ALL_LAYOUT, - ops::SequencePadOpKernel, - float, - double, - int, - int64_t) {} -PD_REGISTER_STRUCT_KERNEL(sequence_pad_grad, - CPU, - ALL_LAYOUT, - ops::SequencePadGradOpKernel, - float, - double, - int, - int64_t) {} diff --git a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cu b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cu deleted file mode 100644 index 
910a4eae21f1e8..00000000000000 --- a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cu +++ /dev/null @@ -1,33 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/sequence_ops/sequence_pad_op.h" - -namespace ops = paddle::operators; -PD_REGISTER_STRUCT_KERNEL(sequence_pad, - GPU, - ALL_LAYOUT, - ops::SequencePadOpKernel, - float, - double, - int, - int64_t) {} -PD_REGISTER_STRUCT_KERNEL(sequence_pad_grad, - GPU, - ALL_LAYOUT, - ops::SequencePadGradOpKernel, - float, - double, - int, - int64_t) {} diff --git a/paddle/fluid/operators/sequence_ops/sequence_pad_op.h b/paddle/fluid/operators/sequence_ops/sequence_pad_op.h deleted file mode 100644 index dd15ff4c9935dc..00000000000000 --- a/paddle/fluid/operators/sequence_ops/sequence_pad_op.h +++ /dev/null @@ -1,95 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -#include - -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/memory/memcpy.h" -#include "paddle/phi/kernels/funcs/math_function.h" -#include "paddle/phi/kernels/funcs/sequence_padding.h" - -namespace paddle { -namespace operators { - -using LoD = framework::LoD; -template -class SequencePadOpKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - const auto* x = ctx.Input("X"); - auto* out = ctx.Output("Out"); - auto* len_t = ctx.Output("Length"); - out->mutable_data(ctx.GetPlace()); - - PADDLE_ENFORCE_EQ(x->lod().empty(), - false, - phi::errors::NotFound( - "Input(X) phi::DenseTensor of SequencePadOp does not " - "contain LoD information.")); - - const auto* pad_value = ctx.Input("PadValue"); - - int padded_length = ctx.Attr("padded_length"); - - phi::funcs::PaddingLoDTensorFunctor()( - ctx.template device_context(), - *x, - out, - *pad_value, - padded_length, - 0, - false, - phi::funcs::kBatchLengthWidth); - - phi::DenseTensor seq_len; - seq_len.Resize(len_t->dims()); - int64_t* len_data = seq_len.mutable_data(platform::CPUPlace()); - for (size_t i = 1; i < x->lod()[0].size(); ++i) { - len_data[i - 1] = x->lod()[0][i] - x->lod()[0][i - 1]; - } - framework::TensorCopy(seq_len, - ctx.GetPlace(), - ctx.template device_context(), - len_t); - } -}; - -template -class SequencePadGradOpKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - auto* d_x = ctx.Output(framework::GradVarName("X")); - if (d_x) { - const auto* d_out = - ctx.Input(framework::GradVarName("Out")); - d_x->mutable_data(ctx.GetPlace()); - - int padded_length = ctx.Attr("padded_length"); - - phi::funcs::UnpaddingLoDTensorFunctor()( - ctx.template device_context(), - *d_out, - d_x, - padded_length, - 0, - false, - phi::funcs::kBatchLengthWidth); - } - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc deleted file mode 100644 index 5520cf3227d71e..00000000000000 --- a/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc +++ /dev/null @@ -1,213 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/fluid/operators/sequence_ops/sequence_unpad_op.h" - -#include <memory> -#include <string> - -namespace paddle { -namespace operators { - -class SequenceUnpadOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE_EQ(ctx->HasInput("X"), - true, - phi::errors::NotFound( - "Input(X) of SequenceUnpadOp should not be null.")); - PADDLE_ENFORCE_EQ( - ctx->HasInput("Length"), - true, - phi::errors::NotFound( - "Input(Length) of SequenceUnpadOp should not be null.")); - PADDLE_ENFORCE_EQ( - ctx->HasOutput("Out"), - true, - phi::errors::NotFound( - "Output(Out) of SequenceUnpadOp should not be null.")); - - auto x_dims = ctx->GetInputDim("X"); - PADDLE_ENFORCE_GE(x_dims.size(), - 2, - phi::errors::InvalidArgument( - "The rank of Input(X) can't be less than 2. But the " - "rank we received is %d", - x_dims.size())); - - auto len_dims = ctx->GetInputDim("Length"); - PADDLE_ENFORCE_EQ(len_dims.size(), - 1, - phi::errors::InvalidArgument( - "The rank of SequenceUnpadOp Input(Length) should " - "be 1. But the rank we received is %d", - len_dims.size())); - PADDLE_ENFORCE_EQ( - len_dims[0], - x_dims[0], - phi::errors::InvalidArgument( - "The 1st dimension of SequenceUnpadOp Input(X) and Input(Length) " - "should be the same. But the 1st dimension of " - "Input(X) is %d, Input(Length) is %d", - x_dims[0], - len_dims[0])); - - int64_t out_dim_0 = -1; - if (ctx->IsRuntime()) { - out_dim_0 = x_dims[0] * x_dims[1]; - } - - std::vector<int64_t> out_dims_vec{out_dim_0}; - if (x_dims.size() == 2) { - out_dims_vec.push_back(1); - } else { - for (int i = 2; i < x_dims.size(); ++i) { - out_dims_vec.push_back(x_dims[i]); - } - } - ctx->SetOutputDim("Out", common::make_ddim(out_dims_vec)); - if (!ctx->IsRuntime()) { - ctx->SetLoDLevel("Out", 1); - } - } - - protected: - phi::KernelKey GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X"); - return phi::KernelKey(data_type, ctx.GetPlace()); - } -}; - -class SequenceUnpadOpMaker : public framework::OpProtoAndCheckerMaker { - public: - void Make() override { - AddInput("X", - "(LoDTensor, default LoDTensor) Input tensor which " - "contains the padded sequences with equal length."); - AddInput( - "Length", - "(LoDTensor) The input tensor which specifies the actual lengths of " - "sequences after unpadding."); - AddOutput( - "Out", - "(LoDTensor) The output tensor which contains unpadded sequences."); - AddComment(R"DOC( - Sequence Unpad Operator - - This operator removes the padding data from the input sequences and converts - them into sequences with their actual lengths as output, identified by LoD - information.
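The inverse of the padding sketch above can be written the same way. Again a minimal NumPy illustration of the semantics, not the kernel (which uses phi::funcs::UnpaddingLoDTensorFunctor); the helper name `unpad_sequences` is made up:

```python
import numpy as np

def unpad_sequences(x, lengths):
    # x:       padded input of shape (batch, padded_length, *dims)
    # lengths: actual length of each sequence, i.e. Input(Length)
    pieces = [x[i, :n] for i, n in enumerate(lengths)]  # drop each padding tail
    out = np.concatenate(pieces, axis=0)                # re-pack into one tensor
    lod0 = [0]
    for n in lengths:                                   # cumulative offsets,
        lod0.append(lod0[-1] + int(n))                  # e.g. [0, 2, 5, 9]
    return out, [lod0]                                  # (Out.data, Out.lod)
```

For the example below, `unpad_sequences(X, [2, 3, 4])` yields nine unpadded steps with `lod == [[0, 2, 5, 9]]`; the operator itself additionally reshapes the output of a 2-D input to shape `(9, 1)`, as InferShape above indicates.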
- - Example: - - Given input tensor Input(X): - X.data = [[ 1.0, 2.0, 3.0, 4.0, 5.0], - [ 6.0, 7.0, 8.0, 9.0, 10.0], - [11.0, 12.0, 13.0, 14.0, 15.0]], -` - in which there are 3 sequences padded to length 5, and the actual length - specified by Input(Length): - - Length.data = [2, 3, 4], - - after unpadding, Output(Out) will be: - - Out.data = [[1.0, 2.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0]] - Out.lod = [[0, 2, 5, 9]] - - )DOC"); - } -}; - -class SequenceUnpadGradOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE_EQ( - ctx->HasInput("X"), - true, - phi::errors::NotFound( - "Input(X) of SequenceUnpadGradOp should not be null.")); - PADDLE_ENFORCE_EQ( - ctx->HasInput(framework::GradVarName("Out")), - true, - phi::errors::NotFound( - "Input(Out@GRAD) of SequenceUnpadGradOp should not be null.")); - - if (ctx->HasOutput(framework::GradVarName("X"))) { - ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); - ctx->ShareLoD("X", /*->*/ framework::GradVarName("X")); - } - } - - protected: - phi::KernelKey GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - auto data_type = OperatorWithKernel::IndicateVarDataType( - ctx, framework::GradVarName("Out")); - return phi::KernelKey(data_type, ctx.GetPlace()); - } -}; - -template -class SequenceUnpadGradOpMaker : public framework::SingleGradOpMaker { - public: - using framework::SingleGradOpMaker::SingleGradOpMaker; - - protected: - void Apply(GradOpPtr op) const override { - op->SetType("sequence_unpad_grad"); - op->SetAttrMap(this->Attrs()); - op->SetInput("X", this->Input("X")); - op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); - op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); - } -}; - -DECLARE_NO_NEED_BUFFER_VARS_INFERER(SequenceUnpadGradOpNoNeedBufferVarsInferer, - "X"); - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -REGISTER_OPERATOR(sequence_unpad, - ops::SequenceUnpadOp, - ops::SequenceUnpadOpMaker, - ops::SequenceUnpadGradOpMaker, - ops::SequenceUnpadGradOpMaker); -REGISTER_OPERATOR(sequence_unpad_grad, - ops::SequenceUnpadGradOp, - ops::SequenceUnpadGradOpNoNeedBufferVarsInferer); -PD_REGISTER_STRUCT_KERNEL(sequence_unpad, - CPU, - ALL_LAYOUT, - ops::SequenceUnpadOpKernel, - float, - double, - int, - int64_t) {} -PD_REGISTER_STRUCT_KERNEL(sequence_unpad_grad, - CPU, - ALL_LAYOUT, - ops::SequenceUnpadGradOpKernel, - float, - double, - int, - int64_t) {} diff --git a/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cu b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cu deleted file mode 100644 index 8ba8b380c09761..00000000000000 --- a/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cu +++ /dev/null @@ -1,33 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/fluid/operators/sequence_ops/sequence_unpad_op.h" - -namespace ops = paddle::operators; -PD_REGISTER_STRUCT_KERNEL(sequence_unpad, - GPU, - ALL_LAYOUT, - ops::SequenceUnpadOpKernel, - float, - double, - int, - int64_t) {} -PD_REGISTER_STRUCT_KERNEL(sequence_unpad_grad, - GPU, - ALL_LAYOUT, - ops::SequenceUnpadGradOpKernel, - float, - double, - int, - int64_t) {} diff --git a/paddle/fluid/operators/sequence_ops/sequence_unpad_op.h b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.h deleted file mode 100644 index cc38fd510ef1ea..00000000000000 --- a/paddle/fluid/operators/sequence_ops/sequence_unpad_op.h +++ /dev/null @@ -1,116 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include - -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/memory/memcpy.h" -#include "paddle/phi/kernels/funcs/math_function.h" -#include "paddle/phi/kernels/funcs/sequence_padding.h" - -namespace paddle { -namespace operators { - -using LoDTensor = phi::DenseTensor; -using LoD = framework::LoD; - -template -class SequenceUnpadOpKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - auto* x_t = ctx.Input("X"); - auto* len_t = ctx.Input("Length"); - auto* out_t = ctx.Output("Out"); - - auto& dev_ctx = ctx.template device_context(); - phi::DenseTensor seq_len_cpu = - ctx.AllocateTmpTensor(len_t->dims(), dev_ctx); - if (platform::is_gpu_place(ctx.GetPlace()) || - platform::is_xpu_place(ctx.GetPlace())) { - seq_len_cpu.mutable_data(platform::CPUPlace()); - framework::TensorCopySync(*len_t, platform::CPUPlace(), &seq_len_cpu); - } else { - seq_len_cpu = *len_t; - } - - const int64_t* seq_len_ptr = seq_len_cpu.data(); - int64_t batch_size = len_t->dims()[0]; - std::vector out_lod0(batch_size + 1, 0); - for (int64_t i = 0; i < batch_size; ++i) { - out_lod0[i + 1] = out_lod0[i] + static_cast(seq_len_ptr[i]); - } - - framework::LoD out_lod; - out_lod.push_back(out_lod0); - out_t->set_lod(out_lod); - std::vector out_dims_vec{static_cast(out_lod0.back())}; - if (x_t->dims().size() == 2) { - out_dims_vec.push_back(1); - } else { - for (int i = 2; i < x_t->dims().size(); ++i) { - out_dims_vec.push_back(x_t->dims()[i]); - } - } - out_t->Resize(common::make_ddim(out_dims_vec)); - - // after set the lod of output, allocate the memory - out_t->mutable_data(ctx.GetPlace()); - - int64_t padded_length = x_t->dims()[1]; - phi::funcs::UnpaddingLoDTensorFunctor()( - dev_ctx, - *x_t, - out_t, - padded_length, - 0, - false, - phi::funcs::kBatchLengthWidth); - } -}; - -template -class SequenceUnpadGradOpKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - auto* d_x = ctx.Output(framework::GradVarName("X")); - if (d_x) { - const auto* d_out = ctx.Input(framework::GradVarName("Out")); - d_x->mutable_data(ctx.GetPlace()); - - int padded_length = d_x->dims()[1]; - - LoDTensor zero_pads; - 
zero_pads.Resize({1, 1}); - zero_pads.mutable_data(ctx.GetPlace()); - phi::funcs::SetConstant set_zero; - auto& dev_ctx = ctx.template device_context(); - set_zero(dev_ctx, &zero_pads, static_cast(0)); - - phi::funcs::PaddingLoDTensorFunctor()( - ctx.template device_context(), - *d_out, - d_x, - zero_pads, - padded_length, - 0, - false, - phi::funcs::kBatchLengthWidth); - } - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/sequence_ops/sequence_unpad_op_xpu.cc b/paddle/fluid/operators/sequence_ops/sequence_unpad_op_xpu.cc deleted file mode 100644 index c875cdc37e80b1..00000000000000 --- a/paddle/fluid/operators/sequence_ops/sequence_unpad_op_xpu.cc +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef PADDLE_WITH_XPU - -#include "paddle/fluid/operators/sequence_ops/sequence_unpad_op.h" - -namespace ops = paddle::operators; -PD_REGISTER_STRUCT_KERNEL( - sequence_unpad, XPU, ALL_LAYOUT, ops::SequenceUnpadOpKernel, float) {} - -#endif diff --git a/test/cpp/fluid/framework/op_compatible_info_test.cc b/test/cpp/fluid/framework/op_compatible_info_test.cc index 63bad5c25f73d7..fb4fa0cc5350a4 100644 --- a/test/cpp/fluid/framework/op_compatible_info_test.cc +++ b/test/cpp/fluid/framework/op_compatible_info_test.cc @@ -27,8 +27,6 @@ TEST(test_op_compatible_info, test_op_compatible) { comp_map.InitOpCompatibleMap(); ASSERT_NE(comp_map.GetDefaultRequiredVersion(), std::string()); - ASSERT_NE(comp_map.GetOpCompatibleInfo("sequence_pad").required_version_, - std::string()); ASSERT_NE(comp_map.GetOpCompatibleInfo("reshape").required_version_, std::string()); ASSERT_NE(comp_map.GetOpCompatibleInfo("layer_norm").required_version_, @@ -36,19 +34,6 @@ TEST(test_op_compatible_info, test_op_compatible) { ASSERT_NE(comp_map.GetOpCompatibleInfo("layer_xx").required_version_, std::string()); - auto comp_1 = comp_map.IsRequireMiniVersion("sequence_pad", "1.5.0"); - ASSERT_EQ(comp_1, OpCompatibleType::definite_not); - auto comp_2 = comp_map.IsRequireMiniVersion("sequence_pad", "1.6.0"); - ASSERT_EQ(comp_2, OpCompatibleType::compatible); - auto comp_3 = comp_map.IsRequireMiniVersion("sequence_pad", "1.6.1"); - ASSERT_EQ(comp_3, OpCompatibleType::compatible); - auto comp_6 = comp_map.IsRequireMiniVersion("sequence_pad", "1.7.0"); - ASSERT_EQ(comp_6, OpCompatibleType::compatible); - auto comp_7 = comp_map.IsRequireMiniVersion("sequence_pad", "0.7.0"); - ASSERT_EQ(comp_7, OpCompatibleType::definite_not); - auto comp_8 = comp_map.IsRequireMiniVersion("sequence_pad", "2.0.0"); - ASSERT_EQ(comp_8, OpCompatibleType::compatible); - ASSERT_EQ(comp_map.IsRequireMiniVersion("unkop", "2.0.0"), OpCompatibleType::compatible); ASSERT_EQ(comp_map.IsRequireMiniVersion("unkop", "0.7.0"), diff --git a/test/legacy_test/test_zero_dim_sundry_static_api_part3.py b/test/legacy_test/test_zero_dim_sundry_static_api_part3.py index c25bdead36e1e5..146b5811c0cc7a 100644 --- 
a/test/legacy_test/test_zero_dim_sundry_static_api_part3.py +++ b/test/legacy_test/test_zero_dim_sundry_static_api_part3.py @@ -356,21 +356,6 @@ def test_t(self): self.assertEqual(res[1].shape, ()) self.assertEqual(res[2].shape, ()) - @prog_scope() - def test_sequence_pad(self): - x = paddle.static.data("x", [-1, 2], dtype=paddle.int64, lod_level=1) - value = paddle.to_tensor(1000, dtype=paddle.int64).squeeze() - out = paddle.static.nn.sequence_pad(x, value) - - x_tensor = paddle.base.create_lod_tensor( - np.arange(20).astype(np.int64).reshape(-1, 2), - [[3, 3, 4]], - place=self.exe.place, - ) - prog = paddle.static.default_main_program() - res = self.exe.run(prog, feed={"x": x_tensor}, fetch_list=[out]) - self.assertEqual(res[0].shape, (3, 4, 2)) - @test_with_pir_api @prog_scope() def test_static_data(self): diff --git a/test/sequence/test_sequence_pad_op.py b/test/sequence/test_sequence_pad_op.py deleted file mode 100644 index 743e21dce5c3e7..00000000000000 --- a/test/sequence/test_sequence_pad_op.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest - -import numpy as np -from op_test import OpTest - -import paddle - - -class TestSequencePadOp(OpTest): - def set_attr(self): - self.x_shape = [12, 10] - self.x_len_lod = [[2, 3, 4, 3]] - self.pad_value = [1.0] - self.padded_length = -1 - self.dtype = 'float64' - - def set_data(self): - x_data = np.random.uniform(0.1, 0.5, self.x_shape).astype(self.dtype) - pad_value_data = np.array(self.pad_value).astype(self.dtype) - self.inputs = { - 'X': (x_data, self.x_len_lod), - 'PadValue': pad_value_data, - } - self.attrs = {'padded_length': self.padded_length} - - def compute(self): - # get padded length - padded_length = self.padded_length - x_len_lod_0 = self.x_len_lod[0] - if padded_length == -1: - max_seq_len = 0 - for l in x_len_lod_0: - max_seq_len = max(max_seq_len, l) - padded_length = max_seq_len - - # do padding - x_data = self.inputs['X'][0] - pad_value_data = self.inputs['PadValue'] - if pad_value_data.shape == (1,): - pad_value_data = np.broadcast_to( - pad_value_data, shape=x_data.shape[1:] - ) - padded_sequences = [] - start_idx = 0 - for l in x_len_lod_0: - end_idx = start_idx + l - seq = x_data[start_idx:end_idx] - to_pad_len = padded_length - l - for _ in range(to_pad_len): - seq = np.append(seq, pad_value_data[np.newaxis, :], axis=0) - padded_sequences.append(seq) - start_idx = end_idx - - out_data = np.array(padded_sequences) - length = np.array(self.x_len_lod[0]).reshape(-1) - self.outputs = {'Out': out_data, 'Length': length} - - def setUp(self): - self.op_type = 'sequence_pad' - self.set_attr() - self.set_data() - self.compute() - - def test_check_output(self): - self.check_output(check_dygraph=False) - - def test_check_grad(self): - self.check_grad(["X"], "Out", check_dygraph=False) - - -class TestSequencePadOp2(TestSequencePadOp): - def set_attr(self): - self.x_shape = [12, 10] - self.x_len_lod = [[2, 3, 4, 3]] - 
self.pad_value = np.random.random(10) - self.padded_length = -1 - self.dtype = 'float64' - - -class TestSequencePadOp3(TestSequencePadOp): - def set_attr(self): - self.x_shape = [12, 10] - self.x_len_lod = [[2, 3, 4, 3]] - self.pad_value = [1.0] - self.padded_length = 7 - self.dtype = 'float64' - - -class TestSequencePadOp4(TestSequencePadOp): - def set_attr(self): - self.x_shape = [12, 10] - self.x_len_lod = [[2, 3, 4, 3]] - self.pad_value = np.random.random(10) - self.padded_length = 7 - self.dtype = 'float64' - - -class TestSequencePadOp5(TestSequencePadOp): - def set_attr(self): - self.x_shape = [12, 2, 5] - self.x_len_lod = [[2, 3, 4, 3]] - self.pad_value = [1.0] - self.padded_length = -1 - self.dtype = 'float64' - - -class TestSequencePadOp6(TestSequencePadOp): - def set_attr(self): - self.x_shape = [12, 2, 5] - self.x_len_lod = [[2, 3, 4, 3]] - self.pad_value = np.random.random((2, 5)) - self.padded_length = -1 - self.dtype = 'float64' - - -class TestSequencePadOp7(TestSequencePadOp): - def set_attr(self): - self.x_shape = [12, 2, 5] - self.x_len_lod = [[2, 3, 4, 3]] - self.pad_value = [1.0] - self.padded_length = 7 - self.dtype = 'float64' - - -class TestSequencePadOp8(TestSequencePadOp): - def set_attr(self): - self.x_shape = [12, 2, 5] - self.x_len_lod = [[0, 8, 0, 4, 0]] - self.pad_value = [1.0] - self.padded_length = 10 - self.dtype = 'float64' - - -class TestSequencePadOpError(unittest.TestCase): - def test_error(self): - def test_x_variable(): - # the input x type must be Variable - x = np.random.random((2, 4)).astype("float32") - - pad_value = paddle.assign(np.array([0.0], dtype=np.float32)) - paddle.static.nn.sequence_lod.sequence_pad(x=x, pad_value=pad_value) - - self.assertRaises(TypeError, test_x_variable) - - def test_pad_value_variable(): - x1 = paddle.static.data( - name='x1', shape=[-1, 10, 5], dtype='float32', lod_level=1 - ) - pad_value1 = np.array([0.0], dtype=np.float32) - paddle.static.nn.sequence_lod.sequence_pad( - x=x1, pad_value=pad_value1 - ) - - self.assertRaises(TypeError, test_pad_value_variable) - - def test_dtype(): - x2 = paddle.static.data( - name='x2', shape=[-1, 10, 5], dtype='int16', lod_level=1 - ) - - pad_value2 = paddle.assign(np.array([0.0], dtype=np.int32)) - paddle.static.nn.sequence_lod.sequence_pad( - x=x2, pad_value=pad_value2 - ) - - self.assertRaises(TypeError, test_dtype) - - def test_length_dtype(self): - x = paddle.static.data( - name='x', shape=[10, 5], dtype='float32', lod_level=1 - ) - - pad_value = paddle.assign(np.array([0.0], dtype=np.float32)) - out, length = paddle.static.nn.sequence_lod.sequence_pad( - x=x, pad_value=pad_value - ) - # check if the dtype of length is int64 in compile time - self.assertEqual(length.dtype, paddle.int64) - - -if __name__ == '__main__': - unittest.main() diff --git a/test/sequence/test_sequence_unpad_op.py b/test/sequence/test_sequence_unpad_op.py deleted file mode 100644 index 9eaaff04e5fdf4..00000000000000 --- a/test/sequence/test_sequence_unpad_op.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest - -import numpy as np -from op_test import OpTest - -import paddle - - -class TestSequenceUnpadOp(OpTest): - def init(self): - self.length = [2, 3, 4] - self.x_shape = (3, 40) - self.dtype = "float64" - - def compute(self): - assert len(self.length) == self.x_shape[0] - x = np.random.random(self.x_shape).astype(self.dtype) - out_lod = [self.length] - - out = x[0, 0 : self.length[0]] - for i in range(1, x.shape[0]): - out = np.append(out, x[i, 0 : self.length[i]], axis=0) - - out_shape = (sum(self.length),) - if len(self.x_shape) == 2: - out_shape = out_shape + (1,) - else: - out_shape = out_shape + self.x_shape[2:] - - self.inputs = {'X': x, 'Length': np.array(self.length).astype('int64')} - self.outputs = {'Out': (out.reshape(out_shape), out_lod)} - - def setUp(self): - self.op_type = 'sequence_unpad' - self.init() - self.compute() - - def test_check_output(self): - self.check_output(check_dygraph=False) - - def test_check_grad(self): - self.check_grad(["X"], "Out", check_dygraph=False) - - -class TestSequenceUnpadOp2(TestSequenceUnpadOp): - def init(self): - self.length = [2, 3, 4] - self.x_shape = (3, 5, 4, 3) - self.dtype = "float64" - - -class TestSequenceUnpadOp3(TestSequenceUnpadOp): - def init(self): - self.length = [5, 2, 3, 4] - self.x_shape = (4, 5, 3, 3, 6) - self.dtype = "float64" - - -class TestSequenceUnpadOp4(TestSequenceUnpadOp): - def init(self): - self.length = [5, 0, 0, 4] - self.x_shape = (4, 5, 3, 3, 6) - self.dtype = "float64" - - -class TestSequenceUnpadOp5(TestSequenceUnpadOp): - def init(self): - self.length = [0, 4, 3, 0] - self.x_shape = (4, 5, 3, 3, 6) - self.dtype = "float64" - - -class TestSequenceUnpadOpError(unittest.TestCase): - def test_error(self): - def test_x_variable(): - x = np.random.random((10, 5)).astype("float64") - len = paddle.static.data(name='length2', shape=[10], dtype='int64') - paddle.static.nn.sequence_lod.sequence_pad(x=x, length=len) - - self.assertRaises(TypeError, test_x_variable) - - def test_length_variable(): - x1 = paddle.static.data(name='x1', shape=[10, 5], dtype='float32') - len1 = np.random.random(10).astype("int64") - paddle.static.nn.sequence_lod.sequence_pad(x=x1, length=len1) - - self.assertRaises(TypeError, test_length_variable) - - def test_x_dtype(): - x2 = paddle.static.data(name='x2', shape=[10, 5], dtype='float16') - len2 = paddle.static.data(name='length2', shape=[10], dtype='int64') - paddle.static.nn.sequence_lod.sequence_pad(x=x2, length=len2) - - self.assertRaises(TypeError, test_x_dtype) - - def test_length_dtype(): - x3 = paddle.static.data(name='x3', shape=[10, 5], dtype='float64') - len3 = paddle.static.data(name='length3', shape=[10], dtype='int32') - paddle.static.nn.sequence_lod.sequence_pad(x=x3, length=len3) - - self.assertRaises(TypeError, test_length_dtype) - - -if __name__ == '__main__': - unittest.main() diff --git a/test/xpu/test_sequence_unpad_op_xpu.py b/test/xpu/test_sequence_unpad_op_xpu.py deleted file mode 100644 index 0a61d8b22ec96b..00000000000000 --- a/test/xpu/test_sequence_unpad_op_xpu.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from get_test_cover_info import ( - XPUOpTestWrapper, - create_test_class, - get_xpu_op_support_types, -) -from op_test_xpu import XPUOpTest - -import paddle - -paddle.enable_static() - - -class XPUTestSequenceUnpadOp(XPUOpTestWrapper): - def __init__(self): - self.op_name = 'sequence_unpad' - self.use_dynamic_create_class = False - - class TestSequenceUnpadOp(XPUOpTest): - def setUp(self): - self.init_dtype() - self.initTestCase() - self.set_xpu() - self.op_type = 'sequence_unpad' - self.place = paddle.XPUPlace(0) - self.compute() - - def init_dtype(self): - self.dtype = self.in_type - - def set_xpu(self): - self.__class__.use_xpu = True - self.__class__.no_need_check_grad = True - - def test_check_output(self): - self.check_output_with_place(self.place) - - def initTestCase(self): - self.length = [2, 3, 4] - self.x_shape = (3, 40) - - def compute(self): - assert len(self.length) == self.x_shape[0] - x = np.random.random(self.x_shape).astype(self.dtype) - out_lod = [self.length] - - out = x[0, 0 : self.length[0]] - for i in range(1, x.shape[0]): - out = np.append(out, x[i, 0 : self.length[i]], axis=0) - - out_shape = (sum(self.length),) - if len(self.x_shape) == 2: - out_shape = out_shape + (1,) - else: - out_shape = out_shape + self.x_shape[2:] - - self.inputs = { - 'X': x, - 'Length': np.array(self.length).astype('int64'), - } - self.outputs = {'Out': (out.reshape(out_shape), out_lod)} - - class TestSequenceUnpadOp2(TestSequenceUnpadOp): - def initTestCase(self): - self.length = [2, 3, 4] - self.x_shape = (3, 5, 4, 3) - - class TestSequenceUnpadOp3(TestSequenceUnpadOp): - def initTestCase(self): - self.length = [5, 2, 3, 4] - self.x_shape = (4, 5, 3, 3, 6) - - class TestSequenceUnpadOp4(TestSequenceUnpadOp): - def initTestCase(self): - self.length = [5, 5, 5, 5] - self.x_shape = (4, 5, 3, 3, 6) - - class TestSequenceUnpadOp5(TestSequenceUnpadOp): - def initTestCase(self): - self.length = [1, 4, 3, 1] - self.x_shape = (4, 5, 3, 3, 6) - - -class TestSequenceUnpadOpError(unittest.TestCase): - def test_error(self): - """ - The type of 'x' in paddle.static.nn.sequence_unpad must be Variable, but received numpy.ndarray. - """ - - def test_x_variable(): - x = np.random.random((10, 5)).astype("float64") - len = paddle.static.data(name='length2', shape=[10], dtype='int64') - paddle.static.nn.sequence_lod.sequence_unpad(x=x, length=len) - - self.assertRaises(TypeError, test_x_variable) - """ - The type of 'length' in base.layers.sequence_unpad must be Variable, but received numpy.ndarray.
- """ - - def test_length_variable(): - x1 = paddle.static.data(name='x1', shape=[10, 5], dtype='float32') - len1 = np.random.random(10).astype("int64") - paddle.static.nn.sequence_lod.sequence_unpad(x=x1, length=len1) - - self.assertRaises(TypeError, test_length_variable) - """ - The data type of 'x' in base.layers.sequence_unpad must be ['float32', 'float64', 'int32', 'int64'], but received float16 - """ - - def test_x_dtype(): - x2 = paddle.static.data(name='x2', shape=[10, 5], dtype='float16') - len2 = paddle.static.data(name='length2', shape=[10], dtype='int64') - paddle.static.nn.sequence_lod.sequence_unpad(x=x2, length=len2) - - self.assertRaises(TypeError, test_x_dtype) - """ - The data type of 'length' in base.layers.sequence_unpad must be ['int64'], but received int32 - """ - - def test_length_dtype(): - x3 = paddle.static.data(name='x3', shape=[10, 5], dtype='float64') - len3 = paddle.static.data(name='length3', shape=[10], dtype='int32') - paddle.static.nn.sequence_lod.sequence_unpad(x=x3, length=len3) - - self.assertRaises(TypeError, test_length_dtype) - - -support_types = get_xpu_op_support_types('sequence_unpad') -for stype in support_types: - create_test_class(globals(), XPUTestSequenceUnpadOp, stype) - -if __name__ == '__main__': - unittest.main()