
Commit 8352ce9

DenseTensor (PaddlePaddle#48419)
1 parent 8d81de2 commit 8352ce9

19 files changed (+195 lines, -203 lines)

paddle/fluid/operators/sequence_ops/sequence_conv_op.cc

Lines changed: 15 additions & 14 deletions
@@ -145,30 +145,31 @@ class SequenceConvOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput(
         "X",
-        "(LoDTensor) the input(X) is a LodTensor, which supports "
+        "(phi::DenseTensor) the input(X) is a LodTensor, which supports "
         "variable-time length input sequence. The underlying tensor in "
-        "this LoDTensor is a matrix with shape (T, N), where T is the "
+        "this phi::DenseTensor is a matrix with shape (T, N), where T is the "
         "total time steps in this mini-batch and N is the input_hidden_size.");
-    AddInput("PaddingData",
-             "(Tensor, optional) the input(PaddingData) is an optional "
-             "parameter, and it is learnable. "
-             "This is a tensor with shape (P, N), where P is the "
-             "top_pad + bottom_pad, N is the input_hidden_size. In order to "
-             "ensure the equal length of sequence before and after "
-             "convolution, it is necessary to fill the top and bottom of each "
-             "sequence according to context_length, context_stride and "
-             "context_start")
+    AddInput(
+        "PaddingData",
+        "(phi::DenseTensor, optional) the input(PaddingData) is an optional "
+        "parameter, and it is learnable. "
+        "This is a tensor with shape (P, N), where P is the "
+        "top_pad + bottom_pad, N is the input_hidden_size. In order to "
+        "ensure the equal length of sequence before and after "
+        "convolution, it is necessary to fill the top and bottom of each "
+        "sequence according to context_length, context_stride and "
+        "context_start")
         .AsDispensable();
     AddInput(
         "Filter",
-        "(Tensor) the input(Filter) is an learnable parameter."
+        "(phi::DenseTensor) the input(Filter) is an learnable parameter."
         "This is a tensor with shape (K, M), where K is the "
         "context_length * input_hidden_size, M is the output feature size.");
     AddOutput(
         "Out",
-        "(LoDTensor) the output(Out) is a LodTensor, which support "
+        "(phi::DenseTensor) the output(Out) is a LodTensor, which support "
         "variable-time length output sequence. The underlying tensor in "
-        "this LoDTensor is a matrix with shape (T, M), where, T is the "
+        "this phi::DenseTensor is a matrix with shape (T, M), where, T is the "
         "total time steps in this mini-batch, M is the output feature size.");

     AddAttr<bool>("paddingTrainable",
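Note on the shapes described by the proto strings above: X is (T, N), PaddingData is (P, N) with P = top_pad + bottom_pad, Filter is (K, M) with K = context_length * input_hidden_size, and Out is (T, M). The following is a minimal standalone sketch of that bookkeeping, plain C++ rather than Paddle code; every name in it is hypothetical.

// Illustrative shape bookkeeping for the sequence_conv proto strings above.
// Plain C++, not Paddle code; the variable names here are hypothetical.
#include <cassert>
#include <cstdio>

int main() {
  int T = 10;              // total time steps in the mini-batch (rows of X)
  int N = 32;              // input_hidden_size (columns of X and PaddingData)
  int context_length = 3;  // contextLength attribute
  int M = 64;              // output feature size (columns of Filter and Out)

  int K = context_length * N;         // Filter must have shape (K, M)
  int col_cols = context_length * N;  // the kernel's im2col buffer is (T, col_cols)
  assert(K == col_cols);              // same quantity, seen from two sides

  std::printf("X: (%d, %d)  Filter: (%d, %d)  col: (%d, %d)  Out: (%d, %d)\n",
              T, N, K, M, T, col_cols, T, M);
  return 0;
}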

paddle/fluid/operators/sequence_ops/sequence_conv_op.h

Lines changed: 16 additions & 18 deletions
@@ -22,15 +22,12 @@ limitations under the License. */
 namespace paddle {
 namespace operators {

-using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
-
 template <typename DeviceContext, typename T>
 class SequenceConvKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* in = context.Input<LoDTensor>("X");
-    auto* out = context.Output<LoDTensor>("Out");
+    auto* in = context.Input<phi::DenseTensor>("X");
+    auto* out = context.Output<phi::DenseTensor>("Out");
     auto filter = *context.Input<phi::DenseTensor>("Filter");

     out->mutable_data<T>(context.GetPlace());
@@ -40,11 +37,11 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     int context_stride = context.Attr<int>("contextStride");
     bool padding_trainable = context.Attr<bool>("paddingTrainable");

-    PADDLE_ENFORCE_EQ(
-        in->lod().empty(),
-        false,
-        platform::errors::InvalidArgument("Input(X) Tensor of SequenceConvOp "
-                                          "does not contain LoD information."));
+    PADDLE_ENFORCE_EQ(in->lod().empty(),
+                      false,
+                      platform::errors::InvalidArgument(
+                          "Input(X) phi::DenseTensor of SequenceConvOp "
+                          "does not contain LoD information."));
     PADDLE_ENFORCE_EQ(
         in->lod().size(),
         1UL,
@@ -64,7 +61,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {

     framework::DDim col_shape = {in->dims()[0],
                                  context_length * sequence_width};
-    Tensor col;
+    phi::DenseTensor col;
     col.mutable_data<T>(col_shape, context.GetPlace());
     // Because if padding_trainable is false, padding data should be zeros.
     phi::funcs::SetConstant<DeviceContext, T> set_zero;
@@ -92,13 +89,14 @@ template <typename DeviceContext, typename T>
 class SequenceConvGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* in_g = context.Output<LoDTensor>(framework::GradVarName("X"));
-    auto* out_g = context.Input<LoDTensor>(framework::GradVarName("Out"));
+    auto* in_g = context.Output<phi::DenseTensor>(framework::GradVarName("X"));
+    auto* out_g =
+        context.Input<phi::DenseTensor>(framework::GradVarName("Out"));
     auto* filter_g =
         context.Output<phi::DenseTensor>(framework::GradVarName("Filter"));
     auto* padding_data_g =
         context.Output<phi::DenseTensor>(framework::GradVarName("PaddingData"));
-    auto* in = context.Input<LoDTensor>("X");
+    auto* in = context.Input<phi::DenseTensor>("X");
     auto* filter = context.Input<phi::DenseTensor>("Filter");

     int context_start = context.Attr<int>("contextStart");
@@ -125,7 +123,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
     // use col_shape in the im2col calculation
     framework::DDim col_shape = {in->dims()[0],
                                  sequence_width * context_length};
-    Tensor col;
+    phi::DenseTensor col;

     if (in_g || filter_g || (padding_trainable && padding_data_g)) {
       col.mutable_data<T>(col_shape, context.GetPlace());
@@ -159,7 +157,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
       padding_data_g->mutable_data<T>(context.GetPlace());
       set_zero(dev_ctx, padding_data_g, static_cast<T>(0));

-      LoDTensor* input = const_cast<LoDTensor*>(in);
+      phi::DenseTensor* input = const_cast<phi::DenseTensor*>(in);
       seq_project_grad_functor(dev_ctx,
                                *input,
                                padding_trainable,
@@ -178,8 +176,8 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
       filter_g->mutable_data<T>(context.GetPlace());
       set_zero(dev_ctx, filter_g, static_cast<T>(0));

-      Tensor filter_grad = *filter_g;
-      LoDTensor out_grad = *out_g;
+      phi::DenseTensor filter_grad = *filter_g;
+      phi::DenseTensor out_grad = *out_g;

       const phi::DenseTensor* padding_data = nullptr;
       if (padding_trainable) {

paddle/fluid/operators/sequence_ops/sequence_conv_op_xpu.cc

Lines changed: 16 additions & 16 deletions
@@ -19,14 +19,13 @@ limitations under the License. */

 namespace paddle {
 namespace operators {
-using Tensor = phi::DenseTensor;

 template <typename DeviceContext, typename T>
 class SequenceConvXPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* in = context.Input<LoDTensor>("X");
-    auto* out = context.Output<LoDTensor>("Out");
+    auto* in = context.Input<phi::DenseTensor>("X");
+    auto* out = context.Output<phi::DenseTensor>("Out");
     auto filter = *context.Input<phi::DenseTensor>("Filter");

     out->mutable_data<T>(context.GetPlace());
@@ -36,11 +35,11 @@ class SequenceConvXPUKernel : public framework::OpKernel<T> {
     int context_stride = context.Attr<int>("contextStride");
     bool padding_trainable = context.Attr<bool>("paddingTrainable");

-    PADDLE_ENFORCE_EQ(
-        in->lod().empty(),
-        false,
-        platform::errors::InvalidArgument("Input(X) Tensor of SequenceConvOp "
-                                          "does not contain LoD information."));
+    PADDLE_ENFORCE_EQ(in->lod().empty(),
+                      false,
+                      platform::errors::InvalidArgument(
+                          "Input(X) phi::DenseTensor of SequenceConvOp "
+                          "does not contain LoD information."));
     PADDLE_ENFORCE_EQ(
         in->lod().size(),
         1UL,
@@ -159,23 +158,24 @@ template <typename DeviceContext, typename T>
 class SequenceConvGradXPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* in_g = context.Output<LoDTensor>(framework::GradVarName("X"));
-    auto* out_g = context.Input<LoDTensor>(framework::GradVarName("Out"));
+    auto* in_g = context.Output<phi::DenseTensor>(framework::GradVarName("X"));
+    auto* out_g =
+        context.Input<phi::DenseTensor>(framework::GradVarName("Out"));
     auto* filter_g =
         context.Output<phi::DenseTensor>(framework::GradVarName("Filter"));
-    auto* in = context.Input<LoDTensor>("X");
+    auto* in = context.Input<phi::DenseTensor>("X");
     auto* filter = context.Input<phi::DenseTensor>("Filter");

     int context_start = context.Attr<int>("contextStart");
     int context_length = context.Attr<int>("contextLength");
     int context_stride = context.Attr<int>("contextStride");
     bool padding_trainable = context.Attr<bool>("paddingTrainable");

-    PADDLE_ENFORCE_EQ(
-        in->lod().empty(),
-        false,
-        platform::errors::InvalidArgument("Input(X) Tensor of SequenceConvOp "
-                                          "does not contain LoD information."));
+    PADDLE_ENFORCE_EQ(in->lod().empty(),
+                      false,
+                      platform::errors::InvalidArgument(
+                          "Input(X) phi::DenseTensor of SequenceConvOp "
+                          "does not contain LoD information."));
     PADDLE_ENFORCE_EQ(
         in->lod().size(),
         1UL,

paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc

Lines changed: 4 additions & 4 deletions
@@ -36,11 +36,11 @@ class SequenceEnumerateOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("X",
-             "(2-D LoDTensor with the 2nd dimension equal to 1) "
-             "Input LoDTensor of SequenceEnumerate operator.");
+             "(2-D phi::DenseTensor with the 2nd dimension equal to 1) "
+             "Input phi::DenseTensor of SequenceEnumerate operator.");
     AddOutput("Out",
-              "(2-D LoDTensor with the 2nd dimension equal to win_size) "
-              "Output LoDTensor of SequenceEnumerate operator.");
+              "(2-D phi::DenseTensor with the 2nd dimension equal to win_size) "
+              "Output phi::DenseTensor of SequenceEnumerate operator.");
     AddAttr<int>("win_size", "(int) The enumerate sequence window size.")
         .AddCustomChecker([](const int& win_size) {
           PADDLE_ENFORCE_GE(win_size,
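The win_size and pad_value attributes above suggest the usual enumerate semantics: for each position of a sequence, emit a window of win_size ids, padding past the end of the sequence with pad_value. The sketch below is a plain-C++ illustration under that assumption; it is not the Paddle kernel.

// Minimal sketch of the assumed enumerate semantics (not the Paddle kernel):
// every input position produces a row of `win_size` ids, padded with
// `pad_value` once the window runs past the end of the sequence.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<std::vector<int64_t>> Enumerate(const std::vector<int64_t>& seq,
                                            int win_size, int64_t pad_value) {
  std::vector<std::vector<int64_t>> out(
      seq.size(), std::vector<int64_t>(win_size, pad_value));
  for (std::size_t i = 0; i < seq.size(); ++i)
    for (int w = 0; w < win_size && i + w < seq.size(); ++w)
      out[i][w] = seq[i + w];
  return out;
}

int main() {
  // One sequence [1, 2, 3, 4] with win_size = 2 and pad_value = 0.
  for (const auto& row : Enumerate({1, 2, 3, 4}, 2, 0)) {
    for (int64_t v : row) std::cout << v << ' ';
    std::cout << '\n';  // expected rows: 1 2 / 2 3 / 3 4 / 4 0
  }
  return 0;
}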

paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cu

Lines changed: 2 additions & 3 deletions
@@ -21,7 +21,6 @@
 namespace paddle {
 namespace operators {
 using phi::PADDLE_CUDA_NUM_THREADS;
-using LoDTensor = phi::DenseTensor;

 template <typename T>
 __global__ void CalcOutPut(const T* in_data,
@@ -52,8 +51,8 @@ template <typename T>
 class SequenceEnumerateOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* in = context.Input<LoDTensor>("X");
-    auto* out = context.Output<LoDTensor>("Out");
+    auto* in = context.Input<phi::DenseTensor>("X");
+    auto* out = context.Output<phi::DenseTensor>("Out");
     int win_size = context.Attr<int>("win_size");
     int pad_value = context.Attr<int>("pad_value");


paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h

Lines changed: 3 additions & 4 deletions
@@ -18,22 +18,21 @@

 namespace paddle {
 namespace operators {
-using LoDTensor = phi::DenseTensor;

 template <typename DeviceContext, typename T>
 class SequenceEnumerateKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* in = context.Input<LoDTensor>("X");
-    auto* out = context.Output<LoDTensor>("Out");
+    auto* in = context.Input<phi::DenseTensor>("X");
+    auto* out = context.Output<phi::DenseTensor>("Out");
     int win_size = context.Attr<int>("win_size");
     auto pad_value = static_cast<T>(context.Attr<int>("pad_value"));

     PADDLE_ENFORCE_EQ(
         in->lod().empty(),
         false,
         platform::errors::InvalidArgument(
-            "Input(X) Tensor of SequenceEnumerateOp does not contain "
+            "Input(X) phi::DenseTensor of SequenceEnumerateOp does not contain "
             "LoD information."));

     auto in_dims = in->dims();

paddle/fluid/operators/sequence_ops/sequence_erase_op.cc

Lines changed: 19 additions & 18 deletions
@@ -27,20 +27,21 @@ class SequenceEraseOp : public framework::OperatorWithKernel {
     OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceErase");
     OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceErase");
     auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE(x_dims.size() == 2 && x_dims[1] == 1,
-                   platform::errors::InvalidArgument(
-                       "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
-                       "with the 2nd dimension equal to 1,"
-                       "but received size %d with the 2nd dimension %d.",
-                       x_dims.size(),
-                       x_dims[1]));
+    PADDLE_ENFORCE(
+        x_dims.size() == 2 && x_dims[1] == 1,
+        platform::errors::InvalidArgument(
+            "Input(X) of SequenceEraseOp should be a 2-D phi::DenseTensor "
+            "with the 2nd dimension equal to 1,"
+            "but received size %d with the 2nd dimension %d.",
+            x_dims.size(),
+            x_dims[1]));
     ctx->SetOutputDim("Out", x_dims);
-    // The output LoDTensor's lod_level should be input X's lod_level.
+    // The output phi::DenseTensor's lod_level should be input X's lod_level.
     // For compile-time, we call SetLoDLevel to set output's lod_level.
-    // For runtime, output LoDTensor's lod is determined by input X's lod and
-    // the level specified by input RandTable.
-    // We cannot get X's detail lod and RankTable's level in this function, so
-    // leave this work to the detail kernel implementation.
+    // For runtime, output phi::DenseTensor's lod is determined by input X's lod
+    // and the level specified by input RandTable. We cannot get X's detail lod
+    // and RankTable's level in this function, so leave this work to the detail
+    // kernel implementation.
     if (!ctx->IsRuntime()) {
       ctx->SetLoDLevel("Out", ctx->GetLoDLevel("X"));
     }
@@ -51,11 +52,11 @@ class SequenceEraseOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("X",
-             "(2-D LoDTensor with the 2nd dim. equal to 1) "
-             "Input LoDTensor of SequenceEraseOp.");
+             "(2-D phi::DenseTensor with the 2nd dim. equal to 1) "
+             "Input phi::DenseTensor of SequenceEraseOp.");
     AddOutput("Out",
-              "(2-D LoDTensor with the 2nd dim. equal to 1) "
-              "Output LoDTensor of SequenceEraseOp.");
+              "(2-D phi::DenseTensor with the 2nd dim. equal to 1) "
+              "Output phi::DenseTensor of SequenceEraseOp.");
     AddAttr<std::vector<int>>("tokens",
                               "(vector<int>) Tokens need to be erased from "
                               "input sequences.");
@@ -64,7 +65,7 @@ Sequence Erase Operator.

 Sequence erase operator erases tokens specified by Attr(tokens) from the input
 sequences Input(X), and outputs the remaining data and modifies the LoD
-information at the same time. For example, given a 2-D LoDTensor
+information at the same time. For example, given a 2-D phi::DenseTensor

 X = [[2, 2, 6, 1, 3, 9, 6, 1, 0, 1]]^T

@@ -77,7 +78,7 @@ operation, the three sequences become

 X1' = [[6]]^T, X2' = [[1, 9]]^T and X3' = [[6, 1, 0, 1]]^T.

-Hence the LoDTensor Output(Out) should be
+Hence the phi::DenseTensor Output(Out) should be

 Out = [[6, 1, 9, 6, 1, 0, 1]]^T,

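The doc-string example above concatenates the erased sequences X1', X2', X3' into Out. The sketch below reproduces that behavior in standalone C++; it is not the kernel touched by this commit, and the LoD offsets and token set are hypothetical values chosen only to be consistent with the X and Out shown above.

// Standalone sketch of the erase semantics described above: drop every value
// that appears in `tokens`, keeping the per-sequence boundaries (the LoD)
// in sync. Plain C++ for illustration only.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <unordered_set>
#include <vector>

void SequenceErase(const std::vector<int64_t>& x,
                   const std::vector<std::size_t>& lod,  // sequence offsets into x
                   const std::unordered_set<int64_t>& tokens,
                   std::vector<int64_t>* out, std::vector<std::size_t>* out_lod) {
  out->clear();
  out_lod->assign(1, 0);
  for (std::size_t s = 0; s + 1 < lod.size(); ++s) {
    for (std::size_t i = lod[s]; i < lod[s + 1]; ++i)
      if (!tokens.count(x[i])) out->push_back(x[i]);
    out_lod->push_back(out->size());  // boundary of this sequence after erasing
  }
}

int main() {
  std::vector<int64_t> x = {2, 2, 6, 1, 3, 9, 6, 1, 0, 1};
  std::vector<std::size_t> lod = {0, 3, 6, 10};  // three sequences (hypothetical)
  std::vector<int64_t> out;
  std::vector<std::size_t> out_lod;
  SequenceErase(x, lod, {2, 3, 5}, &out, &out_lod);  // hypothetical token set
  for (int64_t v : out) std::cout << v << ' ';       // prints: 6 1 9 6 1 0 1
  std::cout << '\n';
  return 0;
}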
paddle/fluid/operators/sequence_ops/sequence_erase_op.cu

Lines changed: 2 additions & 3 deletions
@@ -21,7 +21,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 using phi::PADDLE_CUDA_NUM_THREADS;
-using LoDTensor = phi::DenseTensor;

 template <typename T>
 __global__ void LabelErasedIdx(const T* in_dat,
@@ -67,8 +66,8 @@ template <typename T>
 class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* in = ctx.Input<LoDTensor>("X");
-    auto* out = ctx.Output<LoDTensor>("Out");
+    auto* in = ctx.Input<phi::DenseTensor>("X");
+    auto* out = ctx.Output<phi::DenseTensor>("Out");

     auto lod = in->lod();
     PADDLE_ENFORCE_EQ(
