
Commit 388c69f

YuxiangLu and xymyeah authored
[NPU] squeeze and unsqueeze op for ascend (#31452)
Co-authored-by: root <[email protected]>
1 parent c956c03 commit 388c69f

4 files changed: +273 -0 lines changed

File 1 of 4: NPU kernel registration for squeeze and squeeze2 (45 additions, 0 deletions)
@@ -0,0 +1,45 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef PADDLE_WITH_ASCEND_CL
#include <memory>
#include <string>

#include "paddle/fluid/operators/squeeze_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_NPU_KERNEL(
    squeeze,
    ops::SqueezeKernel<plat::NPUDeviceContext, float>,
    ops::SqueezeKernel<plat::NPUDeviceContext, double>,
    ops::SqueezeKernel<plat::NPUDeviceContext, plat::float16>,
    ops::SqueezeKernel<plat::NPUDeviceContext, bool>,
    ops::SqueezeKernel<plat::NPUDeviceContext, int>,
    ops::SqueezeKernel<plat::NPUDeviceContext, uint8_t>,
    ops::SqueezeKernel<plat::NPUDeviceContext, int8_t>,
    ops::SqueezeKernel<plat::NPUDeviceContext, int64_t>);
REGISTER_OP_NPU_KERNEL(
    squeeze2,
    ops::SqueezeKernel<plat::NPUDeviceContext, float>,
    ops::SqueezeKernel<plat::NPUDeviceContext, double>,
    ops::SqueezeKernel<plat::NPUDeviceContext, plat::float16>,
    ops::SqueezeKernel<plat::NPUDeviceContext, bool>,
    ops::SqueezeKernel<plat::NPUDeviceContext, int>,
    ops::SqueezeKernel<plat::NPUDeviceContext, uint8_t>,
    ops::SqueezeKernel<plat::NPUDeviceContext, int8_t>,
    ops::SqueezeKernel<plat::NPUDeviceContext, int64_t>);
#endif
File 2 of 4: unit test for the squeeze NPU kernel (92 additions, 0 deletions)
@@ -0,0 +1,92 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef _WIN32
#include <unistd.h>
#endif

#include <string>
#include <thread>  // NOLINT
#include <vector>

#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/operators/dropout_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/string/printf.h"

namespace f = paddle::framework;
namespace p = paddle::platform;
namespace m = paddle::operators::math;

USE_OP(squeeze);
USE_OP_DEVICE_KERNEL(squeeze, NPU);

template <typename T>
void Compare(f::Scope* scope, const p::DeviceContext& ctx) {
  // init
  auto x = scope->Var("X");
  auto tensor_x = x->GetMutable<f::LoDTensor>();

  int dim0 = 1;
  int dim1 = 10;
  int dim2 = 1;

  std::vector<T> init;
  for (int64_t i = 0; i < dim0 * dim1 * dim2; ++i) {
    init.push_back(static_cast<T>(0.1));
  }

  TensorFromVector(init, ctx, tensor_x);
  tensor_x->Resize({dim0, dim1, dim2});

  ctx.Wait();

  // run
  auto place = ctx.GetPlace();
  auto out = scope->Var("Out");
  auto tensor_out = out->GetMutable<f::LoDTensor>();

  std::vector<int> axis;
  axis.push_back(2);
  f::AttributeMap attrs = {{"axes", axis}};

  auto op = f::OpRegistry::CreateOp("squeeze", {{"X", {"X"}}},
                                    {{"Out", {"Out"}}}, attrs);

  op->Run(*scope, place);
  ctx.Wait();

  EXPECT_EQ((uint32_t)tensor_out->dims().size(), uint32_t(2));
  EXPECT_EQ((uint32_t)tensor_out->dims()[0], uint32_t(dim0));
  EXPECT_EQ((uint32_t)tensor_out->dims()[1], uint32_t(dim1));

  std::vector<T> out_vec;
  TensorToVector(*tensor_out, ctx, &out_vec);
  for (uint32_t i = 0; i < out_vec.size(); i++) {
    EXPECT_EQ(out_vec[i], static_cast<T>(0.1));
  }

  ctx.Wait();
}

TEST(squeeze, NPU_fp32) {
  f::Scope scope;
  p::NPUDeviceContext ctx(p::NPUPlace(0));
  Compare<float>(&scope, ctx);
}
File 3 of 4: NPU kernel registration for unsqueeze and unsqueeze2 (44 additions, 0 deletions)
@@ -0,0 +1,44 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef PADDLE_WITH_ASCEND_CL
#include <memory>
#include <string>

#include "paddle/fluid/operators/unsqueeze_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_NPU_KERNEL(
    unsqueeze,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, float>,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, double>,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, plat::float16>,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, bool>,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, int>,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, int8_t>,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, int64_t>);
REGISTER_OP_NPU_KERNEL(
    unsqueeze2,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, float>,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, double>,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, plat::float16>,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, bool>,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, int>,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, int8_t>,
    ops::UnsqueezeKernel<plat::NPUDeviceContext, int64_t>);
#endif
File 4 of 4: unit test for the unsqueeze NPU kernel (92 additions, 0 deletions)
@@ -0,0 +1,92 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef _WIN32
#include <unistd.h>
#endif

#include <string>
#include <thread>  // NOLINT
#include <vector>

#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/operators/dropout_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/string/printf.h"

namespace f = paddle::framework;
namespace p = paddle::platform;
namespace m = paddle::operators::math;

USE_OP(unsqueeze);
USE_OP_DEVICE_KERNEL(unsqueeze, NPU);

template <typename T>
void Compare(f::Scope* scope, const p::DeviceContext& ctx) {
  // init
  auto x = scope->Var("X");
  auto tensor_x = x->GetMutable<f::LoDTensor>();

  int dim0 = 5;
  int dim1 = 10;

  std::vector<T> init;
  for (int64_t i = 0; i < dim0 * dim1; ++i) {
    init.push_back(static_cast<T>(0.1));
  }

  TensorFromVector(init, ctx, tensor_x);
  tensor_x->Resize({dim0, dim1});

  ctx.Wait();

  // run
  auto place = ctx.GetPlace();
  auto out = scope->Var("Out");
  auto tensor_out = out->GetMutable<f::LoDTensor>();

  std::vector<int> axis;
  axis.push_back(1);
  f::AttributeMap attrs = {{"axes", axis}};

  auto op = f::OpRegistry::CreateOp("unsqueeze", {{"X", {"X"}}},
                                    {{"Out", {"Out"}}}, attrs);

  op->Run(*scope, place);
  ctx.Wait();

  EXPECT_EQ((uint32_t)tensor_out->dims().size(), uint32_t(3));
  EXPECT_EQ((uint32_t)tensor_out->dims()[0], uint32_t(5));
  EXPECT_EQ((uint32_t)tensor_out->dims()[1], uint32_t(1));
  EXPECT_EQ((uint32_t)tensor_out->dims()[2], uint32_t(10));

  std::vector<T> out_vec;
  TensorToVector(*tensor_out, ctx, &out_vec);
  for (uint32_t i = 0; i < out_vec.size(); i++) {
    EXPECT_EQ(out_vec[i], static_cast<T>(0.1));
  }

  ctx.Wait();
}

TEST(unsqueeze, NPU_fp32) {
  f::Scope scope;
  p::NPUDeviceContext ctx(p::NPUPlace(0));
  Compare<float>(&scope, ctx);
}
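The two tests above only verify the resulting shape and element values for a single fp32 case. As a reading aid, below is a small standalone sketch (not part of this commit, with no Paddle dependencies; SqueezeShape and UnsqueezeShape are hypothetical names used only for illustration) of the shape semantics those tests exercise: squeeze drops the size-1 dimensions listed in axes, and unsqueeze inserts size-1 dimensions at the positions listed in axes. Negative axes and the empty-axes behavior that the real ops also handle are intentionally left out of this simplified model.

// Standalone illustration only: simplified shape rules for squeeze/unsqueeze.
// SqueezeShape and UnsqueezeShape are hypothetical helpers for this sketch,
// not functions from the Paddle codebase.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Drop every dimension whose index is listed in `axes` and whose extent is 1.
std::vector<int64_t> SqueezeShape(const std::vector<int64_t>& dims,
                                  const std::vector<int>& axes) {
  std::vector<int64_t> out;
  for (size_t i = 0; i < dims.size(); ++i) {
    bool listed =
        std::find(axes.begin(), axes.end(), static_cast<int>(i)) != axes.end();
    if (!(listed && dims[i] == 1)) out.push_back(dims[i]);
  }
  return out;
}

// Insert a size-1 dimension at each output position listed in `axes`.
std::vector<int64_t> UnsqueezeShape(const std::vector<int64_t>& dims,
                                    const std::vector<int>& axes) {
  std::vector<int64_t> out = dims;
  std::vector<int> sorted_axes = axes;
  // Insert in ascending order so earlier insertions do not shift later axes.
  std::sort(sorted_axes.begin(), sorted_axes.end());
  for (int axis : sorted_axes) out.insert(out.begin() + axis, 1);
  return out;
}

int main() {
  // Mirrors the squeeze test: {1, 10, 1} with axes = {2} becomes {1, 10}.
  assert((SqueezeShape({1, 10, 1}, {2}) == std::vector<int64_t>{1, 10}));
  // Mirrors the unsqueeze test: {5, 10} with axes = {1} becomes {5, 1, 10}.
  assert((UnsqueezeShape({5, 10}, {1}) == std::vector<int64_t>{5, 1, 10}));
  return 0;
}

Under this simplified model, the EXPECT_EQ checks on tensor_out->dims() in the two Compare functions correspond to the two asserts in main.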
