paddle/fluid/operators/scale_op_npu.cc (new file, 69 additions)
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>
#include <string>

#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/scale_op.h"

namespace paddle {
namespace operators {

template <typename DeviceContext, typename T>
class ScaleNPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<framework::Tensor>("X");
    auto* out = ctx.Output<framework::Tensor>("Out");
    auto scale = ctx.Attr<float>("scale");
    auto bias = ctx.Attr<float>("bias");
    auto bias_after_scale = ctx.Attr<bool>("bias_after_scale");
    auto stream =
        ctx.template device_context<paddle::platform::NPUDeviceContext>()
            .stream();
    float _power = 1.0;
    if (bias_after_scale) {
      // out = x * scale + bias, computed in a single Power op (power == 1).
      out->mutable_data<T>(ctx.GetPlace());
      auto runner =
          NpuOpRunner("Power", {*x}, {*out},
                      {{"power", _power}, {"scale", scale}, {"shift", bias}});
      runner.Run(stream);
    } else {
      // out = (x + bias) * scale: Adds applies the bias into a temporary
      // tensor first, then Power applies the scale with a zero shift.
      Tensor tmp_x(x->type());
      tmp_x.Resize(x->dims());
      tmp_x.mutable_data<T>(ctx.GetPlace());
      auto runner_tmp = NpuOpRunner("Adds", {*x}, {tmp_x}, {{"value", bias}});
      runner_tmp.Run(stream);

      out->mutable_data<T>(ctx.GetPlace());
      float _bias = 0.0;
      auto runner =
          NpuOpRunner("Power", {tmp_x}, {*out},
                      {{"power", _power}, {"scale", scale}, {"shift", _bias}});
      runner.Run(stream);
    }
  }
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;

REGISTER_OP_NPU_KERNEL(
    scale, ops::ScaleNPUKernel<paddle::platform::NPUDeviceContext, float>,
    ops::ScaleNPUKernel<paddle::platform::NPUDeviceContext,
                        paddle::platform::float16>);

Review comment (Contributor), on the kernel registration: support fp16?
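Both branches bottom out in the Ascend "Power" operator, which appears to compute out = (x * scale + shift)^power elementwise; with power fixed at 1.0 this is an affine transform, so the only difference between the branches is whether the bias is applied before or after the scale. A minimal NumPy sketch of the intended semantics (the helper name and check values here are illustrative, not part of the PR):

import numpy as np

def scale_reference(x, scale, bias, bias_after_scale):
    # Reference semantics of the scale op, mirroring the two NPU code paths.
    if bias_after_scale:
        return x * scale + bias   # single Power op: shift applied after scale
    return (x + bias) * scale     # Adds first, then Power with shift == 0

x = np.random.random((10, 10)).astype(np.float32)
assert np.allclose(scale_reference(x, -2.3, 0.5, True), x * -2.3 + 0.5)
assert np.allclose(scale_reference(x, -2.3, 0.5, False), (x + 0.5) * -2.3)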
python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py (new file, 89 additions)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid

paddle.enable_static()
SEED = 2021


@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestScale(OpTest):
    def setUp(self):
        self.set_npu()
        self.op_type = "scale"
        self.place = paddle.NPUPlace(0)
        self.init_dtype()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(
                np.random.random((10, 10)).astype(self.dtype))
        }
        self.attrs = {'scale': -2.3, 'bias': 0, 'bias_after_scale': True}
        # With bias == 0, the expected output is simply X * scale.
        self.outputs = {
            'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
        }

    def set_npu(self):
        self.__class__.use_npu = True

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output_with_place(self.place, check_dygraph=False)


Review comment (Contributor), on self.attrs: Suggest adding a unit test with
bias_after_scale=False to improve test coverage.
Reply (Contributor Author): Added.

Review comment (Contributor), on init_dtype: Suggest adding a unit test for
the int dtype.
Reply (Contributor Author): Dropped the int kernel.
Reply (Contributor Author): Neither the original unit test nor the XPU unit
test covers int, so it is removed for now.


class TestFP16Scale(TestScale):
    def init_dtype(self):
        self.dtype = np.float16


@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestBiasAfterScale(OpTest):
    def setUp(self):
        self.set_npu()
        self.op_type = "scale"
        self.place = paddle.NPUPlace(0)
        self.init_dtype()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(
                np.random.random((10, 10)).astype(self.dtype))
        }
        self.attrs = {'scale': -2.3, 'bias': 0, 'bias_after_scale': False}
        # bias == 0, so both orderings reduce to X * scale.
        self.outputs = {
            'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
        }

    def set_npu(self):
        self.__class__.use_npu = True

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output_with_place(self.place, check_dygraph=False)


if __name__ == '__main__':
unittest.main()
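For context, this kernel backs the Python-level paddle.scale API. A minimal eager-mode usage sketch (assuming an NPU build of Paddle; the device string is illustrative):

import paddle

paddle.set_device('npu:0')  # assumes Paddle was compiled with NPU support
x = paddle.rand([10, 10], dtype='float32')
y = paddle.scale(x, scale=-2.3, bias=0.5, bias_after_scale=True)   # x * scale + bias
z = paddle.scale(x, scale=-2.3, bias=0.5, bias_after_scale=False)  # (x + bias) * scale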