Skip to content
23 changes: 11 additions & 12 deletions paddle/fluid/operators/controlflow/logical_op.cc
Original file line number Diff line number Diff line change
@@ -1,11 +1,8 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
Expand All @@ -26,15 +23,16 @@ class BinaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker {
void Make() override {
OpComment comment;
AddInput("X", string::Sprintf("Left hand operand of %s operator. Must be "
"a Variable of type bool.",
"a Variable of type being one of bool, int8, "
"int16, int32, int64, float32, float64.",
comment.type));
AddInput("Y", string::Sprintf("Right hand operand of %s operator. Must be "
"a Variable of type bool.",
"a Variable of type being one of bool, int8, "
"int16, int32, int64, float32, float64.",
comment.type));
AddOutput("Out", string::Sprintf("n-dim bool Variable"));
AddComment(string::Sprintf(R"DOC(%s Operator

It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim boolean LoDTensor or Tensor.
It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim LoDTensor or Tensor.
Each element of Out is calculated by %s
)DOC",
comment.type, comment.equation));
Expand All @@ -46,13 +44,14 @@ class UnaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
OpComment comment;
AddInput("X", string::Sprintf("Operand of %s operator. Must be "
"a LoDTensor or Tensor of type bool.",
comment.type));
AddInput("X",
string::Sprintf("Operand of %s operator. Must be "
"a LoDTensor or Tensor of type being one of bool, "
"int8, int16, int32, int64, float32, float64.",
comment.type));
AddOutput("Out", string::Sprintf("n-dim bool LoDTensor or Tensor."));
AddComment(string::Sprintf(R"DOC(%s Operator

It operates element-wise on X, and returns the Out. X and Out are N-dim boolean LoDTensor or Tensor.
It operates element-wise on X, and returns the Out. X and Out are N-dim LoDTensor or Tensor.
Each element of Out is calculated by %s
)DOC",
comment.type, comment.equation));
Expand Down
31 changes: 17 additions & 14 deletions paddle/fluid/operators/controlflow/logical_op.cu
Original file line number Diff line number Diff line change
@@ -1,11 +1,8 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
Expand All @@ -21,13 +18,13 @@ namespace plat = paddle::platform;
namespace paddle {
namespace operators {

#define LOGICAL_BINARY_FUNCTOR(func_name, op) \
template <typename T> \
struct func_name { \
using ELEMENT_TYPE = T; \
HOSTDEVICE bool operator()(const T* args) const { \
return args[0] op args[1]; \
} \
// Generates a device-compatible (HOSTDEVICE) binary functor `func_name` that
// applies the logical operator `op` element-wise. Both operands are converted
// with static_cast<bool> first, so non-bool element types (e.g. int8/16/32/64,
// float, double) are interpreted by truthiness (non-zero -> true) and the
// result is always bool, regardless of T.
#define LOGICAL_BINARY_FUNCTOR(func_name, op) \
template <typename T> \
struct func_name { \
using ELEMENT_TYPE = T; \
HOSTDEVICE bool operator()(const T* args) const { \
return static_cast<bool>(args[0]) op static_cast<bool>(args[1]); \
} \
};

LOGICAL_BINARY_FUNCTOR(CudaOrFunctor, ||)
Expand Down Expand Up @@ -68,10 +65,16 @@ class BinaryLogicalOpKernel<platform::CUDADeviceContext, Functor>
} // namespace operators
} // namespace paddle

#define REGISTER_LOGICAL_CUDA_KERNEL(op_name, func) \
REGISTER_OP_CUDA_KERNEL( \
op_name, \
ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, ops::func<bool>>);
// Registers the CUDA kernel of a binary logical op (`op_name`) for every
// supported element type: bool, int8, int16, int32, int64, float32, float64.
// Each instantiation reuses the same functor template `func`, whose operands
// are cast to bool inside the functor, so all variants produce bool output.
#define REGISTER_LOGICAL_CUDA_KERNEL(op_name, func) \
REGISTER_OP_CUDA_KERNEL( \
op_name, \
ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, ops::func<bool>>, \
ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, ops::func<int8_t>>, \
ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, ops::func<int16_t>>, \
ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, ops::func<int>>, \
ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, ops::func<int64_t>>, \
ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, ops::func<float>>, \
ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, ops::func<double>>);

REGISTER_LOGICAL_CUDA_KERNEL(logical_or, CudaOrFunctor)
REGISTER_LOGICAL_CUDA_KERNEL(logical_and, CudaAndFunctor)
Expand Down
43 changes: 32 additions & 11 deletions paddle/fluid/operators/controlflow/logical_op.h
Original file line number Diff line number Diff line change
@@ -1,11 +1,8 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
Expand Down Expand Up @@ -82,12 +79,36 @@ class UnaryLogicalOpKernel
} // namespace operators
} // namespace paddle

#define REGISTER_BINARY_LOGICAL_KERNEL(op_type, dev, functor) \
REGISTER_OP_##dev##_KERNEL( \
op_type, ::paddle::operators::BinaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<bool>>);
// Registers `op_type`'s binary logical kernel on device `dev` (token-pasted
// into both REGISTER_OP_<dev>_KERNEL and <dev>DeviceContext, e.g. CPU/CUDA)
// for element types bool, int8, int16, int32, int64, float32 and float64.
// NOTE: the type list must stay in sync with REGISTER_UNARY_LOGICAL_KERNEL
// and with the data types advertised by BinaryLogicalOpProtoMaker.
#define REGISTER_BINARY_LOGICAL_KERNEL(op_type, dev, functor) \
REGISTER_OP_##dev##_KERNEL( \
op_type, ::paddle::operators::BinaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<bool>>, \
::paddle::operators::BinaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<int8_t>>, \
::paddle::operators::BinaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<int16_t>>, \
::paddle::operators::BinaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<int>>, \
::paddle::operators::BinaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<int64_t>>, \
::paddle::operators::BinaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<float>>, \
::paddle::operators::BinaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<double>>);

#define REGISTER_UNARY_LOGICAL_KERNEL(op_type, dev, functor) \
REGISTER_OP_##dev##_KERNEL( \
op_type, ::paddle::operators::UnaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<bool>>);
// Registers `op_type`'s unary logical kernel (e.g. logical_not) on device
// `dev` for element types bool, int8, int16, int32, int64, float32, float64.
// Mirrors REGISTER_BINARY_LOGICAL_KERNEL; keep the two type lists identical.
#define REGISTER_UNARY_LOGICAL_KERNEL(op_type, dev, functor) \
REGISTER_OP_##dev##_KERNEL( \
op_type, ::paddle::operators::UnaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<bool>>, \
::paddle::operators::UnaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<int8_t>>, \
::paddle::operators::UnaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<int16_t>>, \
::paddle::operators::UnaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<int>>, \
::paddle::operators::UnaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<int64_t>>, \
::paddle::operators::UnaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<float>>, \
::paddle::operators::UnaryLogicalOpKernel< \
::paddle::platform::dev##DeviceContext, functor<double>>);
29 changes: 22 additions & 7 deletions paddle/fluid/operators/controlflow/logical_op_npu.cc
Original file line number Diff line number Diff line change
@@ -1,11 +1,8 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
Expand Down Expand Up @@ -82,11 +79,29 @@ class LogicalAndPUKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_NPU_KERNEL(logical_not,
ops::LogicalNotNPUKernel<plat::NPUDeviceContext, bool>);
// NPU registration of logical_not for bool plus the integer/float element
// types supported by the other device backends (int8/16/32/64, float, double).
REGISTER_OP_NPU_KERNEL(
    logical_not, ops::LogicalNotNPUKernel<plat::NPUDeviceContext, bool>,
    ops::LogicalNotNPUKernel<plat::NPUDeviceContext, int8_t>,
    ops::LogicalNotNPUKernel<plat::NPUDeviceContext, int16_t>,
    ops::LogicalNotNPUKernel<plat::NPUDeviceContext, int>,
    ops::LogicalNotNPUKernel<plat::NPUDeviceContext, int64_t>,
    ops::LogicalNotNPUKernel<plat::NPUDeviceContext, float>,
    ops::LogicalNotNPUKernel<plat::NPUDeviceContext, double>);

REGISTER_OP_NPU_KERNEL(logical_or,
ops::LogicalOrNPUKernel<plat::NPUDeviceContext, bool>);
ops::LogicalOrNPUKernel<plat::NPUDeviceContext, bool>,
ops::LogicalOrNPUKernel<plat::NPUDeviceContext, int8_t>,
ops::LogicalOrNPUKernel<plat::NPUDeviceContext, int16_t>,
ops::LogicalOrNPUKernel<plat::NPUDeviceContext, int>,
ops::LogicalOrNPUKernel<plat::NPUDeviceContext, int64_t>,
ops::LogicalOrNPUKernel<plat::NPUDeviceContext, float>,
ops::LogicalOrNPUKernel<plat::NPUDeviceContext, double>);

REGISTER_OP_NPU_KERNEL(logical_and,
ops::LogicalAndPUKernel<plat::NPUDeviceContext, bool>);
ops::LogicalAndPUKernel<plat::NPUDeviceContext, bool>,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

LogicalAndPUKernel 请确认下这个类型名称;推理认为增加这些实例带来的体积增量 9 MB 有些大了,请给出一些原因吧,谢谢

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

LogicalAndPUKernel这个名称是在 #34159 和 #34170 这两个PR中定义的。这个PR当中只增加了对更多输入数据类型的支持。

ops::LogicalAndPUKernel<plat::NPUDeviceContext, int8_t>,
ops::LogicalAndPUKernel<plat::NPUDeviceContext, int16_t>,
ops::LogicalAndPUKernel<plat::NPUDeviceContext, int>,
ops::LogicalAndPUKernel<plat::NPUDeviceContext, int64_t>,
ops::LogicalAndPUKernel<plat::NPUDeviceContext, float>,
ops::LogicalAndPUKernel<plat::NPUDeviceContext, double>);
4 changes: 2 additions & 2 deletions paddle/fluid/operators/controlflow/logical_op_xpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ class BinaryLogicalOpXPUKernel : public framework::OpKernel<T> {
auto* x = context.Input<framework::Tensor>("X");
auto* y = context.Input<framework::Tensor>("Y");
auto* out = context.Output<framework::Tensor>("Out");
T* out_ptr = out->mutable_data<T>(context.GetPlace());
bool* out_ptr = out->mutable_data<bool>(context.GetPlace());
const T* x_ptr = x->data<T>();
const T* y_ptr = y->data<T>();
auto& dev_ctx =
Expand Down Expand Up @@ -153,7 +153,7 @@ class UnaryLogicalOpXPUKernel : public framework::OpKernel<T> {
if (x->numel() == 0) {
return;
}
out->mutable_data<T>(context.GetPlace());
out->mutable_data<bool>(context.GetPlace());
auto& dev_ctx =
context.template device_context<paddle::platform::XPUDeviceContext>();
int ret = xpu::logical_not<bool>(dev_ctx.x_context(), x->data<T>(),
Expand Down
8 changes: 7 additions & 1 deletion paddle/fluid/operators/controlflow/logicaland_op_xpu.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,5 +17,11 @@ limitations under the License. */
namespace ops = paddle::operators;
REGISTER_OP_XPU_KERNEL(
logical_and,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_AND, bool>);
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_AND, bool>,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_AND, int8_t>,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_AND, int16_t>,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_AND, int>,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_AND, int64_t>,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_AND, float>,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_AND, double>);
#endif
8 changes: 7 additions & 1 deletion paddle/fluid/operators/controlflow/logicalnot_op_xpu.cc
100755 → 100644
Original file line number Diff line number Diff line change
Expand Up @@ -15,5 +15,11 @@ limitations under the License. */
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/operators/controlflow/logical_op_xpu.h"
namespace ops = paddle::operators;
REGISTER_OP_XPU_KERNEL(logicalnot, ops::UnaryLogicalOpXPUKernel<bool>);
// XPU registration of the unary logical-not kernel for all supported element
// types. Output buffer is allocated as bool by UnaryLogicalOpXPUKernel.
// NOTE(review): the op is registered here as "logicalnot" (no underscore),
// unlike the "logical_and"/"logical_or" XPU registrations — confirm this name
// is intentional and matches the op name the framework looks up.
REGISTER_OP_XPU_KERNEL(logicalnot, ops::UnaryLogicalOpXPUKernel<bool>,
                       ops::UnaryLogicalOpXPUKernel<int8_t>,
                       ops::UnaryLogicalOpXPUKernel<int16_t>,
                       ops::UnaryLogicalOpXPUKernel<int>,
                       ops::UnaryLogicalOpXPUKernel<int64_t>,
                       ops::UnaryLogicalOpXPUKernel<float>,
                       ops::UnaryLogicalOpXPUKernel<double>);
#endif
8 changes: 7 additions & 1 deletion paddle/fluid/operators/controlflow/logicalor_op_xpu.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,5 +18,11 @@ limitations under the License. */
namespace ops = paddle::operators;
REGISTER_OP_XPU_KERNEL(
logical_or,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_OR, bool>);
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_OR, bool>,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_OR, int8_t>,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_OR, int16_t>,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_OR, int>,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_OR, int64_t>,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_OR, float>,
ops::BinaryLogicalOpXPUKernel<ops::XpuLogicalType::XPU_OR, double>);
#endif
Loading