diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 6f1929c64275b0..6f883b761eb326 100644
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -457,6 +457,7 @@
     scale,
     sgn,
     sign,
+    signbit,
     sin,
     sin_,
     sinh,
@@ -919,4 +920,5 @@
     "index_fill_",
     'diagonal_scatter',
     'combinations',
+    'signbit',
 ]
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index 4c453492f193d8..160e4b26180cd6 100644
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -768,6 +768,7 @@
     'atleast_3d',
     'diagonal_scatter',
     "combinations",
+    'signbit',
 ]
 
 # this list used in math_op_patch.py for magic_method bind
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 688b08c597ce01..f98e2b29745762 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -4632,6 +4632,7 @@ def sign(x, name=None):
             'int32',
             'int64',
             'float16',
+            'bfloat16',
             'float32',
             'float64',
         ],
@@ -7169,3 +7170,62 @@ def combinations(x, r=2, with_replacement=False, name=None):
         grids[i] = grids[i].masked_select(mask)
 
     return paddle.stack(grids, 1)
+
+
+def signbit(x, name=None):
+    r"""
+    Tests if each element of input has its sign bit set or not.
+
+    Args:
+        x (Tensor): The input Tensor. Must be one of the following types: float16, float32, float64, bfloat16, uint8, int8, int16, int32, int64.
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        out (Tensor): The output boolean Tensor, with the same shape as ``x``. True means the sign bit of the corresponding input element is set (the value is negative or negative zero), False otherwise.
+
+    Examples:
+        .. code-block:: python
+            :name: signbit-example-1
+
+            >>> import paddle
+            >>> paddle.set_device('cpu')
+            >>> x = paddle.to_tensor([-0., 1.1, -2.1, 0., 2.5], dtype='float32')
+            >>> res = paddle.signbit(x)
+            >>> print(res)
+            Tensor(shape=[5], dtype=bool, place=Place(cpu), stop_gradient=True,
+            [True, False, True, False, False])
+
+        .. code-block:: python
+            :name: signbit-example-2
+
+            >>> import paddle
+            >>> paddle.set_device('cpu')
+            >>> x = paddle.to_tensor([-5, -2, 3], dtype='int32')
+            >>> res = paddle.signbit(x)
+            >>> print(res)
+            Tensor(shape=[3], dtype=bool, place=Place(cpu), stop_gradient=True,
+            [True , True , False])
+    """
+    if not isinstance(x, (paddle.Tensor, Variable)):
+        raise TypeError(f"x must be tensor type, but got {type(x)}")
+
+    check_variable_and_dtype(
+        x,
+        "x",
+        [
+            'float16',
+            'float32',
+            'float64',
+            'bfloat16',
+            'uint8',
+            'int8',
+            'int16',
+            'int32',
+            'int64',
+        ],
+        "signbit",
+    )
+    neg_zero_x = paddle.to_tensor(np.copysign(1, x.numpy()), dtype=x.dtype)
+    x = paddle.sign(neg_zero_x)
+    out = paddle.cast(x < 0, dtype='bool')
+    return out
diff --git a/test/legacy_test/test_signbit.py b/test/legacy_test/test_signbit.py
new file mode 100644
index 00000000000000..7969689acfea5c
--- /dev/null
+++ b/test/legacy_test/test_signbit.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+
+import paddle
+from paddle.base import core
+
+
+def ref_np_signbit(x: np.ndarray):
+    return np.signbit(x)
+
+
+class TestSignbitAPI(unittest.TestCase):
+    def setUp(self) -> None:
+        self.cuda_support_dtypes = [
+            'float32',
+            'float64',
+            'uint8',
+            'int8',
+            'int16',
+            'int32',
+            'int64',
+        ]
+        self.cpu_support_dtypes = [
+            'float32',
+            'float64',
+            'uint8',
+            'int8',
+            'int16',
+            'int32',
+            'int64',
+        ]
+        self.place = [paddle.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            self.place.append(paddle.CUDAPlace(0))
+
+    def test_dtype(self):
+        def run(place):
+            paddle.disable_static(place)
+            if core.is_compiled_with_cuda():
+                support_dtypes = self.cuda_support_dtypes
+            else:
+                support_dtypes = self.cpu_support_dtypes
+
+            for dtype in support_dtypes:
+                x = paddle.to_tensor(
+                    np.random.randint(-10, 10, size=[12, 20, 2]).astype(dtype)
+                )
+                paddle.signbit(x)
+
+        for place in self.place:
+            run(place)
+
+    def test_float(self):
+        def run(place):
+            paddle.disable_static(place)
+            if core.is_compiled_with_cuda():
+                support_dtypes = self.cuda_support_dtypes
+            else:
+                support_dtypes = self.cpu_support_dtypes
+
+            for dtype in support_dtypes:
+                np_x = np.random.randint(-10, 10, size=[12, 20, 2]).astype(
+                    dtype
+                )
+                x = paddle.to_tensor(np_x)
+                out = paddle.signbit(x)
+                np_out = out.numpy()
+                out_expected = ref_np_signbit(np_x)
+                np.testing.assert_allclose(np_out, out_expected, rtol=1e-05)
+
+        for place in self.place:
+            run(place)
+
+    def test_input_type(self):
+        with self.assertRaises(TypeError):
+            x = np.random.randint(-10, 10, size=[12, 20, 2]).astype('float32')
+            x = paddle.signbit(x)
+
+
+if __name__ == "__main__":
+    unittest.main()
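Note on the negative-zero handling in the math.py change above: a plain `x < 0` comparison cannot flag -0.0 (since `-0.0 < 0` is False even though its sign bit is set), so the implementation first maps the input through `np.copysign(1, x.numpy())`, which turns -0.0 into -1.0, and only then compares against zero. A minimal sketch of the resulting behaviour, assuming eager mode on CPU with this patch applied (`paddle.signbit` does not exist before this change):

    >>> import numpy as np
    >>> import paddle
    >>> x = paddle.to_tensor([-0.0, 0.0, -1.5, 2.0], dtype='float32')
    >>> (x < 0).numpy().tolist()            # naive comparison misses negative zero
    [False, False, True, False]
    >>> np.copysign(1, x.numpy()).tolist()  # -0.0 is mapped to -1.0 before comparing
    [-1.0, 1.0, -1.0, 1.0]
    >>> paddle.signbit(x).numpy().tolist()  # agrees with np.signbit, including -0.0
    [True, False, True, False]
    >>> np.signbit(x.numpy()).tolist()
    [True, False, True, False]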