diff --git a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
index 0fa04d84a255db..226a655b519da3 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
@@ -1019,3 +1019,4 @@ def GeneratePythonCFile(filepath, python_c_str):
         header_path,
         PYTHON_C_H_TEMPLATE.format(body=generated_python_c_functions_header),
     )
+#
diff --git a/python/paddle/_paddle_docs.py b/python/paddle/_paddle_docs.py
index f73d73abeabcc3..28afc5ea5ca9cf 100644
--- a/python/paddle/_paddle_docs.py
+++ b/python/paddle/_paddle_docs.py
@@ -88,7 +88,8 @@ def add_doc_and_signature(func_name: str, docstr: str, func_def: str) -> None:
 
     Returns:
         Tensor, results of minimum on the specified axis of input tensor, it's data type is the same as input's Tensor.
-
+    Keyword args:
+        out (Tensor, optional): The output tensor.
     Examples:
         .. code-block:: python
 
@@ -193,6 +194,8 @@ def amin(
     axis: int | Sequence[int] | None = None,
     keepdim: bool = False,
     name: str | None = None,
+    *,
+    out: Tensor | None = None,
 ) -> Tensor
 """,
 )
@@ -223,7 +226,8 @@ def amin(
 
             be written to this tensor and also returned. The returned tensor and `out` share memory and autograd meta. Default: None.
         name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
-
+    Keyword args:
+        out (Tensor, optional): The output tensor.
     Returns:
         Tensor, results of maximum on the specified axis of input tensor, it's data type is the same as `x`.
 
@@ -332,6 +336,8 @@ def amax(
     axis: int | Sequence[int] | None = None,
     keepdim: bool = False,
     name: str | None = None,
+    *,
+    out: Tensor | None = None,
 ) -> Tensor
 """,
 )
@@ -406,6 +412,8 @@ def all(
     axis: int | Sequence[int] | None = None,
     keepdim: bool = False,
     name: str | None = None,
+    *,
+    out: Tensor | None = None,
 ) -> Tensor
 """,
 )
diff --git a/test/legacy_test/test_max_min_amax_amin_op.py b/test/legacy_test/test_max_min_amax_amin_op.py
index bf89ce7df97c9d..e3e36f4b926ca4 100644
--- a/test/legacy_test/test_max_min_amax_amin_op.py
+++ b/test/legacy_test/test_max_min_amax_amin_op.py
@@ -280,6 +280,172 @@ def init_case(self):
         self.keepdim = True
 
 
+class TestAmaxAPI_Compatibility(unittest.TestCase):
+    def setUp(self):
+        np.random.seed(123)
+        paddle.enable_static()
+        self.shape = [5, 6]
+        self.dtype = 'float32'
+        self.init_data()
+
+    def init_data(self):
+        self.np_input = np.random.randint(0, 8, self.shape).astype(self.dtype)
+
+    def test_dygraph_Compatibility(self):
+        paddle.disable_static()
+        x = paddle.to_tensor(self.np_input)
+        paddle_dygraph_out = []
+        # Positional args
+        out1 = paddle.amax(x, 1, True)
+        paddle_dygraph_out.append(out1)
+        # Paddle-style keyword args (kwargs)
+        out2 = paddle.amax(x=x, axis=1, keepdim=True)
+        paddle_dygraph_out.append(out2)
+        # Torch-style keyword args
+        out3 = paddle.amax(input=x, dim=1, keepdim=True)
+        paddle_dygraph_out.append(out3)
+        # Combined args and kwargs
+        out4 = paddle.amax(x, dim=1, keepdim=True)
+        paddle_dygraph_out.append(out4)
+        # Tensor method args
+        out5 = x.amax(1, True)
+        paddle_dygraph_out.append(out5)
+        # Tensor method kwargs
+        out6 = x.amax(dim=1, keepdim=True)
+        paddle_dygraph_out.append(out6)
+        # Test out
+        out7 = paddle.empty([])
+        paddle.amax(x, 1, True, out=out7)
+        paddle_dygraph_out.append(out7)
+        # Test default value
+        out8 = x.amax(1)
+        # NumPy reference output
+        ref_out = np.amax(self.np_input, 1, keepdims=True)
+        # Check
+        for out in paddle_dygraph_out:
+            np.testing.assert_allclose(ref_out, out.numpy())
+        ref_out = np.amax(self.np_input, 1, keepdims=False)
+        np.testing.assert_allclose(ref_out, out8.numpy())
+        paddle.enable_static()
+
+    def test_static_Compatibility(self):
+        main = paddle.static.Program()
+        startup = paddle.static.Program()
+        with base.program_guard(main, startup):
+            x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype)
+            # Positional args
+            out1 = paddle.amax(x, 1, True)
+            # Paddle-style keyword args (kwargs)
+            out2 = paddle.amax(x=x, axis=1, keepdim=True)
+            # Torch-style keyword args
+            out3 = paddle.amax(input=x, dim=1, keepdim=True)
+            # Combined args and kwargs
+            out4 = paddle.amax(x, dim=1, keepdim=True)
+            # Tensor method args
+            out5 = x.amax(1, True)
+            # Tensor method kwargs
+            out6 = x.amax(dim=1, keepdim=True)
+            # `out` is not supported in static graph mode
+            # out7 = paddle.empty([])
+            # paddle.amax(x, 1, True, out=out7)
+            # Test default value
+            out8 = x.amax()
+            exe = base.Executor(paddle.CPUPlace())
+            fetches = exe.run(
+                main,
+                feed={"x": self.np_input},
+                fetch_list=[out1, out2, out3, out4, out5, out6, out8],
+            )
+            ref_out = np.amax(self.np_input, 1, keepdims=True)
+            for out in fetches[:-1]:
+                np.testing.assert_allclose(out, ref_out)
+            ref_out = np.amax(self.np_input)
+            np.testing.assert_allclose(fetches[-1], ref_out)
+
+
+class TestAminAPI_Compatibility(unittest.TestCase):
+    def setUp(self):
+        np.random.seed(123)
+        paddle.enable_static()
+        self.shape = [5, 6]
+        self.dtype = 'float32'
+        self.init_data()
+
+    def init_data(self):
+        self.np_input = np.random.randint(0, 8, self.shape).astype(self.dtype)
+
+    def test_dygraph_Compatibility(self):
+        paddle.disable_static()
+        x = paddle.to_tensor(self.np_input)
+        paddle_dygraph_out = []
+        # Positional args
+        out1 = paddle.amin(x, 1, True)
+        paddle_dygraph_out.append(out1)
+        # Paddle-style keyword args (kwargs)
+        out2 = paddle.amin(x=x, axis=1, keepdim=True)
+        paddle_dygraph_out.append(out2)
+        # Torch-style keyword args
+        out3 = paddle.amin(input=x, dim=1, keepdim=True)
+        paddle_dygraph_out.append(out3)
+        # Combined args and kwargs
+        out4 = paddle.amin(x, dim=1, keepdim=True)
+        paddle_dygraph_out.append(out4)
+        # Tensor method args
+        out5 = x.amin(1, True)
+        paddle_dygraph_out.append(out5)
+        # Tensor method kwargs
+        out6 = x.amin(dim=1, keepdim=True)
+        paddle_dygraph_out.append(out6)
+        # Test out
+        out7 = paddle.empty([])
+        paddle.amin(x, 1, True, out=out7)
+        paddle_dygraph_out.append(out7)
+        # Test default value
+        out8 = x.amin(1)
+        # NumPy reference output
+        ref_out = np.amin(self.np_input, 1, keepdims=True)
+        # Check
+        for out in paddle_dygraph_out:
+            np.testing.assert_allclose(ref_out, out.numpy())
+        ref_out = np.amin(self.np_input, 1, keepdims=False)
+        np.testing.assert_allclose(ref_out, out8.numpy())
+        paddle.enable_static()
+
+    def test_static_Compatibility(self):
+        main = paddle.static.Program()
+        startup = paddle.static.Program()
+        with base.program_guard(main, startup):
+            x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype)
+            # Positional args
+            out1 = paddle.amin(x, 1, True)
+            # Paddle-style keyword args (kwargs)
+            out2 = paddle.amin(x=x, axis=1, keepdim=True)
+            # Torch-style keyword args
+            out3 = paddle.amin(input=x, dim=1, keepdim=True)
+            # Combined args and kwargs
+            out4 = paddle.amin(x, dim=1, keepdim=True)
+            # Tensor method args
+            out5 = x.amin(1, True)
+            # Tensor method kwargs
+            out6 = x.amin(dim=1, keepdim=True)
+            # `out` is not supported in static graph mode
+            # out7 = paddle.empty([])
+            # paddle.amin(x, 1, True, out=out7)
+            # Test default value
+            out8 = x.amin()
+            exe = base.Executor(paddle.CPUPlace())
+            fetches = exe.run(
+                main,
+                feed={"x": self.np_input},
+                fetch_list=[out1, out2, out3, out4, out5, out6, out8],
+            )
+            ref_out = np.amin(self.np_input, 1, keepdims=True)
+            for out in fetches[:-1]:
+                np.testing.assert_allclose(out, ref_out)
+            ref_out = np.amin(self.np_input)
+            np.testing.assert_allclose(fetches[-1], ref_out)
+
+
 class TestAmaxAminOutAPI(unittest.TestCase):
     def _run_api(self, api, x, case):
         out_buf = paddle.zeros([], dtype=x.dtype)
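For reference, a minimal sketch of the call-compatibility surface these tests exercise, assuming a Paddle build with this patch applied (i.e. `paddle.amax`/`paddle.amin` accepting the torch-style `input`/`dim` aliases and a keyword-only `out=` in dynamic graph mode):

import numpy as np
import paddle

x = paddle.to_tensor(np.arange(6, dtype="float32").reshape([2, 3]))

# Paddle-style and torch-style spellings should agree.
a = paddle.amax(x, axis=1, keepdim=True)       # paddle keywords
b = paddle.amax(input=x, dim=1, keepdim=True)  # torch-style aliases
assert np.array_equal(a.numpy(), b.numpy())

# Keyword-only out=: per the new docstring, the result is written into
# the given tensor and also returned; the two share memory.
buf = paddle.empty([2, 1], dtype="float32")
c = paddle.amin(x, 1, True, out=buf)
assert np.array_equal(c.numpy(), buf.numpy())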