Merged

28 commits
2ef9c85
Add 11 APIs depending on out
LittleHeroZZZX Aug 13, 2025
fdce7a4
Add type hint for complex
LittleHeroZZZX Aug 13, 2025
145a9b6
restrict out param to be keyword-only argument
LittleHeroZZZX Aug 14, 2025
e5ce1cb
adapt name param
LittleHeroZZZX Aug 14, 2025
a7b2aba
put name before keyword-only argument
LittleHeroZZZX Aug 14, 2025
11a2970
Merge remote-tracking branch 'origin/develop' into feat/out_ops2
LittleHeroZZZX Aug 15, 2025
bee131a
Merge remote-tracking branch 'origin/develop' into feat/out_ops2
LittleHeroZZZX Aug 18, 2025
8346a92
Enable unittests
LittleHeroZZZX Aug 18, 2025
32a1211
Test C++ lowering test - sin
LittleHeroZZZX Aug 18, 2025
af8a3df
Remove old ir tests relating to sin
LittleHeroZZZX Aug 19, 2025
e547119
Merge remote-tracking branch 'origin/develop' into feat/out_ops2
LittleHeroZZZX Aug 19, 2025
19ecd26
add out ops
LittleHeroZZZX Aug 19, 2025
1f59707
refine doc for sin
LittleHeroZZZX Aug 19, 2025
b3b39f5
Remove old ir tests
LittleHeroZZZX Aug 19, 2025
d4a9f4f
Merge branch 'develop' into feat/out_ops2
LittleHeroZZZX Aug 19, 2025
2240405
Comment the deprecated ut
LittleHeroZZZX Aug 20, 2025
f120923
Refine codestyle
LittleHeroZZZX Aug 20, 2025
4a56b23
Remove ParamAlias for multiply
LittleHeroZZZX Aug 20, 2025
a8782fb
Use use_default_mapping in ops.yaml
LittleHeroZZZX Aug 20, 2025
a25be34
temp remove docs
LittleHeroZZZX Aug 20, 2025
0d583ad
Merge remote-tracking branch 'origin/develop' into feat/out_ops2
LittleHeroZZZX Aug 20, 2025
2c1a8de
restore docs
LittleHeroZZZX Aug 20, 2025
1d6858d
restore test_pylayer
LittleHeroZZZX Aug 20, 2025
e375769
Merge remote-tracking branch 'origin/develop' into feat/out_ops2
LittleHeroZZZX Aug 21, 2025
7829ddf
Merge branch 'develop' into feat/out_ops2
LittleHeroZZZX Aug 21, 2025
6761f5b
refine code style
LittleHeroZZZX Aug 21, 2025
685c288
Remove old ir tests
LittleHeroZZZX Aug 25, 2025
6303236
Restore ut
LittleHeroZZZX Aug 25, 2025
@@ -140,7 +140,7 @@ paddle::Tensor multiply_ad_func(const paddle::Tensor& x,
}

// Forward API Call
auto api_result = paddle::experimental::multiply(x, y);
auto api_result = paddle::experimental::multiply(x, y, input_out);
// Check NaN and Inf if needed

if (FLAGS_check_nan_inf) {
18 changes: 9 additions & 9 deletions python/paddle/nn/quant/functional_layers.py
@@ -28,68 +28,68 @@ def __init__(self):
super().__init__()

def forward(self, x, y, name=None):
return math.add(x, y, name)
return math.add(x, y, name=name)


class subtract(FloatFunctionalLayer):
def __init__(self):
super().__init__()

def forward(self, x, y, name=None):
return math.subtract(x, y, name)
return math.subtract(x, y, name=name)


class multiply(FloatFunctionalLayer):
def __init__(self):
super().__init__()

def forward(self, x, y, name=None):
return math.multiply(x, y, name)
return math.multiply(x, y, name=name)


class divide(FloatFunctionalLayer):
def __init__(self):
super().__init__()

def forward(self, x, y, name=None):
return math.divide(x, y, name)
return math.divide(x, y, name=name)


class reshape(FloatFunctionalLayer):
def __init__(self):
super().__init__()

def forward(self, x, shape, name=None):
return manipulation.reshape(x, shape, name)
return manipulation.reshape(x, shape, name=name)


class transpose(FloatFunctionalLayer):
def __init__(self):
super().__init__()

def forward(self, x, perm, name=None):
return manipulation.transpose(x, perm, name)
return manipulation.transpose(x, perm, name=name)


class concat(FloatFunctionalLayer):
def __init__(self):
super().__init__()

def forward(self, x, axis=0, name=None):
return manipulation.concat(x, axis, name)
return manipulation.concat(x, axis, name=name)


class flatten(FloatFunctionalLayer):
def __init__(self):
super().__init__()

def forward(self, x, start_axis=0, stop_axis=-1, name=None):
return manipulation.flatten(x, start_axis, stop_axis, name)
return manipulation.flatten(x, start_axis, stop_axis, name=name)


class matmul(FloatFunctionalLayer):
def __init__(self):
super().__init__()

def forward(self, x, y, transpose_x=False, transpose_y=False, name=None):
return linalg.matmul(x, y, transpose_x, transpose_y, name)
return linalg.matmul(x, y, transpose_x, transpose_y, name=name)
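
These name=name changes are not cosmetic: the reworked ops insert a keyword-only out parameter ahead of name, so a third positional argument no longer binds to name. A minimal standalone sketch of the hazard (toy functions, not Paddle's actual implementations):

def add_old(x, y, name=None):                  # old-style signature
    return f"add({x}, {y}, name={name})"

def add_new(x, y, *, out=None, name=None):     # new keyword-only signature
    return f"add({x}, {y}, out={out}, name={name})"

print(add_old(1, 2, "my_add"))       # name bound positionally: worked before
try:
    add_new(1, 2, "my_add")          # now a TypeError: name must be a keyword
except TypeError as e:
    print(e)
print(add_new(1, 2, name="my_add"))  # the form these layers were updated to use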
21 changes: 16 additions & 5 deletions python/paddle/tensor/creation.py
@@ -3170,15 +3170,19 @@ def _memcpy(input, place=None, output=None) -> paddle.Tensor:


def complex(
real: paddle.Tensor, imag: paddle.Tensor, out=None, name: str | None = None
real: paddle.Tensor,
imag: paddle.Tensor,
*,
out: paddle.Tensor | None = None,
name: str | None = None,
) -> paddle.Tensor:
"""Return a complex tensor given the real and image component.

Args:
real (Tensor): The real component. The data type should be 'float32' or 'float64'.
imag (Tensor): The image component. The data type should be the same as ``real``.
name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
out (Tensor|None, optional): The output tensor. Default: None.
name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Member: Could you also add type hints for this API?

Contributor Author: Done

Returns:
Tensor, The output tensor. The data type is 'complex64' or 'complex128', with the same precision as ``real`` and ``imag``.
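
A short usage sketch for paddle.complex with the new keyword-only out; it assumes, per the docstring, that out is a preallocated tensor with the result's shape and dtype (real, imag, and buf are illustrative names):

import paddle

real = paddle.to_tensor([1.0, 2.0])
imag = paddle.to_tensor([3.0, 4.0])

buf = paddle.empty([2], dtype='complex64')  # preallocated destination
paddle.complex(real, imag, out=buf)         # result stored in buf
print(buf)                                  # [(1+3j), (2+4j)]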
@@ -3392,14 +3396,19 @@ def triu_indices(


def polar(
abs: paddle.Tensor, angle: paddle.Tensor, name: str | None = None
abs: paddle.Tensor,
angle: paddle.Tensor,
*,
out: paddle.Tensor | None = None,
name: str | None = None,
Member @SigureMo (Aug 13, 2025): Should out and name in these APIs be keyword-only arguments? That is, should a * be added before line 3397? Just asking. @zhwesky2010
) -> paddle.Tensor:
"""Return a Cartesian coordinates corresponding to the polar coordinates complex tensor given the ``abs`` and ``angle`` component.

Args:
abs (Tensor): The abs component. The data type should be 'float32' or 'float64'.
angle (Tensor): The angle component. The data type should be the same as ``abs``.
name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
out (Tensor, optional): The output tensor. If set, the result will be stored in this tensor. Default is None.
name (str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

Returns:
Tensor, The output tensor. The data type is 'complex64' or 'complex128', with the same precision as ``abs`` and ``angle``.
@@ -3428,7 +3437,9 @@ def polar(
angle, 'angle', ['float32', 'float64'], 'paddle.polar'
)

return paddle.complex(abs * paddle.cos(angle), abs * paddle.sin(angle))
return paddle.complex(
abs * paddle.cos(angle), abs * paddle.sin(angle), out=out, name=name
)
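
Because polar now simply forwards out (and name) to paddle.complex, a destination tensor behaves the same way there. A hedged sketch, with values chosen so the Cartesian result is easy to verify by hand:

import math
import paddle

abs_t = paddle.to_tensor([1.0, 2.0])
angle_t = paddle.to_tensor([0.0, math.pi / 2])

dst = paddle.empty([2], dtype='complex64')
paddle.polar(abs_t, angle_t, out=dst)
print(dst)  # approximately [(1+0j), (0+2j)], i.e. abs*cos(angle) + 1j*abs*sin(angle)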


@dygraph_only
10 changes: 8 additions & 2 deletions python/paddle/tensor/manipulation.py
@@ -2187,8 +2187,13 @@ def roll(
return out


@ParamAliasDecorator({"x": ["tensors"], "axis": ["dim"]})
def stack(
x: Sequence[Tensor], axis: int = 0, name: str | None = None
x: Sequence[Tensor],
axis: int = 0,
*,
out: Tensor | None = None,
name: str | None = None,
) -> Tensor:
"""
Stacks all the input tensors ``x`` along ``axis`` dimension.
@@ -2288,6 +2293,7 @@ def stack(
axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is ``[-(R+1), R+1)``,
where ``R`` is the number of dimensions of the first input tensor ``x[0]``.
If ``axis < 0``, ``axis = axis+R+1``. The default value of axis is 0.
out (Tensor, optional): The output tensor. If set, the output will be written to this tensor.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

Returns:
@@ -2342,7 +2348,7 @@ def stack(
axis = 0 if axis is None else axis

if in_dynamic_mode():
return _C_ops.stack(x, axis)
return _C_ops.stack(x, axis, out=out)

if not isinstance(x, list) and not isinstance(x, tuple):
# NOTE:(zhiqiu) Only support Variable as input if the Variable is a DENSE_TENSOR_ARRAY create by create_array, array_write, array_read, etc.
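
The ParamAliasDecorator added above maps the torch-style names tensors and dim onto x and axis, alongside the new keyword-only out. A usage sketch under that assumption:

import paddle

a = paddle.to_tensor([1.0, 2.0])
b = paddle.to_tensor([3.0, 4.0])

s1 = paddle.stack([a, b], axis=0)          # canonical argument names
s2 = paddle.stack(tensors=[a, b], dim=0)   # aliases resolved by the decorator

dst = paddle.empty([2, 2], dtype='float32')
paddle.stack([a, b], axis=0, out=dst)      # result written into dst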
37 changes: 28 additions & 9 deletions python/paddle/tensor/math.py
@@ -153,7 +153,10 @@ def _get_reduce_axis_with_tensor(axis, x):
return reduce_all, axis


def log(x: Tensor, name: str | None = None) -> Tensor:
@ParamAliasDecorator({"x": ["input"]})
def log(
x: Tensor, *, out: Tensor | None = None, name: str | None = None
) -> Tensor:
r"""
Calculates the natural log of the given input Tensor, element-wise.

@@ -163,6 +166,7 @@ def log(x: Tensor, name: str | None = None) -> Tensor:

Args:
x (Tensor): Input Tensor. Must be one of the following types: int32, int64, float16, bfloat16, float32, float64, complex64, complex128.
out (Tensor, optional): The output Tensor. If set, the result will be stored in this tensor. Default is None.
name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`


@@ -183,7 +187,7 @@ def log(x: Tensor, name: str | None = None) -> Tensor:
[1.94591010, 2.07944155, 2.19722462]])
"""
if in_dynamic_or_pir_mode():
return _C_ops.log(x)
return _C_ops.log(x, out=out)
else:
check_variable_and_dtype(
x,
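
For log the decorator maps input onto x, and out makes it possible to reuse one destination buffer across calls instead of allocating a fresh tensor each time. A sketch under those assumptions:

import paddle

x = paddle.to_tensor([[1.0, 2.0], [4.0, 8.0]])

y1 = paddle.log(x)        # canonical name
y2 = paddle.log(input=x)  # input alias resolved by the decorator

buf = paddle.empty_like(x)      # one reusable destination
for batch in (x, x * 2.0):
    paddle.log(batch, out=buf)  # each result overwrites buf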
@@ -519,7 +523,13 @@ def scale_(


@ParamAliasDecorator({"x": ["input"], "y": ["exponent"]})
def pow(x: Tensor, y: float | Tensor, name: str | None = None) -> Tensor:
def pow(
x: Tensor,
y: float | Tensor,
*,
out: Tensor | None = None,
name: str | None = None,
) -> Tensor:
"""
Compute the power of Tensor elements. The equation is:

@@ -540,6 +550,7 @@ def pow(x: Tensor, y: float | Tensor, name: str | None = None) -> Tensor:
input: An alias for ``x`` , with identical behavior.
y (float|int|Tensor): If it is an N-D Tensor, its data type should be the same as `x`.
exponent: An alias for ``y`` , with identical behavior.
out (Tensor, optional): The output tensor. If set, the result will be stored in this tensor. Default is None.
name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

Returns:
@@ -575,9 +586,9 @@ def pow(x: Tensor, y: float | Tensor, name: str | None = None) -> Tensor:
# in dynamic graph mode
if in_dynamic_or_pir_mode():
if isinstance(y, (int, float)):
return _C_ops.pow(x, y)
return _C_ops.pow(x, y, out=out)
elif isinstance(y, (paddle.Tensor, Variable, paddle.pir.Value)):
return _C_ops.elementwise_pow(x, y)
return _C_ops.elementwise_pow(x, y, out=out)
else:
raise TypeError(
f"y must be scalar, Tensor(in dygraph mode), Value(in pir mode) but received: {type(y)}"
@@ -1120,7 +1131,10 @@ def remainder_(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
"""


def multiply(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
@ParamAliasDecorator({"x": ["input"], "y": ["other"]})
def multiply(
Contributor: Can this be lowered?

Contributor Author: This one isn't in ops.yaml.

x: Tensor, y: Tensor, *, out: Tensor | None = None, name: str | None = None
) -> Tensor:
"""
multiply two tensors element-wise. The equation is:

Expand All @@ -1138,6 +1152,7 @@ def multiply(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
Args:
x (Tensor): the input tensor, its data type should be one of bfloat16, float16, float32, float64, int32, int64, bool, complex64, complex128.
y (Tensor): the input tensor, its data type should be one of bfloat16, float16, float32, float64, int32, int64, bool, complex64, complex128.
out (Tensor|None, optional): The output tensor. If set, the result will be stored in this tensor. Default is None.
name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

Returns:
@@ -1166,7 +1181,7 @@ def multiply(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:

"""
if in_dynamic_or_pir_mode():
return _C_ops.multiply(x, y)
return _C_ops.multiply(x, y, out=out)
else:
return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
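
With the new decorator, multiply also accepts the torch-style input/other names; combined with out this allows drop-in-style calls. A sketch, assuming the shapes already match:

import paddle

a = paddle.to_tensor([1.0, 2.0])
b = paddle.to_tensor([10.0, 20.0])

dst = paddle.empty_like(a)
paddle.multiply(input=a, other=b, out=dst)  # aliases plus preallocated output
print(dst)                                  # [10., 40.]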

@@ -5077,12 +5092,16 @@ def prod(
return out


def sign(x: Tensor, name: str | None = None) -> Tensor:
@ParamAliasDecorator({"x": ["input"]})
def sign(
x: Tensor, *, out: Tensor | None = None, name: str | None = None
) -> Tensor:
"""
Returns sign of every element in `x`: For real numbers, 1 for positive, -1 for negative and 0 for zero. For complex numbers, the return value is a complex number with unit magnitude. If a complex number element is zero, the result is 0+0j.

Args:
x (Tensor): The input tensor. The data type can be uint8, int8, int16, int32, int64, bfloat16, float16, float32, float64, complex64 or complex128.
out (Tensor|None, optional): The output tensor. If set, the result will be stored in this tensor. Default is None.
name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

Returns:
Expand All @@ -5100,7 +5119,7 @@ def sign(x: Tensor, name: str | None = None) -> Tensor:
[ 1., 0., -1., 1.])
"""
if in_dynamic_or_pir_mode():
return _C_ops.sign(x)
return _C_ops.sign(x, out=out)
else:
check_variable_and_dtype(
x,
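
A sketch of sign covering the complex-number semantics the docstring describes (z/|z| for nonzero elements, 0+0j for zero); it assumes the complex64 construction accepts Python complex literals:

import paddle

z = paddle.to_tensor([3 + 4j, 0j, -2 + 0j], dtype='complex64')
dst = paddle.empty_like(z)
paddle.sign(z, out=dst)
print(dst)  # [(0.6+0.8j), 0j, (-1+0j)], since 3+4j has magnitude 5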