Commit 151c39b

update

1 parent d4138e4 commit 151c39b

File tree

- python/paddle/tensor/math.py
- test/legacy_test/test_div_op.py

2 files changed: +64 −131 lines changed

python/paddle/tensor/math.py

Lines changed: 9 additions & 76 deletions
```diff
@@ -97,7 +97,6 @@

 if TYPE_CHECKING:
     from collections.abc import Sequence
-    from numbers import Number

     from paddle import Tensor
     from paddle._typing import DTypeLike
```
```diff
@@ -923,14 +922,13 @@ def subtract_(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
 @ParamAliasDecorator({"x": ["input"], "y": ["other"]})
 def divide(
     x: Tensor,
-    y: Tensor | Number,
-    name: str | None = None,
-    *,
+    y: Tensor,
     rounding_mode: str | None = None,
     out: Tensor | None = None,
+    name: str | None = None,
 ) -> Tensor:
     """
-    Divide tensor x by y (Tensor or Number) element-wise.The equation is:
+    Divide two tensors element-wise. The equation is:

     .. math::
         out = x / y
```
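With this hunk, `divide` no longer accepts a Python scalar for `y`; `name` moves to the end of the signature, and `rounding_mode`/`out` are no longer keyword-only. A minimal usage sketch against the updated signature, assuming two float tensors:

```python
import paddle

x = paddle.to_tensor([2.0, 3.0, 4.0])
y = paddle.to_tensor([1.0, 5.0, 2.0])

# Plain element-wise division.
out = paddle.divide(x, y)

# Rounding modes from the new docstring: "trunc" truncates toward zero,
# "floor" rounds down toward negative infinity.
floored = paddle.divide(x, y, rounding_mode="floor")
```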
```diff
@@ -943,11 +941,11 @@ def divide(
     Args:
         x (Tensor): the input tensor, it's data type should be bool, bfloat16, float16, float32, float64,
             int8, int16, int32, int64, uint8, complex64, complex128.
-        y (Tensor| Number): the input tensor or scalar, it's data type should be bool, bfloat16, float16, float32, float64,
+        y (Tensor): the input tensor, it's data type should be bool, bfloat16, float16, float32, float64,
             int8, int16, int32, int64, uint8, complex64, complex128.
-        name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
         rounding_mode (str|None, optional): The rounding mode. Can be None (default), "trunc" (truncate toward zero), or "floor" (round down toward negative infinity).
         out (Tensor, optional): The output tensor. Default: None.
+        name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

     Returns:
         N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
@@ -966,48 +964,6 @@ def divide(
            [2.        , 0.60000000, 2.        ])

     """
-    if not isinstance(y, paddle.Tensor):
-        if in_dynamic_or_pir_mode():
-            y = paddle.to_tensor(y)
-        else:
-            if isinstance(y, bool):
-                dtype = paddle.int64
-            elif isinstance(y, int):
-                dtype = (
-                    paddle.int64
-                    if x.dtype in (paddle.int32, paddle.int64)
-                    else x.dtype
-                )
-            elif isinstance(y, float):
-                dtype = (
-                    paddle.float32
-                    if x.dtype in (paddle.float16, paddle.float32)
-                    else x.dtype
-                )
-            else:
-                dtype = x.dtype
-            y = paddle.full(shape=[1], fill_value=y, dtype=dtype)
-
-    # Cross-type operations: When performing integer and floating-point operations, the result only needs to meet floating-point precision requirements. long_tensor + float_tensor → float32 (no need for float64).
-    INTEGER_DTYPES = {
-        paddle.int8,
-        paddle.int16,
-        paddle.int32,
-        paddle.int64,
-        paddle.uint8,
-    }
-    FLOAT_DTYPES = {
-        paddle.bfloat16,
-        paddle.float16,
-        paddle.float32,
-        paddle.float64,
-    }
-
-    if x.dtype in INTEGER_DTYPES and y.dtype in FLOAT_DTYPES:
-        x = x.astype(y.dtype)
-    elif y.dtype in INTEGER_DTYPES and x.dtype in FLOAT_DTYPES:
-        y = y.astype(x.dtype)
-
     if rounding_mode is None:
         if in_dynamic_or_pir_mode():
             res = _C_ops.divide(x, y, out=out)
```
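Since the scalar-to-tensor conversion and the int/float promotion above are deleted, callers that previously wrote `paddle.divide(x, 2)` now build the tensor themselves. A hedged sketch of an equivalent call under the tensor-only API; the dtype choice mirrors the removed promotion rules but is now the caller's responsibility:

```python
import paddle

x = paddle.to_tensor([2, 3, 4], dtype='int64')

# Before this commit, paddle.divide(x, 2) converted the scalar internally.
# After it, wrap the scalar explicitly and pick the dtype yourself.
y = paddle.full(shape=[1], fill_value=2, dtype=x.dtype)
out = paddle.divide(x, y)
```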
```diff
@@ -1052,7 +1008,7 @@ def divide(
 @inplace_apis_in_dygraph_only
 def divide_(
     x: Tensor,
-    y: Tensor | Number,
+    y: Tensor,
     name: str | None = None,
     *,
     rounding_mode: str | None = None,
@@ -1061,29 +1017,6 @@ def divide_(
     Inplace version of ``divide`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_paddle_divide`.
     """
-    if not isinstance(y, paddle.Tensor):
-        y = paddle.to_tensor(y)
-
-    # Cross-type operations: When performing integer and floating-point operations, the result only needs to meet floating-point precision requirements. long_tensor + float_tensor → float32 (no need for float64).
-    INTEGER_DTYPES = {
-        paddle.int8,
-        paddle.int16,
-        paddle.int32,
-        paddle.int64,
-        paddle.uint8,
-    }
-    FLOAT_DTYPES = {
-        paddle.bfloat16,
-        paddle.float16,
-        paddle.float32,
-        paddle.float64,
-    }
-
-    if x.dtype in INTEGER_DTYPES and y.dtype in FLOAT_DTYPES:
-        x = x.astype(y.dtype)
-    elif y.dtype in INTEGER_DTYPES and x.dtype in FLOAT_DTYPES:
-        y = y.astype(x.dtype)
-
     out_shape = broadcast_shape(x.shape, y.shape)
     if out_shape != x.shape:
         raise ValueError(
```
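The in-place path keeps the `broadcast_shape` guard visible in the context lines: `y` may broadcast into `x`, but the broadcasted result shape must equal `x.shape`, since the result is written back into `x`. A small sketch of the allowed and rejected cases:

```python
import paddle

x = paddle.to_tensor([[2.0, 4.0], [6.0, 8.0]])  # shape [2, 2]
y = paddle.to_tensor([2.0])                     # shape [1], broadcasts to [2, 2]
x.divide_(y)                                    # fine: broadcast shape == x.shape

bigger = paddle.to_tensor([[[1.0]]])            # shape [1, 1, 1]
# x.divide_(bigger) would raise ValueError: broadcasting gives [1, 2, 2],
# which cannot be stored back into a [2, 2] tensor.
```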
```diff
@@ -1107,7 +1040,7 @@ def divide_(
 @ParamAliasDecorator({"x": ["input"], "y": ["other"]})
 def div(
     x: Tensor,
-    y: Tensor | Number,
+    y: Tensor,
     *,
     rounding_mode: str | None = None,
     out: Tensor | None = None,
@@ -1123,7 +1056,7 @@ def div(
 @inplace_apis_in_dygraph_only
 def div_(
     x: Tensor,
-    y: Tensor | Number,
+    y: Tensor,
     *,
     rounding_mode: str | None = None,
 ) -> Tensor:
@@ -1137,7 +1070,7 @@ def div_(
 @ParamAliasDecorator({"x": ["input"], "y": ["other"]})
 def true_divide(
     x: Tensor,
-    y: Tensor | Number,
+    y: Tensor,
     *,
     out: Tensor | None = None,
 ) -> Tensor:
```
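The remaining hunks apply the same tensor-only `y` annotation to the `div`, `div_`, and `true_divide` wrappers. A brief usage sketch against the updated signatures (tensor operands assumed throughout; behavior comments follow the `divide` docstring shown above, not a separate spec):

```python
import paddle

x = paddle.to_tensor([7.0, 8.0, 9.0])
y = paddle.to_tensor([2.0, 2.0, 2.0])

a = paddle.div(x, y, rounding_mode="trunc")  # truncates toward zero
b = paddle.true_divide(x, y)                 # no rounding_mode in its signature
x.div_(y)                                    # in-place variant, writes into x
```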

test/legacy_test/test_div_op.py

Lines changed: 55 additions & 55 deletions
```diff
@@ -47,12 +47,12 @@ def test_paddle_divide_with_param_names(self):
         expected = self.x_np / self.y_np
         np.testing.assert_allclose(out.numpy(), expected, rtol=1e-6)

-    def test_paddle_divide_with_scalar(self):
-        """Test paddle.divide with scalar"""
-        x = paddle.to_tensor(self.x_np)
-        out = paddle.divide(x, self.scalar)
-        expected = self.x_np / self.scalar
-        np.testing.assert_allclose(out.numpy(), expected, rtol=1e-6)
+    # def test_paddle_divide_with_scalar(self):
+    #     """Test paddle.divide with scalar"""
+    #     x = paddle.to_tensor(self.x_np)
+    #     out = paddle.divide(x, self.scalar)
+    #     expected = self.x_np / self.scalar
+    #     np.testing.assert_allclose(out.numpy(), expected, rtol=1e-6)

     def test_paddle_divide_rounding_modes(self):
         """Test paddle.divide with different rounding modes"""
```
```diff
@@ -85,31 +85,31 @@ def test_divide_with_out_and_rounding_modes(self):
         expected_floor = np.array([2.0, -3.0, 1.0, -2.0])
         np.testing.assert_allclose(out.numpy(), expected_floor, rtol=1e-20)

-    def test_paddle_divide_mixed_dtypes(self):
-        """Test paddle.divide with mixed dtypes (int/float combinations)"""
-        test_cases = [
-            # (x_dtype, y_dtype, expected_dtype)
-            ('int8', 'float16', 'float16'),
-            ('int16', 'float32', 'float32'),
-            ('uint8', 'float64', 'float64'),
-            ('int32', 'bfloat16', 'bfloat16'),
-            ('float16', 'int64', 'float16'),
-            ('bfloat16', 'uint8', 'bfloat16'),
-            ('float64', 'int8', 'float64'),
-        ]
-
-        for x_dtype, y_dtype, expected_dtype in test_cases:
-            with self.subTest(x_dtype=x_dtype, y_dtype=y_dtype):
-                x = paddle.to_tensor([1, 2, 3], dtype=x_dtype)
-                y = paddle.to_tensor([2, 1, 3], dtype=y_dtype)
-
-                out = paddle.divide(x, y)
-
-                self.assertEqual(
-                    out.dtype,
-                    getattr(paddle, expected_dtype),
-                    f'Dtype mismatch: {x_dtype}/{y_dtype} should be {expected_dtype}',
-                )
+    # def test_paddle_divide_mixed_dtypes(self):
+    #     """Test paddle.divide with mixed dtypes (int/float combinations)"""
+    #     test_cases = [
+    #         # (x_dtype, y_dtype, expected_dtype)
+    #         ('int8', 'float16', 'float16'),
+    #         ('int16', 'float32', 'float32'),
+    #         ('uint8', 'float64', 'float64'),
+    #         ('int32', 'bfloat16', 'bfloat16'),
+    #         ('float16', 'int64', 'float16'),
+    #         ('bfloat16', 'uint8', 'bfloat16'),
+    #         ('float64', 'int8', 'float64'),
+    #     ]
+
+    #     for x_dtype, y_dtype, expected_dtype in test_cases:
+    #         with self.subTest(x_dtype=x_dtype, y_dtype=y_dtype):
+    #             x = paddle.to_tensor([1, 2, 3], dtype=x_dtype)
+    #             y = paddle.to_tensor([2, 1, 3], dtype=y_dtype)
+
+    #             out = paddle.divide(x, y)
+
+    #             self.assertEqual(
+    #                 out.dtype,
+    #                 getattr(paddle, expected_dtype),
+    #                 f'Dtype mismatch: {x_dtype}/{y_dtype} should be {expected_dtype}',
+    #             )

     def test_paddle_divide_static_graph(self):
         """Test paddle.divide in static graph"""
```
```diff
@@ -236,12 +236,12 @@ def test_paddle_div_with_param_names(self):
         expected = self.x_np / self.y_np
         np.testing.assert_allclose(out.numpy(), expected, rtol=1e-6)

-    def test_paddle_div_with_scalar(self):
-        """Test paddle.div with scalar"""
-        x = paddle.to_tensor(self.x_np)
-        out = paddle.div(x, self.scalar)
-        expected = self.x_np / self.scalar
-        np.testing.assert_allclose(out.numpy(), expected, rtol=1e-6)
+    # def test_paddle_div_with_scalar(self):
+    #     """Test paddle.div with scalar"""
+    #     x = paddle.to_tensor(self.x_np)
+    #     out = paddle.div(x, self.scalar)
+    #     expected = self.x_np / self.scalar
+    #     np.testing.assert_allclose(out.numpy(), expected, rtol=1e-6)

     def test_paddle_div_rounding_modes(self):
         """Test paddle.div with different rounding modes"""
```
```diff
@@ -344,12 +344,12 @@ def test_paddle_divide__with_param_names(self):
         expected = self.x_np / self.y_np
         np.testing.assert_allclose(x.numpy(), expected, rtol=1e-6)

-    def test_paddle_divide__with_scalar(self):
-        """Test paddle.divide_ with scalar"""
-        x = paddle.to_tensor(self.x_np)
-        x.divide_(self.scalar)
-        expected = self.x_np / self.scalar
-        np.testing.assert_allclose(x.numpy(), expected, rtol=1e-6)
+    # def test_paddle_divide__with_scalar(self):
+    #     """Test paddle.divide_ with scalar"""
+    #     x = paddle.to_tensor(self.x_np)
+    #     x.divide_(self.scalar)
+    #     expected = self.x_np / self.scalar
+    #     np.testing.assert_allclose(x.numpy(), expected, rtol=1e-6)

     def test_paddle_divide__rounding_modes(self):
         """Test paddle.divide_ with different rounding modes"""
```
```diff
@@ -391,12 +391,12 @@ def test_paddle_div__with_param_names(self):
         expected = self.x_np / self.y_np
         np.testing.assert_allclose(x.numpy(), expected, rtol=1e-6)

-    def test_paddle_div__with_scalar(self):
-        """Test paddle.div_ with scalar"""
-        x = paddle.to_tensor(self.x_np)
-        x.div_(self.scalar)
-        expected = self.x_np / self.scalar
-        np.testing.assert_allclose(x.numpy(), expected, rtol=1e-6)
+    # def test_paddle_div__with_scalar(self):
+    #     """Test paddle.div_ with scalar"""
+    #     x = paddle.to_tensor(self.x_np)
+    #     x.div_(self.scalar)
+    #     expected = self.x_np / self.scalar
+    #     np.testing.assert_allclose(x.numpy(), expected, rtol=1e-6)

     def test_paddle_div__rounding_modes(self):
         """Test paddle.div_ with different rounding modes"""
```
```diff
@@ -443,12 +443,12 @@ def test_paddle_true_divide_with_param_names(self):
         expected = self.x_np / self.y_np
         np.testing.assert_allclose(out.numpy(), expected, rtol=1e-6)

-    def test_paddle_true_divide_with_scalar(self):
-        """Test paddle.true_divide with scalar"""
-        x = paddle.to_tensor(self.x_np)
-        out = paddle.true_divide(x, self.scalar)
-        expected = self.x_np / self.scalar
-        np.testing.assert_allclose(out.numpy(), expected, rtol=1e-6)
+    # def test_paddle_true_divide_with_scalar(self):
+    #     """Test paddle.true_divide with scalar"""
+    #     x = paddle.to_tensor(self.x_np)
+    #     out = paddle.true_divide(x, self.scalar)
+    #     expected = self.x_np / self.scalar
+    #     np.testing.assert_allclose(out.numpy(), expected, rtol=1e-6)

     def test_paddle_true_divide_static_graph(self):
         """Test paddle.true_divide in static graph"""
```
