7 changes: 4 additions & 3 deletions python/paddle/tensor/manipulation.py
@@ -4180,7 +4180,7 @@ def broadcast_to(x, shape, name=None):


Args:
x (Tensor): The input tensor, its data type is bool, float16, float32, float64, int32 or int64.
x (Tensor): The input tensor, its data type is bool, float16, float32, float64, int32, int64, uint8 or uint16.
shape (list|tuple|Tensor): The result shape after broadcasting. The data type is int32. If shape is a list or tuple, all its elements
should be integers or 0-D or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32.
The value -1 in shape means keeping the corresponding dimension unchanged.
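
The hunk above widens the documented dtype list for paddle.broadcast_to to include uint8 and uint16. A minimal dygraph sketch of the newly listed uint8 path (the tensor values and target shape below are illustrative, not taken from this diff):

    import paddle

    # Assumes uint8 inputs are accepted, as the updated docstring states.
    x = paddle.to_tensor([[1, 2, 3]], dtype='uint8')
    out = paddle.broadcast_to(x, shape=[2, 3])
    print(out.dtype)   # paddle.uint8
    print(out.shape)   # [2, 3]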
@@ -4211,7 +4211,7 @@ def expand(x, shape, name=None):
Both the number of dimensions of ``x`` and the number of elements in ``shape`` should be less than or equal to 6. And the number of dimensions of ``x`` should be less than the number of elements in ``shape``. The dimension to expand must have a value 0.

Args:
x (Tensor): The input Tensor, its data type is bool, float32, float64, int32 or int64.
x (Tensor): The input Tensor, its data type is bool, float16, float32, float64, int32, int64, uint8 or uint16.
shape (list|tuple|Tensor): The result shape after expanding. The data type is int32. If shape is a list or tuple, all its elements
should be integers or 0-D or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32.
The value -1 in shape means keeping the corresponding dimension unchanged.
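
The same dtype widening applies to the paddle.expand docstring above. A short sketch of the -1 convention described for the shape argument (shapes chosen only for illustration):

    import paddle

    x = paddle.to_tensor([[1, 2, 3]], dtype='uint8')
    # -1 keeps the corresponding dimension (the trailing 3) unchanged.
    out = paddle.expand(x, shape=[2, -1])
    print(out.shape)   # [2, 3]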
@@ -4248,7 +4248,7 @@ def expand(x, shape, name=None):
if paddle.utils._contain_var(shape):
shape = paddle.utils.get_int_tensor_list(shape)
else:
TypeError("Shape only supports OpResult, or list, or tuple.")
raise TypeError("Shape only supports Value, or list, or tuple.")
return _C_ops.expand(x, shape)
else:
if isinstance(shape, Variable):
@@ -4275,6 +4275,7 @@ def expand(x, shape, name=None):
'float64',
'int32',
'int64',
'uint8',
'uint16',
],
'expand',
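
The two hunks above fix an error path where a TypeError was constructed but never raised, and add 'uint8' to the dtype list checked in the static-graph branch. A hedged static-mode sketch of the call that the new 'uint8' entry is meant to allow (the data name and shapes are made up for illustration):

    import paddle

    paddle.enable_static()
    with paddle.static.program_guard(
        paddle.static.Program(), paddle.static.Program()
    ):
        # With 'uint8' in the allowed dtypes, this check should no longer raise TypeError.
        x = paddle.static.data(name='x', shape=[1, 3], dtype='uint8')
        out = paddle.expand(x, shape=[2, 3])
    paddle.disable_static()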
28 changes: 17 additions & 11 deletions test/legacy_test/test_broadcast_to_op.py
@@ -18,25 +18,31 @@

import paddle
from paddle import base
from paddle.framework import in_pir_mode
from paddle.pir_utils import test_with_pir_api
from paddle.static import Program, program_guard

paddle.enable_static()


class TestBroadcastToError(unittest.TestCase):
@test_with_pir_api
def test_errors(self):
with program_guard(Program(), Program()):
x1 = base.create_lod_tensor(
np.array([[-1]]), [[1]], base.CPUPlace()
)
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
shape = [2, 2]
self.assertRaises(TypeError, paddle.tensor.broadcast_to, x1, shape)
x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="uint8")
self.assertRaises(TypeError, paddle.tensor.broadcast_to, x2, shape)
x3 = paddle.static.data(name='x3', shape=[-1, 4], dtype="bool")
x3.stop_gradient = False
self.assertRaises(ValueError, paddle.tensor.broadcast_to, x3, shape)
if not in_pir_mode():
x1 = base.create_lod_tensor(
np.array([[-1]]), [[1]], base.CPUPlace()
)
self.assertRaises(
TypeError, paddle.tensor.broadcast_to, x1, shape
)
x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="bool")
x2.stop_gradient = False
self.assertRaises(ValueError, paddle.tensor.broadcast_to, x2, shape)
x2.stop_gradient = True
self.assertRaises(TypeError, paddle.tensor.broadcast_to, x2, 1)


# Test python API
25 changes: 15 additions & 10 deletions test/legacy_test/test_expand_v2_op.py
@@ -23,6 +23,7 @@
import paddle
from paddle import base
from paddle.base import Program, core, program_guard
from paddle.framework import in_pir_mode
from paddle.pir_utils import test_with_pir_api


@@ -297,19 +298,23 @@ def test_check_grad(self):


class TestExpandV2Error(unittest.TestCase):
@test_with_pir_api
def test_errors(self):
paddle.enable_static()
with program_guard(Program(), Program()):
x1 = base.create_lod_tensor(
np.array([[-1]]), [[1]], base.CPUPlace()
)
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
shape = [2, 2]
self.assertRaises(TypeError, paddle.tensor.expand, x1, shape)
x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="uint8")
self.assertRaises(TypeError, paddle.tensor.expand, x2, shape)
x3 = paddle.static.data(name='x3', shape=[-1, 4], dtype="bool")
x3.stop_gradient = False
self.assertRaises(ValueError, paddle.tensor.expand, x3, shape)
if not in_pir_mode():
x1 = base.create_lod_tensor(
np.array([[-1]]), [[1]], base.CPUPlace()
)
self.assertRaises(TypeError, paddle.tensor.expand, x1, shape)
x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="bool")
x2.stop_gradient = False
self.assertRaises(ValueError, paddle.tensor.expand, x2, shape)
x2.stop_gradient = True
self.assertRaises(TypeError, paddle.tensor.expand, x2, 1)
paddle.disable_static()

