2 changes: 1 addition & 1 deletion python/paddle/tensor/manipulation.py
@@ -6491,7 +6491,7 @@ def unfold(x, axis, size, step, name=None):


def _index_fill_impl(x, index, axis, value, inplace):
if not isinstance(index, Variable):
if not isinstance(index, (Variable, paddle.pir.Value)):
raise ValueError("index must be Tensor")

if not isinstance(value, Variable):
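(Illustrative sketch, not part of the PR.) Assuming paddle.index_fill is the public wrapper that dispatches to _index_fill_impl, the widened check matters because, under PIR static graphs, paddle.static.data hands back a paddle.pir.Value rather than a base Variable:

import paddle

# Sketch only, assuming paddle.index_fill routes through _index_fill_impl.
# With PIR enabled (e.g. via paddle.pir_utils), static-graph tensors are
# paddle.pir.Value objects, which the old isinstance(index, Variable) rejected.
paddle.enable_static()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[3, 4], dtype='float32')
    index = paddle.static.data(name='index', shape=[2], dtype='int64')
    # Before this change: ValueError("index must be Tensor") when `index`
    # is a pir.Value; after it, both Variable and pir.Value are accepted.
    out = paddle.index_fill(x, index, axis=0, value=0.0)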
7 changes: 2 additions & 5 deletions test/deprecated/legacy_test/CMakeLists.txt
@@ -454,7 +454,7 @@ endif()
# Some ops need to check results when gc is enabled
# Currently, only ops that register NoNeedBufferVarsInference need to do this test
set(TEST_OPS_WITH_GC test_affine_channel_op test_gather_nd_op test_lod_reset_op
test_lookup_table_op test_scatter_op test_slice_op)
test_scatter_op test_slice_op)

foreach(TEST_OP ${TEST_OPS_WITH_GC})
list(REMOVE_ITEM TEST_OPS ${TEST_OP})
@@ -676,7 +676,6 @@ set_tests_properties(test_imperative_transformer_sorted_gradient
set_tests_properties(test_matmul_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_nearest_interp_v2_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_trilinear_interp_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_bicubic_interp_v2_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_static_save_load PROPERTIES TIMEOUT 250)
set_tests_properties(test_paddle_save_load_binary PROPERTIES TIMEOUT 120)
if(WIN32)
@@ -826,15 +825,13 @@ set(STATIC_BUILD_TESTS
test_fetch_lod_tensor_array
test_fuse_bn_act_pass
test_layer_norm_op
test_lookup_table_bf16_op
test_lookup_table_v2_op
test_lookup_table_v2_op_deprecated
test_matmul_op
test_matmul_v2_op
test_momentum_op
test_nce
test_paddle_save_load_binary
test_reduce_op
test_segment_ops
test_shuffle_batch_op
test_sparse_conv_op
test_sparse_norm_op
@@ -0,0 +1,92 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import (
convert_uint16_to_float,
)

import paddle
from paddle import base, enable_static


def _lookup(weights, ids, flat_ids, op_version="lookup_table"):
w_shape = weights.shape
out_shape = (
list(ids.shape[:-1])
if op_version == "lookup_table"
else list(ids.shape)
)
out_shape.append(w_shape[-1])
out = weights[flat_ids].reshape(out_shape)
return out


class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase):
"""
Test embedding layer api and results for bfloat16
"""

def set_initializer(self):
self.initializer = paddle.nn.initializer.Constant(value=self.value)

def setUp(self):
self.ids_shape = [4, 1]
self.w_shape = [10, 64]
self.ids = np.random.randint(low=0, high=9, size=self.ids_shape).astype(
"int64"
)
self.flat_ids = self.ids.flatten()
self.value = 3.0
self.w_fp32 = np.full(self.w_shape, self.value)
self.place = base.CPUPlace()
self.prog = base.Program()
self.startup_prog = base.Program()
self.set_initializer()
paddle.enable_static()

with base.program_guard(self.prog, self.startup_prog):
x = paddle.static.data(
name='x', shape=self.ids_shape, dtype='int64'
)
self.emb = paddle.static.nn.embedding(
input=x,
size=self.w_shape,
param_attr=base.ParamAttr(
name="emb_weight", initializer=self.initializer
),
is_sparse=False,
dtype="uint16",
) # bfloat16
exe = base.Executor(self.place)
exe.run(self.startup_prog)
self.result = exe.run(
self.prog, feed={'x': self.ids}, fetch_list=['emb_weight', self.emb]
)

def test_embedding_weights(self):
result = convert_uint16_to_float(self.result[0])
np.testing.assert_array_equal(self.w_fp32, result)

def test_lookup_results(self):
lookup_result = convert_uint16_to_float(self.result[1].squeeze(-2))
lookup_ref = _lookup(self.w_fp32, self.ids, self.flat_ids)
np.testing.assert_array_equal(lookup_result, lookup_ref)


if __name__ == "__main__":
enable_static()
unittest.main()
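For a quick sanity check of the _lookup reference above, the same flat-index-then-reshape recipe can be exercised with plain NumPy (shapes and values below are made up for illustration):

import numpy as np

# Mirrors _lookup(..., op_version="lookup_table"): ids of shape (4, 1)
# index a (5, 3) weight table and produce a (4, 3) result.
weights = np.arange(15, dtype=np.float32).reshape(5, 3)
ids = np.array([[1], [3], [0], [4]], dtype=np.int64)
flat_ids = ids.flatten()
out_shape = list(ids.shape[:-1]) + [weights.shape[-1]]
out = weights[flat_ids].reshape(out_shape)
assert out.shape == (4, 3)
assert (out[0] == weights[1]).all()  # row 0 of the output is table row 1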
67 changes: 67 additions & 0 deletions test/deprecated/legacy_test/test_lookup_table_op_deprecated.py
@@ -0,0 +1,67 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import (
paddle_static_guard,
)

import paddle
from paddle.base import Program, program_guard


class TestEmbedOpError(unittest.TestCase):
def test_errors(self):
with paddle_static_guard():
with program_guard(Program(), Program()):
input_data = np.random.randint(0, 10, (4, 1)).astype("int64")

def test_Variable():
# the input type must be Variable
paddle.static.nn.embedding(input=input_data, size=(10, 64))

self.assertRaises(TypeError, test_Variable)

def test_input_dtype():
# the input dtype must be int64
input = paddle.static.data(
name='x', shape=[4, 1], dtype='float32'
)
paddle.static.nn.embedding(input=input, size=(10, 64))

self.assertRaises(TypeError, test_input_dtype)

def test_param_dtype():
# dtype must be float32 or float64
input2 = paddle.static.data(
name='x2', shape=[4, 1], dtype='int64'
)
paddle.static.nn.embedding(
input=input2, size=(10, 64), dtype='int64'
)

self.assertRaises(TypeError, test_param_dtype)

input3 = paddle.static.data(
name='x3', shape=[4, 1], dtype='int64'
)
paddle.static.nn.embedding(
input=input3, size=(10, 64), dtype='float16'
)


if __name__ == "__main__":
unittest.main()
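For contrast with the error cases above, a minimal sketch (not part of the PR) of a call that satisfies the checked constraints, i.e. int64 ids and a float32/float64 parameter dtype:

import paddle

# Valid usage for the constraints exercised by TestEmbedOpError:
# the ids tensor must be int64 and the table dtype a float type.
paddle.enable_static()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    ids = paddle.static.data(name='ids', shape=[4, 1], dtype='int64')
    emb = paddle.static.nn.embedding(input=ids, size=(10, 64), dtype='float32')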
@@ -20,65 +20,9 @@

import paddle
from paddle import base
from paddle.base import core
from paddle.pir_utils import test_with_pir_api


class TestLookupTableV2BF16Op(test_lookup_table_bf16_op.TestLookupTableBF16Op):
def init_test(self):
self.op_type = "lookup_table_v2"
self.python_api = paddle.nn.functional.embedding
self.ids_shape = 4
self.mkldnn_data_type = "bfloat16"


class TestLookupTableV2BF16OpIds4D(
test_lookup_table_bf16_op.TestLookupTableBF16OpIds4D
):
def init_test(self):
self.op_type = "lookup_table_v2"
self.python_api = paddle.nn.functional.embedding
self.ids_shape = (2, 4, 5)
self.mkldnn_data_type = "bfloat16"


class TestLookupTableV2BF16OpWIsSelectedRows(
test_lookup_table_bf16_op.TestLookupTableBF16OpWIsSelectedRows
):
def init_test(self):
self.op_type = "lookup_table_v2"
self.python_api = paddle.nn.functional.embedding
self.ids_shape = 10


class TestLookupTableV2BF16OpWIsSelectedRows4DIds(
test_lookup_table_bf16_op.TestLookupTableBF16OpWIsSelectedRows4DIds
):
def init_test(self):
self.op_type = "lookup_table_v2"
self.python_api = paddle.nn.functional.embedding
self.ids_shape = (3, 4, 5)


class TestLookupTableBF16OpWithPadding(TestLookupTableV2BF16Op):
def test_check_output(self):
ids = np.squeeze(self.inputs['Ids'])
padding_idx = np.random.choice(ids, 1)[0]
self.outputs['Out'][ids == padding_idx] = np.zeros(31)
self.attrs = {'padding_idx': int(padding_idx)}
self.check_output_with_place(core.CPUPlace())


class TestLookupTableBF16OpIds4DPadding(TestLookupTableV2BF16OpIds4D):
def test_check_output(self):
ids = self.inputs['Ids']
flatten_idx = ids.flatten()
padding_idx = np.random.choice(flatten_idx, 1)[0]
self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
self.attrs = {'padding_idx': int(padding_idx)}
self.check_output_with_place(core.CPUPlace())


class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase):
"""
Test embedding layer from input api and results for bfloat16
@@ -103,6 +47,7 @@ def setUp(self):
self.startup_prog = base.Program()
self.set_initializer()

paddle.enable_static()
with base.program_guard(self.prog, self.startup_prog):
x = paddle.static.data(
name='x', shape=[-1] + self.ids_shape, dtype='int64'
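The deleted TestLookupTableV2BF16Op* classes pointed their python_api at paddle.nn.functional.embedding; for reference, a small dynamic-graph sketch of that lookup (shapes and values chosen purely for illustration):

import paddle

# Functional embedding lookup referenced by the removed tests' python_api.
paddle.disable_static()
weight = paddle.full([10, 64], 3.0, dtype='float32')  # embedding table
ids = paddle.to_tensor([1, 0, 3, 7], dtype='int64')   # token ids
out = paddle.nn.functional.embedding(ids, weight)
print(out.shape)  # [4, 64]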