1 | | -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. |
| 1 | +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. |
2 | 2 | # |
3 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); |
4 | 4 | # you may not use this file except in compliance with the License. |
5 | 5 | # You may obtain a copy of the License at |
6 | 6 | # |
7 | | -# http://www.apache.org/licenses/LICENSE-2.0 |
| 7 | +# http://www.apache.org/licenses/LICENSE-2.0 |
8 | 8 | # |
9 | 9 | # Unless required by applicable law or agreed to in writing, software |
10 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, |
11 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | 12 | # See the License for the specific language governing permissions and
13 | 13 | # limitations under the License.
14 | 14 |
15 | 15 | from __future__ import print_function |
16 | 16 |
17 | | -import numpy as np |
18 | 17 | import unittest |
| 18 | +import numpy as np |
19 | 19 | import sys |
20 | 20 | sys.path.append("..") |
21 | | -from op_test import OpTest, _set_use_system_allocator |
| 21 | +from op_test import OpTest |
22 | 22 | import paddle |
23 | 23 | import paddle.fluid as fluid |
| 24 | +from paddle.framework import core |
24 | 25 |
25 | 26 | paddle.enable_static() |
26 | 27 | SEED = 2021 |
27 | 28 |
28 | 29 |
| 30 | +def gather_numpy(x, index, axis): |
| 31 | + x_transpose = np.swapaxes(x, 0, axis) |
| 32 | + tmp_gather = x_transpose[index, ...] |
| 33 | + gather = np.swapaxes(tmp_gather, 0, axis) |
| 34 | + return gather |
| 35 | + |
| 36 | + |
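For reference, gather_numpy above is just np.take along the given axis; a quick self-check sketch, assuming the helper as defined in this diff:

    import numpy as np

    x = np.arange(24).reshape(2, 3, 4)
    idx = np.array([2, 0])
    # swap the target axis to the front, fancy-index, swap back ==
    # np.take along that axis
    assert np.array_equal(gather_numpy(x, idx, axis=1), np.take(x, idx, axis=1))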
29 | 37 | @unittest.skipIf(not paddle.is_compiled_with_npu(), |
30 | 38 | "core is not compiled with NPU") |
31 | 39 | class TestGatherOp(OpTest): |
32 | 40 | def setUp(self): |
33 | 41 | self.set_npu() |
34 | | - self.op_type = "gather" |
35 | 42 | self.place = paddle.NPUPlace(0) |
36 | | - self.init_dtype() |
37 | | - self.init_input_output() |
38 | | - |
| 43 | + self.op_type = "gather" |
| 44 | + self.config() |
| 45 | + xnp = np.random.random(self.x_shape).astype(self.x_type) |
39 | 46 | self.inputs = { |
40 | | - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), |
41 | | - 'Index': OpTest.np_dtype_to_fluid_dtype(self.index) |
| 47 | + 'X': xnp, |
| 48 | + 'Index': np.array(self.index).astype(self.index_type) |
42 | 49 | } |
43 | | - self.attrs = {'validate_indices': True} |
44 | | - self.outputs = {'Out': self.out} |
| 50 | + self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]} |
45 | 51 |
46 | 52 | def set_npu(self): |
47 | 53 | self.__class__.use_npu = True |
48 | 54 |
49 | | - def init_input_output(self): |
50 | | - self.x = np.array([[1, 2], [3, 4], [5, 6]]).astype(self.dtype) |
51 | | - self.index = np.array([1, 2]).astype(np.int) |
52 | | - self.out = np.array([[3, 4], [5, 6]]).astype(self.dtype) |
53 | | - |
54 | | - def init_dtype(self): |
55 | | - self.dtype = np.float32 |
56 | | - |
57 | 55 | def test_check_output(self): |
58 | 56 | self.check_output_with_place(self.place, check_dygraph=False) |
59 | 57 |
| 58 | + def test_check_grad(self): |
| 59 | + self.check_grad_with_place( |
| 60 | + self.place, ['X'], |
| 61 | + 'Out', |
| 62 | + max_relative_error=0.006, |
| 63 | + check_dygraph=False) |
60 | 64 |
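check_grad_with_place validates the NPU kernel's analytic gradient against a numeric estimate, with max_relative_error=0.006 bounding the allowed mismatch. Roughly, the numeric side is a central finite difference, as in this simplified sketch (illustrative only, not OpTest's actual implementation):

    import numpy as np

    def numeric_grad(f, x, eps=1e-3):
        # Central-difference estimate of df/dx for a scalar-valued f.
        g = np.zeros_like(x)
        it = np.nditer(x, flags=['multi_index'])
        while not it.finished:
            i = it.multi_index
            orig = x[i]
            x[i] = orig + eps
            fp = f(x)
            x[i] = orig - eps
            fm = f(x)
            x[i] = orig
            g[i] = (fp - fm) / (2 * eps)
            it.iternext()
        return g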
61 | | -@unittest.skipIf(not paddle.is_compiled_with_npu(), |
62 | | - "core is not compiled with NPU") |
63 | | -class TestGatherAPI(unittest.TestCase): |
64 | | - def test_name(self): |
65 | | - with paddle.static.program_guard(paddle.static.Program()): |
66 | | - x = paddle.static.data(name="x", shape=[3, 2], dtype="float32") |
67 | | - index = paddle.static.data(name='index', shape=[1], dtype='int32') |
68 | | - |
69 | | - out = paddle.gather(x, index, name='gather') |
70 | | - self.assertEqual(('gather' in out.name), True) |
| 65 | + def config(self): |
| 66 | + """ |
| 67 | + For multi-dimensional input
| 68 | + """ |
| 69 | + self.x_shape = (10, 20) |
| 70 | + self.x_type = "float32" |
| 71 | + self.index = [1, 3, 5] |
| 72 | + self.index_type = "int32" |
71 | 73 |
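The expected output built in setUp is plain NumPy fancy indexing along axis 0, which agrees with gather_numpy at axis=0; a sketch using the shapes from config():

    import numpy as np

    xnp = np.random.random((10, 20)).astype("float32")
    index = np.array([1, 3, 5], dtype="int32")
    expected = xnp[index]  # rows 1, 3 and 5 -> shape (3, 20)
    assert np.array_equal(expected, gather_numpy(xnp, index, axis=0))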
72 | | - def test_static(self): |
73 | | - with paddle.static.program_guard(paddle.static.Program()): |
74 | 74 |
75 | | - x_np = np.array([[1, 2], [3, 4], [5, 6]]).astype('float32') |
76 | | - index_np = np.array([1, 2]).astype('int32') |
77 | | - |
78 | | - x = paddle.static.data(name="x", shape=[3, 2], dtype='float32') |
79 | | - index = paddle.static.data(name="index", shape=[2], dtype='int32') |
| 75 | +@unittest.skipIf(not paddle.is_compiled_with_npu(), |
| 76 | + "core is not compiled with NPU") |
| 77 | +class TestCase1(TestGatherOp): |
| 78 | + def config(self): |
| 79 | + """ |
| 80 | + For one-dimensional input
| 81 | + """ |
| 82 | + self.x_shape = (100, )
| 83 | + self.x_type = "float32" |
| 84 | + self.index = [1, 3, 5] |
| 85 | + self.index_type = "int32" |
80 | 86 |
81 | | - z = paddle.gather(x, index) |
82 | 87 |
| 88 | +@unittest.skipIf(not paddle.is_compiled_with_npu(), |
| 89 | + "core is not compiled with NPU") |
| 90 | +class API_TestGather(unittest.TestCase): |
| 91 | + def test_out1(self): |
| 92 | + with fluid.program_guard(fluid.Program(), fluid.Program()): |
| 93 | + data1 = fluid.layers.data('data1', shape=[-1, 2], dtype='float32') |
| 94 | + index = fluid.layers.data('index', shape=[-1, 1], dtype='int32') |
| 95 | + out = paddle.fluid.layers.gather(data1, index) |
| 96 | + place = paddle.NPUPlace(0) |
| 97 | + exe = fluid.Executor(place) |
| 98 | + input = np.array([[1, 2], [3, 4], [5, 6]]).astype('float32')
| 99 | + index_1 = np.array([1, 2]).astype('int32')
| 100 | + result, = exe.run(feed={"data1": input, |
| 101 | + "index": index_1}, |
| 102 | + fetch_list=[out]) |
| 103 | + expected_output = np.array([[3, 4], [5, 6]]) |
| 104 | + self.assertTrue(np.allclose(result, expected_output)) |
| 105 | + |
| 106 | + def test_out2(self): |
| 107 | + with paddle.static.program_guard(paddle.static.Program(), |
| 108 | + paddle.static.Program()): |
| 109 | + x = paddle.fluid.data('x', shape=[-1, 2], dtype='float32') |
| 110 | + index = paddle.fluid.data('index', shape=[-1, 1], dtype='int32') |
| 111 | + out = paddle.gather(x, index) |
83 | 112 | place = paddle.NPUPlace(0) |
84 | 113 | exe = paddle.static.Executor(place) |
85 | | - x_value, index_value, z_value = exe.run( |
86 | | - feed={"x": x_np, |
87 | | - "index": index_np}, fetch_list=[x, index, z]) |
88 | | - |
89 | | - z_expected = np.array([[3, 4], [5, 6]]) |
90 | | - self.assertEqual( |
91 | | - (x_value == x_np).all(), |
92 | | - True, |
93 | | - msg="x_value = {}, but expected {}".format(x_value, x_np)) |
94 | | - self.assertEqual( |
95 | | - (index_value == index_np).all(), |
96 | | - True, |
97 | | - msg="index_value = {}, but expected {}".format(index_value, |
98 | | - index_np)) |
99 | | - self.assertEqual( |
100 | | - (z_value == z_expected).all(), |
101 | | - True, |
102 | | - msg="z_value = {}, but expected {}".format(z_value, z_expected)) |
103 | | - |
104 | | - def test_backward(self): |
105 | | - # TODO(ascendrc): Test backward after add grad npu op implemented. |
106 | | - pass |
| 114 | + x_np = np.array([[1, 2], [3, 4], [5, 6]]).astype('float32') |
| 115 | + index_np = np.array([1, 1]).astype('int32') |
| 116 | + result, = exe.run(feed={"x": x_np, |
| 117 | + "index": index_np}, |
| 118 | + fetch_list=[out]) |
| 119 | + expected_output = gather_numpy(x_np, index_np, axis=0) |
| 120 | + self.assertTrue(np.allclose(result, expected_output)) |
107 | 121 |
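For comparison, the same gather semantics in imperative mode, shown on CPU only (the NPU tests above are static-graph, note check_dygraph=False):

    import numpy as np
    import paddle

    paddle.disable_static()  # imperative mode, CPU, just to show semantics
    x = paddle.to_tensor(np.array([[1, 2], [3, 4], [5, 6]], dtype="float32"))
    index = paddle.to_tensor(np.array([1, 1], dtype="int32"))
    out = paddle.gather(x, index)  # rows 1 and 1 -> [[3., 4.], [3., 4.]]
    paddle.enable_static()         # restore the module-level static mode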
108 | 122 |
109 | 123 | @unittest.skipIf(not paddle.is_compiled_with_npu(), |
110 | 124 | "core is not compiled with NPU") |
111 | | -class TestPowNet(unittest.TestCase): |
| 125 | +class TestGatherGrad(unittest.TestCase): |
112 | 126 | def _test(self, run_npu=True): |
113 | 127 | main_prog = paddle.static.Program() |
114 | 128 | startup_prog = paddle.static.Program() |
@@ -159,5 +173,5 @@ def test_npu(self): |
159 | 173 | self.assertTrue(np.allclose(npu_loss, cpu_loss)) |
160 | 174 |
161 | 175 |
162 | | -if __name__ == '__main__': |
| 176 | +if __name__ == "__main__": |
163 | 177 | unittest.main() |