@@ -22,9 +22,10 @@
 import paddle.fluid as fluid
 from paddle.fluid import core
 import paddle.nn.functional as F
+
 from test_log_softmax import ref_log_softmax, ref_log_softmax_grad
+
 paddle.enable_static()
-np.random.seed(10)
 
 
 class TestLogSoftmaxNPUOp(OpTest):
@@ -55,10 +56,16 @@ def set_dtype(self):
         pass
 
     def test_check_output(self):
-        self.check_output_with_place(self.place)
+        if self.dtype == np.float16:
+            self.check_output_with_place(self.place, atol=1e-2)
+        else:
+            self.check_output_with_place(self.place)
 
     def test_check_grad(self):
-        pass
+        if self.dtype == np.float16:
+            return
+        self.check_grad_with_place(
+            self.place, ['X'], ['Out'], user_defined_grads=[self.x_grad])
 
 
 def test_class(op_type, typename):
@@ -88,8 +95,73 @@ def set_dtype(self):
     globals()[cls_name] = TestLogSoftmaxAxis
 
 
-for _typename in {'float32'}:
+for _typename in {np.float32, np.float16}:
     test_class("logsoftmax", _typename)
     test_class2("logsoftmax", _typename)
+
+
+class TestNNLogSoftmaxAPI(unittest.TestCase):
+    def setUp(self):
+        self.x_shape = [2, 3, 4, 5]
+        self.x = np.random.uniform(-1., 1., self.x_shape).astype(np.float32)
+        self.place = paddle.NPUPlace(0) \
+            if paddle.fluid.core.is_compiled_with_npu() \
+            else paddle.CPUPlace()
+
+    def check_api(self, axis=-1):
+        ref_out = np.apply_along_axis(ref_log_softmax, axis, self.x)
+
+        logsoftmax = paddle.nn.LogSoftmax(axis)
+        # test static api
+        with paddle.static.program_guard(paddle.static.Program()):
+            x = paddle.fluid.data(name='x', shape=self.x_shape)
+            y = logsoftmax(x)
+            exe = paddle.static.Executor(self.place)
+            out = exe.run(feed={'x': self.x}, fetch_list=[y])
+        self.assertTrue(np.allclose(out[0], ref_out))
+
+        # test dygraph api
+        paddle.disable_static(self.place)
+        x = paddle.to_tensor(self.x)
+        y = logsoftmax(x)
+        self.assertTrue(np.allclose(y.numpy(), ref_out))
+        paddle.enable_static()
+
+    def test_check_api(self):
+        for axis in [-1, 1]:
+            self.check_api(axis)
+
+
+class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
+    def setUp(self):
+        self.x_shape = [2, 3, 4, 5]
+        self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
+        self.place = paddle.NPUPlace(0) \
+            if paddle.fluid.core.is_compiled_with_npu() \
+            else paddle.CPUPlace()
+
+    def check_api(self, axis=-1, dtype=None):
+        x = self.x.copy()
+        if dtype is not None:
+            x = x.astype(dtype)
+        ref_out = np.apply_along_axis(ref_log_softmax, axis, x)
+        with paddle.static.program_guard(paddle.static.Program()):
+            x = paddle.fluid.data(name='x', shape=self.x_shape)
+            y = F.log_softmax(x, axis, dtype)
+            exe = paddle.static.Executor(self.place)
+            out = exe.run(feed={'x': self.x}, fetch_list=[y])
+        self.assertTrue(np.allclose(out[0], ref_out))
+
+        paddle.disable_static(self.place)
+        x = paddle.to_tensor(self.x)
+        y = F.log_softmax(x, axis, dtype)
+        self.assertTrue(np.allclose(y.numpy(), ref_out))
+        paddle.enable_static()
+
+    def test_check_api(self):
+        for axis in [-1, 1]:
+            self.check_api(axis)
+
+
 if __name__ == '__main__':
     unittest.main()
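
Note: the helpers imported in the first hunk, `ref_log_softmax` and `ref_log_softmax_grad`, live in `test_log_softmax` and are not shown in this diff. For context, a minimal NumPy sketch of what such a reference typically looks like (the names mirror the imports; the bodies and the `dout = 1/x.size` convention for OpTest's default upstream gradient are assumptions, not the actual contents of that file):

```python
import numpy as np

def ref_log_softmax(x):
    # Numerically stable log-softmax over a 1-D slice: shift by the
    # max before exponentiating so np.exp cannot overflow.
    shiftx = x - np.max(x)
    return shiftx - np.log(np.exp(shiftx).sum())

def ref_log_softmax_grad(x, axis):
    # Analytic gradient dx = dout - softmax(x) * sum(dout, axis),
    # evaluated at dout = 1/x.size (assumed default upstream gradient).
    out = np.apply_along_axis(ref_log_softmax, axis, x)
    dout = np.full_like(x, fill_value=1.0 / x.size)
    return dout - np.exp(out) * dout.sum(axis=axis, keepdims=True)

# Sanity check: exp(log_softmax(x)) must sum to 1 along the axis.
x = np.random.uniform(-1.0, 1.0, [2, 3, 4, 5]).astype(np.float32)
out = np.apply_along_axis(ref_log_softmax, -1, x)
assert np.allclose(np.exp(out).sum(axis=-1), 1.0, atol=1e-5)
```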