
Commit 379216a

[Clean Fluid] Rm and mv some fluid dygraph apis (#48576)

Remove the fluid dygraph APIs GroupNorm and TreeConv; move the fluid dygraph APIs Flatten and SpectralNorm.

1 parent 592ed40 commit 379216a
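
The user-facing change for GroupNorm is a move from fluid.dygraph.nn to paddle.nn, with renamed constructor arguments. A minimal migration sketch, assuming Paddle 2.x in dynamic-graph (eager) mode; the input shape mirrors the one used in the updated tests:

import numpy as np
import paddle

# Before this commit: fluid.dygraph.nn.GroupNorm(channels=32, groups=4)
# After this commit, only the paddle.nn layer remains:
gn = paddle.nn.GroupNorm(num_channels=32, num_groups=4)

x = paddle.to_tensor(np.random.random((8, 32, 32)).astype("float32"))
y = gn(x)  # normalizes the 32 channels in 4 groups of 8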

File tree

10 files changed: +221 −788 lines changed


python/paddle/fluid/dygraph/nn.py

Lines changed: 0 additions & 422 deletions (large diff not rendered by default)

python/paddle/fluid/reader.py

Lines changed: 3 additions & 3 deletions
@@ -1351,9 +1351,9 @@ def __init__(
         self._use_double_buffer = use_double_buffer
         self._capacity = capacity
         if not self._iterable:
-            # Because layers.io.double_buffer is not supported anymore, and only when iterable and use_double_buffer
-            # are both True layers.io.double_buffer will be in use, here if itrable is False, use_double_buffer will be
-            # forcely set False to avoid using layers.io.double_buffer.
+            # Because layers.io.double_buffer is no longer supported, and the combination of
+            # iterable=False with use_double_buffer=True is also unsupported, use_double_buffer
+            # is forcibly set to False here to avoid unexpected errors.
             # TODO: keep use_double_buffer
             self._use_double_buffer = False
             self._init_non_iterable()
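
The rule described by the rewritten comment is a small truth table: double buffering survives only when the reader is iterable. A hypothetical helper (resolve_use_double_buffer is illustrative, not a Paddle API) makes the guard explicit:

def resolve_use_double_buffer(iterable: bool, use_double_buffer: bool) -> bool:
    # Mirrors the guard above: layers.io.double_buffer has been removed,
    # and iterable=False with use_double_buffer=True is unsupported, so
    # the flag is forcibly cleared for non-iterable readers.
    return use_double_buffer if iterable else False

assert resolve_use_double_buffer(True, True) is True
assert resolve_use_double_buffer(False, True) is False  # forcibly disabled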

python/paddle/fluid/tests/unittests/test_group_norm_op.py

Lines changed: 18 additions & 14 deletions
@@ -293,21 +293,25 @@ def attr_data_format():
 
 class TestGroupNormEager(unittest.TestCase):
     def test_dygraph_api(self):
-        self.dtype = np.float64
+
+        # float64 is not supported
+        # only float32 is supported
+        self.dtype = np.float32
+
         self.shape = (8, 32, 32)
         input = np.random.random(self.shape).astype(self.dtype)
 
         with fluid.dygraph.guard():
             tensor_1 = fluid.dygraph.to_variable(input)
             tensor_1.stop_gradient = False
-            groupNorm = fluid.dygraph.nn.GroupNorm(channels=32, groups=4)
+            groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
             ret1 = groupNorm(tensor_1)
             ret1.backward()
             with _test_eager_guard():
                 tensor_eager_1 = fluid.dygraph.to_variable(input)
                 tensor_eager_1.stop_gradient = False
-                groupNorm_eager = fluid.dygraph.nn.GroupNorm(
-                    channels=32, groups=4
+                groupNorm_eager = paddle.nn.GroupNorm(
+                    num_channels=32, num_groups=4
                 )
                 ret2 = groupNorm_eager(tensor_eager_1)
                 ret2.backward()
@@ -328,16 +332,14 @@ def test_dygraph_api(self):
         with fluid.dygraph.guard():
             tensor_1 = fluid.dygraph.to_variable(input)
             tensor_1.stop_gradient = False
-            groupNorm = fluid.dygraph.nn.GroupNorm(
-                channels=32, groups=4, dtype='float32'
-            )
+            groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
             ret1 = groupNorm(tensor_1)
             ret1.backward()
             with _test_eager_guard():
                 tensor_eager_1 = fluid.dygraph.to_variable(input)
                 tensor_eager_1.stop_gradient = False
-                groupNorm_eager = fluid.dygraph.nn.GroupNorm(
-                    channels=32, groups=4
+                groupNorm_eager = paddle.nn.GroupNorm(
+                    num_channels=32, num_groups=4
                 )
                 ret2 = groupNorm_eager(tensor_eager_1)
                 ret2.backward()
@@ -351,23 +353,25 @@ def test_dygraph_api(self):
 
 class TestGroupNormEager_fp16(unittest.TestCase):
     def test_dygraph_api(self):
+
+        # float16 is not supported
+        # only float32 is supported
         self.dtype = np.float32
+
         self.shape = (8, 32, 32)
         input = np.random.random(self.shape).astype(self.dtype)
 
         with fluid.dygraph.guard():
             tensor_1 = fluid.dygraph.to_variable(input)
             tensor_1.stop_gradient = False
-            groupNorm = fluid.dygraph.nn.GroupNorm(
-                channels=32, groups=4, dtype='float16'
-            )
+            groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
             ret1 = groupNorm(tensor_1)
             ret1.backward()
             with _test_eager_guard():
                 tensor_eager_1 = fluid.dygraph.to_variable(input)
                 tensor_eager_1.stop_gradient = False
-                groupNorm_eager = fluid.dygraph.nn.GroupNorm(
-                    channels=32, groups=4
+                groupNorm_eager = paddle.nn.GroupNorm(
+                    num_channels=32, num_groups=4
                 )
                 ret2 = groupNorm_eager(tensor_eager_1)
                 ret2.backward()

python/paddle/fluid/tests/unittests/test_group_norm_op_v2.py

Lines changed: 0 additions & 101 deletions
@@ -19,7 +19,6 @@
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid import Program, program_guard
 from paddle.fluid.framework import _test_eager_guard
 
 
@@ -39,106 +38,6 @@ def group_norm_naive_for_general_dimension(x, scale, bias, epsilon, groups):
     return output
 
 
-class TestDygraphGroupNormv2(unittest.TestCase):
-    def test_dygraph(self):
-        places = [fluid.CPUPlace()]
-        if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"):
-            places.append(fluid.CUDAPlace(0))
-        shapes = [
-            [2, 2, 2, 2],
-            [2, 2, 4],
-            [4, 2],
-            [4, 2, 6, 6, 2],
-            [2, 2, 2, 2, 2, 2],
-        ]
-        for p in places:
-
-            def compute_v1(x):
-                with fluid.dygraph.guard(p):
-                    gn = fluid.dygraph.GroupNorm(channels=2, groups=2)
-                    y = gn(fluid.dygraph.to_variable(x))
-                return y.numpy()
-
-            def compute_v2(x):
-                with fluid.dygraph.guard(p):
-                    gn = paddle.nn.GroupNorm(num_channels=2, num_groups=2)
-                    y = gn(fluid.dygraph.to_variable(x))
-                return y.numpy()
-
-            def test_weight_bias_false():
-                with fluid.dygraph.guard(p):
-                    gn = paddle.nn.GroupNorm(
-                        num_channels=2,
-                        num_groups=2,
-                        weight_attr=False,
-                        bias_attr=False,
-                    )
-
-            def test_nn_exception():
-                with fluid.dygraph.guard(p):
-
-                    def attr_data_format():
-                        out = paddle.nn.GroupNorm(
-                            num_groups=2, num_channels=2, data_format="CNHW"
-                        )
-
-                    self.assertRaises(ValueError, attr_data_format)
-
-            for shape in shapes:
-                x = np.random.randn(*shape).astype("float32")
-                y1 = compute_v1(x)
-                y2 = compute_v2(x)
-                result = np.allclose(y1, y2, atol=1e-5)
-                if not result:
-                    print("y1:", y1, "\ty2:", y2)
-                self.assertTrue(result)
-            test_weight_bias_false()
-            test_nn_exception()
-
-    def test_static(self):
-        paddle.enable_static()
-        places = [fluid.CPUPlace()]
-        if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"):
-            places.append(fluid.CUDAPlace(0))
-        shapes = [
-            [2, 6, 2, 2],
-            [2, 6, 4],
-            [4, 6],
-            [4, 6, 6, 6, 2],
-            [4, 6, 2, 2, 2, 2],
-        ]
-        for p in places:
-            exe = fluid.Executor(p)
-
-            def compute_v1(x_np):
-                with program_guard(Program(), Program()):
-                    gn = fluid.dygraph.GroupNorm(channels=6, groups=2)
-                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
-                    y = gn(x)
-                    exe.run(fluid.default_startup_program())
-                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
-                return r
-
-            def compute_v2(x_np):
-                with program_guard(Program(), Program()):
-                    gn = paddle.nn.GroupNorm(num_channels=6, num_groups=2)
-                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
-                    y = gn(x)
-                    exe.run(fluid.default_startup_program())
-                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
-                return r
-
-            for shape in shapes:
-                x = np.random.randn(*shape).astype("float32")
-                y1 = compute_v1(x)
-                y2 = compute_v2(x)
-                np.testing.assert_allclose(y1, y2, rtol=1e-05, atol=1e-05)
-
-    def test_eager_api(self):
-        with _test_eager_guard():
-            self.test_dygraph()
-
-
 class TestGroupNormAPIV2_With_General_Dimensions(unittest.TestCase):
     def test_numerical_accuracy(self):
         paddle.disable_static()
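
With the fluid-vs-v2 comparison test removed, paddle.nn.GroupNorm can still be sanity-checked against a naive NumPy reference in the spirit of this file's group_norm_naive_for_general_dimension. A minimal sketch, assuming the layer's default weight/bias initialize to ones/zeros and epsilon defaults to 1e-5:

import numpy as np
import paddle

def group_norm_naive(x, groups, epsilon=1e-5):
    # Normalize each group of channels together with all trailing dims.
    n = x.shape[0]
    g = x.reshape((n, groups, -1))
    mean = g.mean(axis=-1, keepdims=True)
    var = g.var(axis=-1, keepdims=True)
    return ((g - mean) / np.sqrt(var + epsilon)).reshape(x.shape)

x = np.random.randn(2, 6, 4, 4).astype("float32")
gn = paddle.nn.GroupNorm(num_channels=6, num_groups=2)
y = gn(paddle.to_tensor(x)).numpy()
np.testing.assert_allclose(y, group_norm_naive(x, 2), rtol=1e-5, atol=1e-5)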

python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py

Lines changed: 5 additions & 5 deletions
@@ -21,7 +21,7 @@
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.framework as framework
-from paddle.fluid.dygraph.nn import BatchNorm, Embedding, GroupNorm
+from paddle.fluid.dygraph.nn import BatchNorm, Embedding
 from paddle.nn import Linear
 
 
@@ -122,10 +122,10 @@ def testLoadStaticModel(self):
             name='groupnorm_in', shape=[None, 8, 32, 32], dtype='float32'
         )
         groupnorm_out1 = paddle.static.nn.group_norm(
-            input=groupnorm_in, groups=4
+            input=groupnorm_in, groups=4, param_attr=True, bias_attr=True
         )
         groupnorm_out2 = paddle.static.nn.group_norm(
-            input=groupnorm_in, groups=4
+            input=groupnorm_in, groups=4, param_attr=True, bias_attr=True
         )
         '''
         spec_norm = fluid.data(name='spec_norm', shape=[2, 8, 32, 32], dtype='float32')
@@ -212,8 +212,8 @@ def __init__(self):
         self.layer_norm_1 = paddle.nn.LayerNorm([10])
         self.layer_norm_2 = paddle.nn.LayerNorm(10)
 
-        self.group_norm1 = GroupNorm(8, 4)
-        self.gourp_norm2 = GroupNorm(8, 4)
+        self.group_norm1 = paddle.nn.GroupNorm(4, 8)
+        self.gourp_norm2 = paddle.nn.GroupNorm(4, 8)
 
         self.w_1 = self.create_parameter(
             [100, 100], dtype='float32', attr="weight_test_1"
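
Note the swapped positional order in the last hunk: the removed fluid layer was constructed as GroupNorm(channels, groups), while paddle.nn.GroupNorm takes num_groups first. A quick sketch of the equivalence:

import paddle

# fluid.dygraph.nn.GroupNorm(8, 4) meant channels=8, groups=4 (removed).
# The paddle.nn replacement puts num_groups first:
gn_positional = paddle.nn.GroupNorm(4, 8)
gn_keyword = paddle.nn.GroupNorm(num_groups=4, num_channels=8)  # same layer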
