Skip to content

Commit c35c58a

Browse files
committed
update
1 parent d74f91a commit c35c58a

File tree

5 files changed

+565
-1
lines changed

5 files changed

+565
-1
lines changed

python/paddle/fluid/tests/unittests/test_layers.py

Lines changed: 259 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2021,6 +2021,265 @@ def test_type():
20212021

20222022
self.assertRaises(TypeError, test_type)
20232023

2024+
def test_spectral_norm(self):
    """Check that SpectralNorm produces matching results across four
    execution paths: functional static API, layer static API, and the
    dynamic graph in both eager and legacy modes.
    """
    # Prefer GPU when this build supports it.
    place = (
        core.CUDAPlace(0) if core.is_compiled_with_cuda() else core.CPUPlace()
    )

    shape = (2, 4, 3, 3)
    weight_np = np.random.random(shape).astype('float32')

    # Path 1: static graph via the functional layers.spectral_norm API.
    with self.static_graph():
        weight_var = fluid.layers.data(
            name='Weight',
            shape=shape,
            dtype='float32',
            lod_level=1,
            append_batch_size=False,
        )
        out = layers.spectral_norm(weight=weight_var, dim=1, power_iters=2)
        static_out = self.get_static_graph_result(
            feed={
                'Weight': fluid.create_lod_tensor(
                    data=weight_np, recursive_seq_lens=[[1, 1]], place=place
                ),
            },
            fetch_list=[out],
            with_lod=True,
        )[0]

    # Path 2: static graph via the paddle.nn.SpectralNorm layer.
    with self.static_graph():
        weight_var = fluid.layers.data(
            name='Weight',
            shape=shape,
            dtype='float32',
            lod_level=1,
            append_batch_size=False,
        )
        sn_layer = paddle.nn.SpectralNorm(shape, dim=1, power_iters=2)
        out = sn_layer(weight_var)
        static_out2 = self.get_static_graph_result(
            feed={
                'Weight': fluid.create_lod_tensor(
                    data=weight_np, recursive_seq_lens=[[1, 1]], place=place
                )
            },
            fetch_list=[out],
            with_lod=True,
        )[0]

    # Paths 3 and 4: dynamic graph, eager mode then legacy mode.
    with self.dynamic_graph():
        with _test_eager_guard():
            sn_layer = paddle.nn.SpectralNorm(shape, dim=1, power_iters=2)
            eager_out = sn_layer(base.to_variable(weight_np)).numpy()

        sn_layer = paddle.nn.SpectralNorm(shape, dim=1, power_iters=2)
        dygraph_out = sn_layer(base.to_variable(weight_np)).numpy()

    # All execution paths must agree within tolerance.
    np.testing.assert_allclose(static_out, dygraph_out, rtol=1e-05)
    np.testing.assert_allclose(static_out, eager_out, rtol=1e-05)
    np.testing.assert_allclose(static_out, static_out2, rtol=1e-05)
2089+
def test_tree_conv(self):
    """Check TreeConv agreement across the functional static API, the
    layer static API, and the dynamic graph (eager + legacy), then verify
    parameter copying and sharing between two layer instances.
    """
    # Prefer GPU when this build supports it.
    place = (
        core.CUDAPlace(0) if core.is_compiled_with_cuda() else core.CPUPlace()
    )

    # A small fixed tree: 9 parent-child edges over 10 nodes.
    edge_pairs = [1, 2, 1, 3, 1, 4, 1, 5, 2, 6, 2, 7, 2, 8, 4, 9, 4, 10]
    adj = np.array(edge_pairs).reshape((1, 9, 2)).astype('int32')
    adj = np.tile(adj, (1, 1, 1))
    vectors = np.random.random((1, 10, 5)).astype('float32')

    # Path 1: static graph via fluid.contrib.layers.tree_conv.
    with self.static_graph():
        nodes_var = fluid.layers.data(
            name='NodesVector',
            shape=(1, 10, 5),
            dtype='float32',
            lod_level=1,
            append_batch_size=False,
        )
        edges_var = fluid.layers.data(
            name='EdgeSet',
            shape=(1, 9, 2),
            dtype='int32',
            lod_level=1,
            append_batch_size=False,
        )
        out = fluid.contrib.layers.tree_conv(
            nodes_vector=nodes_var,
            edge_set=edges_var,
            output_size=6,
            num_filters=1,
            max_depth=2,
        )
        static_out = self.get_static_graph_result(
            feed={
                'NodesVector': fluid.create_lod_tensor(
                    data=vectors, recursive_seq_lens=[[1]], place=place
                ),
                'EdgeSet': fluid.create_lod_tensor(
                    data=adj, recursive_seq_lens=[[1]], place=place
                ),
            },
            fetch_list=[out],
            with_lod=False,
        )[0]

    # Path 2: static graph via the nn.TreeConv layer.
    with self.static_graph():
        nodes_var = fluid.layers.data(
            name='NodesVector',
            shape=(1, 10, 5),
            dtype='float32',
            lod_level=1,
            append_batch_size=False,
        )
        edges_var = fluid.layers.data(
            name='EdgeSet',
            shape=(1, 9, 2),
            dtype='int32',
            lod_level=1,
            append_batch_size=False,
        )
        tree_conv = nn.TreeConv(
            feature_size=5, output_size=6, num_filters=1, max_depth=2
        )
        out = tree_conv(nodes_var, edges_var)
        static_out2 = self.get_static_graph_result(
            feed={
                'NodesVector': fluid.create_lod_tensor(
                    data=vectors, recursive_seq_lens=[[1]], place=place
                ),
                'EdgeSet': fluid.create_lod_tensor(
                    data=adj, recursive_seq_lens=[[1]], place=place
                ),
            },
            fetch_list=[out],
            with_lod=False,
        )[0]

    # Paths 3 and 4: dynamic graph, eager mode then legacy mode.
    with self.dynamic_graph():
        with _test_eager_guard():
            tree_conv = nn.TreeConv(
                feature_size=5, output_size=6, num_filters=1, max_depth=2
            )
            eager_out = tree_conv(
                base.to_variable(vectors), base.to_variable(adj)
            ).numpy()

        tree_conv = nn.TreeConv(
            feature_size=5, output_size=6, num_filters=1, max_depth=2
        )
        dygraph_out = tree_conv(
            base.to_variable(vectors), base.to_variable(adj)
        ).numpy()

    np.testing.assert_allclose(static_out, static_out2, rtol=1e-05)
    np.testing.assert_allclose(static_out, dygraph_out, rtol=1e-05)
    np.testing.assert_allclose(static_out, eager_out, rtol=1e-05)

    # Parameter copy/share semantics, in eager mode and then legacy mode.
    with self.dynamic_graph():
        with _test_eager_guard():
            custom_weight = np.random.randn(5, 3, 6, 1).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            conv_a = nn.TreeConv(
                feature_size=5,
                output_size=6,
                num_filters=1,
                max_depth=2,
                bias_attr='eager_tc1_b',
            )
            conv_b = nn.TreeConv(
                feature_size=5,
                output_size=6,
                num_filters=1,
                max_depth=2,
                param_attr=weight_attr,
                bias_attr='eager_tc2_b',
            )
            out_a = conv_a(base.to_variable(vectors), base.to_variable(adj))
            out_b = conv_b(base.to_variable(vectors), base.to_variable(adj))
            # Different initializations must give different outputs.
            self.assertFalse(np.array_equal(out_a.numpy(), out_b.numpy()))
            # Copy parameters by value; outputs must then coincide.
            conv_b.weight.set_value(conv_a.weight.numpy())
            conv_b.bias.set_value(conv_a.bias)
            out_a = conv_a(base.to_variable(vectors), base.to_variable(adj))
            out_b = conv_b(base.to_variable(vectors), base.to_variable(adj))
            np.testing.assert_array_equal(out_a.numpy(), out_b.numpy())

            # Share parameters by reference.
            conv_b.weight = conv_a.weight
            conv_b.bias = conv_a.bias
            np.testing.assert_array_equal(
                conv_a.weight.numpy(), conv_b.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv_a.bias.numpy(), conv_b.bias.numpy()
            )

        # Same checks in legacy dygraph mode.
        custom_weight = np.random.randn(5, 3, 6, 1).astype("float32")
        weight_attr = fluid.ParamAttr(
            initializer=fluid.initializer.NumpyArrayInitializer(
                custom_weight
            )
        )
        conv_a = nn.TreeConv(
            feature_size=5,
            output_size=6,
            num_filters=1,
            max_depth=2,
            bias_attr='tc1_b',
        )
        conv_b = nn.TreeConv(
            feature_size=5,
            output_size=6,
            num_filters=1,
            max_depth=2,
            param_attr=weight_attr,
            bias_attr='tc2_b',
        )
        out_a = conv_a(base.to_variable(vectors), base.to_variable(adj))
        out_b = conv_b(base.to_variable(vectors), base.to_variable(adj))
        self.assertFalse(np.array_equal(out_a.numpy(), out_b.numpy()))
        conv_b.weight.set_value(conv_a.weight.numpy())
        conv_b.bias.set_value(conv_a.bias)
        out_a = conv_a(base.to_variable(vectors), base.to_variable(adj))
        out_b = conv_b(base.to_variable(vectors), base.to_variable(adj))
        np.testing.assert_array_equal(out_a.numpy(), out_b.numpy())

        conv_b.weight = conv_a.weight
        conv_b.bias = conv_a.bias
        np.testing.assert_array_equal(
            conv_a.weight.numpy(), conv_b.weight.numpy()
        )
        np.testing.assert_array_equal(
            conv_a.bias.numpy(), conv_b.bias.numpy()
        )
2282+
20242283
def test_conv3d_transpose(self):
20252284
input_array = (
20262285
np.arange(0, 48).reshape([2, 3, 2, 2, 2]).astype('float32')

0 commit comments

Comments
 (0)