@@ -56,6 +56,7 @@ def setUp(self):
         self.BATCH_SIZE = 2
         reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100)
         self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE)
+        self.clip_gradient = lambda x: None
         self.init()

     def init(self):
@@ -67,9 +68,6 @@ def get_places(self):
             places.append(fluid.CUDAPlace(0))
         return places

-    def clip_gradient(self, params_grads):
-        pass
-
     def check_clip_result(self, out, out_clip):
         pass

@@ -132,7 +130,6 @@ def check_sparse_gradient_clip(self, place):
         data = next(self.train_data())
         val = exe.run(prog, feed=feeder.feed(data), fetch_list=[cost])[0]
         self.assertEqual((1, ), val.shape)
-        print(val)
         self.assertFalse(np.isnan(val))

     def backward_and_optimize(self, cost):
@@ -143,11 +140,6 @@ class TestGradientClipByGlobalNorm(TestGradientClip):
     def init(self):
         self.clip_norm = 0.2

-    def clip_gradient(self, params_grads):
-        clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm)
-        print(clip)
-        return clip(params_grads)
-
     def check_clip_result(self, out, out_clip):
         global_norm = 0
         for v in out:
@@ -179,7 +171,6 @@ def func(params_grads):
     def test_new_gradient_clip(self):
         def func(params_grads):
             clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm)
-            print(clip)
             return clip(params_grads)

         self.clip_gradient = func
@@ -236,11 +227,6 @@ class TestGradientClipByNorm(TestGradientClip):
     def init(self):
         self.clip_norm = 0.2

-    def clip_gradient(self, params_grads):
-        clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm)
-        print(clip)
-        return clip(params_grads)
-
     def check_clip_result(self, out, out_clip):
         for u, v in zip(out, out_clip):
             norm = np.sqrt(np.sum(np.power(u, 2)))
@@ -253,6 +239,11 @@ def check_clip_result(self, out, out_clip):

     # test whether the output is right when grad_clip is used
     def test_gradient_clip(self):
+        def func(params_grads):
+            clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm)
+            return clip(params_grads)
+
+        self.clip_gradient = func
         self.check_gradient_clip(fluid.CPUPlace())

     # if grad is None or does not need clipping
@@ -280,11 +271,6 @@ def init(self):
         self.max = 0.2
         self.min = 0.1

-    def clip_gradient(self, params_grads):
-        clip = fluid.clip.GradientClipByValue(max=self.max, min=self.min)
-        print(clip)
-        return clip(params_grads)
-
     def check_clip_result(self, out, out_clip):
         for i, v in enumerate(out):
             out[i] = np.clip(v, self.min, self.max)
@@ -297,6 +283,11 @@ def check_clip_result(self, out, out_clip):

     # test whether the output is right when grad_clip is used
     def test_gradient_clip(self):
+        def func(params_grads):
+            clip = fluid.clip.GradientClipByValue(max=self.max, min=self.min)
+            return clip(params_grads)
+
+        self.clip_gradient = func
         self.check_gradient_clip(fluid.CPUPlace())

     # if grad is None or does not need clipping
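
Note on the refactor (not part of the diff): the per-class clip_gradient overrides are replaced by a default no-op callback set in setUp (self.clip_gradient = lambda x: None), and each test installs its own closure before calling check_gradient_clip. The sketch below shows how such a callback is presumably consumed when building the backward pass; the bodies of check_gradient_clip and backward_and_optimize are not visible in this diff, so the call site, the helper name build_clipped_backward, and the handling of the no-op return value are assumptions. Only the fluid.clip.GradientClipBy* classes and the clip(params_grads) call pattern come from the code above.

import paddle.fluid as fluid


def build_clipped_backward(cost, clip_gradient):
    # append_backward returns a list of (parameter, gradient) pairs for `cost`.
    params_grads = fluid.backward.append_backward(cost)
    # `clip_gradient` is either the no-op lambda from setUp() or a per-test
    # closure such as:
    #     def func(params_grads):
    #         clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=0.2)
    #         return clip(params_grads)
    clipped = clip_gradient(params_grads)
    # Fall back to the unclipped pairs when the no-op callback returns None
    # (assumption: the harness tolerates a None return from the default lambda).
    return params_grads if clipped is None else clipped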