
Commit 223c01f

fix iscan python bug (#35148)
1 parent 289e181 commit 223c01f

2 files changed: +16 additions, -25 deletions

python/paddle/fluid/tests/unittests/test_gradient_clip.py

Lines changed: 11 additions & 20 deletions
@@ -56,6 +56,7 @@ def setUp(self):
         self.BATCH_SIZE = 2
         reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100)
         self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE)
+        self.clip_gradient = lambda x: None
         self.init()

     def init(self):
@@ -67,9 +68,6 @@ def get_places(self):
             places.append(fluid.CUDAPlace(0))
         return places

-    def clip_gradient(self, params_grads):
-        pass
-
     def check_clip_result(self, out, out_clip):
         pass

@@ -132,7 +130,6 @@ def check_sparse_gradient_clip(self, place):
         data = next(self.train_data())
         val = exe.run(prog, feed=feeder.feed(data), fetch_list=[cost])[0]
         self.assertEqual((1, ), val.shape)
-        print(val)
         self.assertFalse(np.isnan(val))

     def backward_and_optimize(self, cost):
@@ -143,11 +140,6 @@ class TestGradientClipByGlobalNorm(TestGradientClip):
     def init(self):
         self.clip_norm = 0.2

-    def clip_gradient(self, params_grads):
-        clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm)
-        print(clip)
-        return clip(params_grads)
-
     def check_clip_result(self, out, out_clip):
         global_norm = 0
         for v in out:
@@ -179,7 +171,6 @@ def func(params_grads):
     def test_new_gradient_clip(self):
         def func(params_grads):
             clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm)
-            print(clip)
             return clip(params_grads)

         self.clip_gradient = func
@@ -236,11 +227,6 @@ class TestGradientClipByNorm(TestGradientClip):
     def init(self):
         self.clip_norm = 0.2

-    def clip_gradient(self, params_grads):
-        clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm)
-        print(clip)
-        return clip(params_grads)
-
     def check_clip_result(self, out, out_clip):
         for u, v in zip(out, out_clip):
             norm = np.sqrt(np.sum(np.power(u, 2)))
@@ -253,6 +239,11 @@ def check_clip_result(self, out, out_clip):

     # test whether the ouput is right when use grad_clip
     def test_gradient_clip(self):
+        def func(params_grads):
+            clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm)
+            return clip(params_grads)
+
+        self.clip_gradient = func
         self.check_gradient_clip(fluid.CPUPlace())

     # if grad is None or not need clip
@@ -280,11 +271,6 @@ def init(self):
         self.max = 0.2
         self.min = 0.1

-    def clip_gradient(self, params_grads):
-        clip = fluid.clip.GradientClipByValue(max=self.max, min=self.min)
-        print(clip)
-        return clip(params_grads)
-
     def check_clip_result(self, out, out_clip):
         for i, v in enumerate(out):
             out[i] = np.clip(v, self.min, self.max)
@@ -297,6 +283,11 @@ def check_clip_result(self, out, out_clip):

     # test whether the ouput is right when use grad_clip
     def test_gradient_clip(self):
+        def func(params_grads):
+            clip = fluid.clip.GradientClipByValue(max=self.max, min=self.min)
+            return clip(params_grads)
+
+        self.clip_gradient = func
         self.check_gradient_clip(fluid.CPUPlace())

     # if grad is None or not need clip
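
The test refactor replaces the overridable clip_gradient method with an instance attribute: setUp installs a no-op default (lambda x: None), and each test assigns its own local func, so the clipping behaviour is chosen per test rather than per subclass. Below is a minimal sketch of that pattern using plain unittest; the class names, the params_grads format, and the clipping math are illustrative stand-ins, not code from the repository.

import unittest


class TestClipBase(unittest.TestCase):
    def setUp(self):
        # Default: leave gradients untouched. Tests overwrite this attribute
        # rather than overriding a method, mirroring the commit's move from
        # `def clip_gradient(...)` to `self.clip_gradient = func`.
        self.clip_gradient = lambda params_grads: params_grads

    def run_clip(self, params_grads):
        # params_grads: list of (param_name, grad_value) pairs in this sketch.
        return self.clip_gradient(params_grads)


class TestClipByValue(TestClipBase):
    def test_clip(self):
        def func(params_grads):
            # Stand-in for fluid.clip.GradientClipByValue(max=0.2, min=0.1).
            return [(p, min(max(g, 0.1), 0.2)) for p, g in params_grads]

        self.clip_gradient = func
        out = self.run_clip([("w", 0.5), ("b", 0.05)])
        self.assertEqual(out, [("w", 0.2), ("b", 0.1)])


if __name__ == "__main__":
    unittest.main()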

python/paddle/tensor/to_string.py

Lines changed: 5 additions & 5 deletions
@@ -101,14 +101,14 @@ def _to_sumary(var):
         return var
     elif len(var.shape) == 1:
         if var.shape[0] > 2 * edgeitems:
-            return np.concatenate([var[:edgeitems], var[-edgeitems:]])
+            return np.concatenate([var[:edgeitems], var[(-1 * edgeitems):]])
         else:
             return var
     else:
         # recursively handle all dimensions
         if var.shape[0] > 2 * edgeitems:
             begin = [x for x in var[:edgeitems]]
-            end = [x for x in var[-edgeitems:]]
+            end = [x for x in var[(-1 * edgeitems):]]
             return np.stack([_to_sumary(x) for x in (begin + end)])
         else:
             return np.stack([_to_sumary(x) for x in var])
@@ -162,10 +162,10 @@ def _format_tensor(var, sumary, indent=0, max_width=0, signed=False):
         if sumary and var.shape[0] > 2 * edgeitems:
             items = [
                 _format_item(item, max_width, signed)
-                for item in list(var)[:DEFAULT_PRINT_OPTIONS.edgeitems]
+                for item in list(var)[:edgeitems]
             ] + ['...'] + [
                 _format_item(item, max_width, signed)
-                for item in list(var)[-DEFAULT_PRINT_OPTIONS.edgeitems:]
+                for item in list(var)[(-1 * edgeitems):]
             ]
         else:
             items = [
@@ -181,7 +181,7 @@ def _format_tensor(var, sumary, indent=0, max_width=0, signed=False):
                 for x in var[:edgeitems]
             ] + ['...'] + [
                 _format_tensor(x, sumary, indent + 1, max_width, signed)
-                for x in var[-edgeitems:]
+                for x in var[(-1 * edgeitems):]
             ]
         else:
             vars = [
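
The behaviour of to_string.py is unchanged: var[(-1 * edgeitems):] selects the same trailing slice as var[-edgeitems:] for a positive edgeitems, and the rewrite presumably just avoids the pattern the iscan check flagged; the other change reads the local edgeitems instead of DEFAULT_PRINT_OPTIONS.edgeitems again, which is consistent since the local is assigned from it earlier in the function. A small standalone check of the slicing equivalence, written with plain NumPy (no Paddle imports) and following the _to_sumary logic for a 1-D array longer than 2 * edgeitems:

import numpy as np

edgeitems = 3
var = np.arange(10)

# Both spellings pick the last `edgeitems` elements.
assert np.array_equal(var[-edgeitems:], var[(-1 * edgeitems):])

# Summary view: keep the first and last `edgeitems` entries, as _to_sumary
# does for a 1-D array whose length exceeds 2 * edgeitems.
summary = np.concatenate([var[:edgeitems], var[(-1 * edgeitems):]])
print(summary)  # [0 1 2 7 8 9]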
