1) AI Studio
2) V100
3) Paddle 1.8.0
- Reproduction info:
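The reproduction code itself is not included in the report, so the following is a minimal sketch reconstructed from the frames in the traceback below. The model name `track`, the Momentum-style optimizer (suggested by the `velocity_acc` frame), and the numpy-typed per-parameter learning rate are assumptions, not confirmed details of the original script:

```python
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    # Assumption: a per-parameter learning-rate scale built from a numpy
    # scalar; the failing fill_constant cast at the bottom of the trace
    # receives exactly this kind of non-builtin Python value.
    w_attr = fluid.ParamAttr(learning_rate=np.float32(2.0))
    track = fluid.dygraph.Linear(10, 1, param_attr=w_attr)  # stand-in model

    # The velocity accumulator in _append_optimize_op points at a
    # Momentum-style optimizer; the exact class and hyperparameters
    # are guesses.
    optimizer = fluid.optimizer.MomentumOptimizer(
        learning_rate=0.01, momentum=0.9,
        parameter_list=track.parameters())

    x = fluid.dygraph.to_variable(np.random.rand(4, 10).astype('float32'))
    avg_loss = fluid.layers.mean(track(x))
    avg_loss.backward()
    # Mirrors line 158 of the original script.
    optimizer.minimize(avg_loss, parameter_list=track.parameters())
```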
RuntimeError Traceback (most recent call last)
156 avg_loss.backward()
157
--> 158 optimizer.minimize(avg_loss,parameter_list=track.parameters())
159
160
</opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/decorator.py:decorator-gen-185> in minimize(self, loss, startup_program, parameter_list, no_grad_set)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py in impl(func, *args, **kwargs)
201 def impl(func, *args, **kwargs):
202 with switch_tracer_mode_guard(is_train=False):
--> 203 return func(*args, **kwargs)
204
205 return impl(func)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py in minimize(self, loss, startup_program, parameter_list, no_grad_set)
835
836 optimize_ops = self.apply_optimize(
--> 837 loss, startup_program=startup_program, params_grads=params_grads)
838
839 return optimize_ops, params_grads
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py in apply_optimize(self, loss, startup_program, params_grads)
745 params_grads = append_regularization_ops(params_grads,
746 self.regularization)
--> 747 optimize_ops = self._create_optimization_pass(params_grads)
748 else:
749 program = loss.block.program
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py in _create_optimization_pass(self, parameters_and_grads)
549 continue
550 if param_and_grad[0].trainable is True:
--> 551 self._append_optimize_op(target_block, param_and_grad)
552 else:
553 for param_and_grad in parameters_and_grads:
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py in _append_optimize_op(self, block, param_and_grad)
1037 velocity_acc = self._get_accumulator(self._velocity_acc_str,
1038 param_and_grad[0])
-> 1039 lr = self._create_param_lr(param_and_grad)
1040
1041 if framework.in_dygraph_mode():
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py in _create_param_lr(self, param_and_grad)
386 is_with_opt=True), framework.name_scope(
387 'scale_with_param_lr'):
--> 388 return self._global_learning_rate() * param_lr
389
390 def _create_accumulators(self, block, parameters):
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/math_op_patch.py in impl(self, other_var)
184 else:
185 # add fill_op
--> 186 other_var = create_scalar(value=other_var, dtype=lhs_dtype)
187
188 rhs_dtype = safe_get_dtype(other_var)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/math_op_patch.py in create_scalar(value, dtype)
50
51 def create_scalar(value, dtype):
---> 52 return create_tensor(value, dtype, shape=[1])
53
54 def astype(self, dtype):
</opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/decorator.py:decorator-gen-198> in create_tensor(value, dtype, shape)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py in impl(func, *args, **kwargs)
201 def impl(func, *args, **kwargs):
202 with switch_tracer_mode_guard(is_train=False):
--> 203 return func(*args, **kwargs)
204
205 return impl(func)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/math_op_patch.py in create_tensor(value, dtype, shape)
45 out = _varbase_creator(dtype=dtype)
46 out = core.ops.fill_constant(out, 'dtype', dtype, 'shape', shape,
---> 47 'value', value, 'force_cpu', False)
48 out.stop_gradient = True
49 return out
RuntimeError: Unable to cast Python instance to C++ type (compile in debug mode for details)
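The bottom frames show where the cast fails: `_create_param_lr` computes `self._global_learning_rate() * param_lr`, and since `param_lr` is a plain Python value rather than a Variable, the dygraph math patch wraps it through `create_scalar` and `create_tensor` into `core.ops.fill_constant`, whose `value` argument cannot be cast to the expected C++ type. One hedged guess, not confirmed by the report, is that the per-parameter learning-rate scale was set from a numpy scalar; converting such values to builtin floats before building the layers would sidestep the failing cast:

```python
import numpy as np
import paddle.fluid as fluid

# Hypothetical workaround sketch: if the learning-rate scale in ParamAttr was
# produced by numpy (e.g. read out of an array), convert it to a builtin
# float so the 'value' attribute of fill_constant can be cast.
np_scale = np.float32(2.0)  # the kind of value suspected to fail the cast
w_attr = fluid.ParamAttr(learning_rate=float(np_scale))
```

If the scales are already builtin floats, the cause lies elsewhere and this snippet will not apply.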