From 4a01c2ecec2072b6da598df7e9a5a1b03acf513c Mon Sep 17 00:00:00 2001
From: firestonelib
Date: Wed, 25 Aug 2021 20:37:29 +0800
Subject: [PATCH 1/4] fix assign bug support fp16 uint8

---
 paddle/fluid/operators/assign_op.cc | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc
index 433cabcfee0104..5a5f26c08feb1f 100644
--- a/paddle/fluid/operators/assign_op.cc
+++ b/paddle/fluid/operators/assign_op.cc
@@ -160,7 +160,8 @@ REGISTER_OPERATOR(assign, ops::AssignOp,
 
 REGISTER_OP_CPU_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
                                ops::AssignKernel, int, ops::AssignKernel,
-                               int64_t, ops::AssignKernel, bool,
+                               int64_t, ops::AssignKernel, uint8_t,
+                               ops::AssignKernel, bool,
                                ops::AssignKernel, plat::float16,
                                ops::AssignKernel, plat::bfloat16,
                                ops::AssignKernel);
@@ -168,7 +169,8 @@ REGISTER_OP_CPU_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 REGISTER_OP_CUDA_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
                                 ops::AssignKernel, int, ops::AssignKernel,
-                                int64_t, ops::AssignKernel, bool,
+                                int64_t, ops::AssignKernel, uint8_t,
+                                ops::AssignKernel, bool,
                                 ops::AssignKernel, plat::float16,
                                 ops::AssignKernel);
 #endif

From 1aa26f312da3dd7b6837dacd48b5fce970fc6369 Mon Sep 17 00:00:00 2001
From: firestonelib
Date: Wed, 25 Aug 2021 20:53:30 +0800
Subject: [PATCH 2/4] fix dygraph assign bool bug

---
 python/paddle/fluid/dygraph/io.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/python/paddle/fluid/dygraph/io.py b/python/paddle/fluid/dygraph/io.py
index 2318a08462d5d5..eb616cd752da8d 100644
--- a/python/paddle/fluid/dygraph/io.py
+++ b/python/paddle/fluid/dygraph/io.py
@@ -450,8 +450,9 @@ def _append_scale_to_output(self, program):
         with framework.program_guard(program):
             for i, out in enumerate(self._output_descs):
                 var = program.global_block().var(out.name())
-                var = nn.scale(
-                    var, 1., name="translated_layer/scale_{}".format(i))
+                if var.dtype != paddle.bool:
+                    var = nn.scale(
+                        var, 1., name="translated_layer/scale_{}".format(i))
                 scale_output_vars.append(var)
         # 2. update output names & descs
         for i, var in enumerate(scale_output_vars):

From 9acf3d373e9f75dbe259ca86201ef2fa96706c23 Mon Sep 17 00:00:00 2001
From: firestonelib
Date: Wed, 25 Aug 2021 22:42:22 +0800
Subject: [PATCH 3/4] modify code style

---
 paddle/fluid/operators/assign_op.cc | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc
index 5a5f26c08feb1f..09452b8f68baf2 100644
--- a/paddle/fluid/operators/assign_op.cc
+++ b/paddle/fluid/operators/assign_op.cc
@@ -161,16 +161,14 @@ REGISTER_OPERATOR(assign, ops::AssignOp,
 REGISTER_OP_CPU_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
                                ops::AssignKernel, int, ops::AssignKernel,
                                int64_t, ops::AssignKernel, uint8_t,
-                               ops::AssignKernel, bool,
-                               ops::AssignKernel, plat::float16,
-                               ops::AssignKernel, plat::bfloat16,
+                               ops::AssignKernel, bool, ops::AssignKernel,
+                               plat::float16, ops::AssignKernel, plat::bfloat16,
                                ops::AssignKernel);
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 REGISTER_OP_CUDA_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
                                 ops::AssignKernel, int, ops::AssignKernel,
                                 int64_t, ops::AssignKernel, uint8_t,
-                                ops::AssignKernel, bool,
-                                ops::AssignKernel, plat::float16,
-                                ops::AssignKernel);
+                                ops::AssignKernel, bool, ops::AssignKernel,
+                                plat::float16, ops::AssignKernel);
 #endif

From 485c8a75bd4417a92b3250df1596265570de988d Mon Sep 17 00:00:00 2001
From: firestonelib
Date: Thu, 26 Aug 2021 12:00:40 +0800
Subject: [PATCH 4/4] revert bool modification

---
 python/paddle/fluid/dygraph/io.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/python/paddle/fluid/dygraph/io.py b/python/paddle/fluid/dygraph/io.py
index eb616cd752da8d..2318a08462d5d5 100644
--- a/python/paddle/fluid/dygraph/io.py
+++ b/python/paddle/fluid/dygraph/io.py
@@ -450,9 +450,8 @@ def _append_scale_to_output(self, program):
         with framework.program_guard(program):
             for i, out in enumerate(self._output_descs):
                 var = program.global_block().var(out.name())
-                if var.dtype != paddle.bool:
-                    var = nn.scale(
-                        var, 1., name="translated_layer/scale_{}".format(i))
+                var = nn.scale(
+                    var, 1., name="translated_layer/scale_{}".format(i))
                 scale_output_vars.append(var)
         # 2. update output names & descs
         for i, var in enumerate(scale_output_vars):