diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index 70003b48cc8972..83845a7b4234ba 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -1153,7 +1153,7 @@ def GenerateNodeCreationCodes(self, for_backward=False, is_inplaced=False):
         for name, (ttype, pos) in forward_inputs_position_map.items():
             if name in need_pre_contiguous_set:
                 pre_contiguous_list.append(
-                    f"{indent}const auto& {name}_tmp = (require_any_grad && {name}.is_dense_tensor() && !std::dynamic_pointer_cast<phi::DenseTensor>({name}.impl())->meta().is_contiguous()) ? paddle::Tensor(std::make_shared<phi::DenseTensor>(paddle::experimental::Trans2Contiguous(*(std::dynamic_pointer_cast<phi::DenseTensor>({name}.impl())))), {name}.mutable_autograd_meta()) : {name};"
+                    f"{indent}const auto& {name}_tmp = (require_any_grad && {name}.is_dense_tensor() && !std::dynamic_pointer_cast<phi::DenseTensor>({name}.impl())->meta().is_contiguous()) ? paddle::Tensor(std::make_shared<phi::DenseTensor>(paddle::experimental::Trans2Contiguous(*(std::dynamic_pointer_cast<phi::DenseTensor>({name}.impl())))), {name}.mutable_autograd_meta(), {name}.name()) : {name};"
                 )
                 self.inputs_call_list_tmp[pos] = (
                     self.inputs_call_list_tmp[pos] + '_tmp'
diff --git a/paddle/fluid/eager/to_static/run_program_op_func.h b/paddle/fluid/eager/to_static/run_program_op_func.h
index 478816551ef375..cdb4de66ae1898 100644
--- a/paddle/fluid/eager/to_static/run_program_op_func.h
+++ b/paddle/fluid/eager/to_static/run_program_op_func.h
@@ -124,7 +124,8 @@ static std::vector<paddle::Tensor> Trans2ContiguousTensors(
           std::make_shared<phi::DenseTensor>(
               paddle::experimental::Trans2Contiguous(
                   *(std::dynamic_pointer_cast<phi::DenseTensor>(t.impl())))),
-          t.mutable_autograd_meta());
+          t.mutable_autograd_meta(),
+          t.name());
     } else {
       res.emplace_back(t);
     }
diff --git a/paddle/fluid/eager/to_static/run_program_op_node.h b/paddle/fluid/eager/to_static/run_program_op_node.h
index 70aa63c0d55fa8..39ec0e7fe31a38 100644
--- a/paddle/fluid/eager/to_static/run_program_op_node.h
+++ b/paddle/fluid/eager/to_static/run_program_op_node.h
@@ -201,8 +201,8 @@ static void ShareTensorsIntoScopeWithName(
     const std::vector<std::string> &tensor_names,
     paddle::framework::Scope *scope) {
   for (size_t i = 0; i < tensors.size(); ++i) {
-    VLOG(4) << "Share Tensor Into Scope: " << i;
     auto name = tensor_names[i];
+    VLOG(4) << "Share Tensor Into Scope: " << name;
     if (name == paddle::framework::kFakeVarName ||
         name == paddle::framework::kEmptyVarName) {
       continue;
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 957d35e6957f50..353f6a43584afb 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -1831,7 +1831,8 @@ static PyObject* tensor__setitem_dygraph(TensorObject* self,
                     paddle::experimental::Trans2Contiguous(
                         *(std::dynamic_pointer_cast<phi::DenseTensor>(
                             transback_sub_tensor.impl())))),
-                transback_sub_tensor.mutable_autograd_meta())
+                transback_sub_tensor.mutable_autograd_meta(),
+                transback_sub_tensor.name())
             : transback_sub_tensor;

     grad_node = std::shared_ptr<SetValueWithTensorGradNode>(
diff --git a/paddle/phi/api/include/tensor.h b/paddle/phi/api/include/tensor.h
index 636a4198640cdb..e80c0c147d3325 100644
--- a/paddle/phi/api/include/tensor.h
+++ b/paddle/phi/api/include/tensor.h
@@ -142,14 +142,16 @@ class PADDLE_API Tensor final {
   explicit Tensor(const std::string& name) : name_(name) {}

   /**
-   * @brief Construct a new Tensor object by a TensorBase pointer and
-   * autograd_meta
+   * @brief Construct a new Tensor object by a TensorBase pointer, autograd meta
+   * and name
    *
    * @param tensor_impl
    * @param autograd_meta
+   * @param name
    */
   Tensor(std::shared_ptr<phi::TensorBase> tensor_impl,
-         std::shared_ptr<AbstractAutogradMeta> autograd_meta);
+         std::shared_ptr<AbstractAutogradMeta> autograd_meta,
+         const std::string& name);

   /* Part 2: Dimension, DataType and DataLayout methods */

diff --git a/paddle/phi/api/lib/tensor.cc b/paddle/phi/api/lib/tensor.cc
index 2ab68b2e846f2e..54c949e688c79b 100644
--- a/paddle/phi/api/lib/tensor.cc
+++ b/paddle/phi/api/lib/tensor.cc
@@ -53,8 +53,11 @@ Tensor::Tensor(std::shared_ptr<phi::TensorBase> tensor_impl)
 }

 Tensor::Tensor(std::shared_ptr<phi::TensorBase> tensor_impl,
-               std::shared_ptr<AbstractAutogradMeta> autograd_meta)
-    : impl_(std::move(tensor_impl)), autograd_meta_(std::move(autograd_meta)) {
+               std::shared_ptr<AbstractAutogradMeta> autograd_meta,
+               const std::string &name)
+    : impl_(std::move(tensor_impl)),
+      autograd_meta_(std::move(autograd_meta)),
+      name_(name) {
   PADDLE_ENFORCE_NOT_NULL(
       impl_,
       phi::errors::InvalidArgument("TensorImpl with nullptr is not supported"));
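
Note (reviewer sketch, not part of the diff): the pattern repeated at each patched call site above can be read as the helper below. It is a minimal illustration of the new three-argument Tensor constructor: when a dense tensor is non-contiguous, a contiguous copy is constructed that keeps both the original autograd meta and, with this change, the original name. The wrapper name ToContiguousKeepName and the include set are illustrative assumptions, not code from this PR.

    // Sketch only: mirrors the Trans2Contiguous call sites patched above.
    // Assumed includes (abbreviated; Trans2Contiguous lives in an internal
    // data-transform header in the real tree).
    #include <memory>

    #include "paddle/phi/api/include/tensor.h"
    #include "paddle/phi/core/dense_tensor.h"

    // Hypothetical helper, not a Paddle API.
    paddle::Tensor ToContiguousKeepName(const paddle::Tensor& t) {
      if (t.is_dense_tensor()) {
        auto dense = std::dynamic_pointer_cast<phi::DenseTensor>(t.impl());
        if (!dense->meta().is_contiguous()) {
          // Before this change, the two-argument constructor was used here
          // and the contiguous copy came back unnamed; the third argument
          // now forwards t.name() to the copy.
          return paddle::Tensor(
              std::make_shared<phi::DenseTensor>(
                  paddle::experimental::Trans2Contiguous(*dense)),
              t.mutable_autograd_meta(),
              t.name());
        }
      }
      return t;  // already contiguous (or not dense): reuse the tensor as-is
    }

This is also why ShareTensorsIntoScopeWithName now logs the tensor name rather than the loop index: once names survive the contiguous-copy path, the name is the more useful thing to see in the VLOG output.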