@@ -1153,7 +1153,7 @@ def GenerateNodeCreationCodes(self, for_backward=False, is_inplaced=False):
for name, (ttype, pos) in forward_inputs_position_map.items():
if name in need_pre_contiguous_set:
pre_contiguous_list.append(
f"{indent}const auto& {name}_tmp = (require_any_grad && {name}.is_dense_tensor() && !std::dynamic_pointer_cast<phi::DenseTensor>({name}.impl())->meta().is_contiguous()) ? paddle::Tensor(std::make_shared<phi::DenseTensor>(paddle::experimental::Trans2Contiguous(*(std::dynamic_pointer_cast<phi::DenseTensor>({name}.impl())))), {name}.mutable_autograd_meta()) : {name};"
f"{indent}const auto& {name}_tmp = (require_any_grad && {name}.is_dense_tensor() && !std::dynamic_pointer_cast<phi::DenseTensor>({name}.impl())->meta().is_contiguous()) ? paddle::Tensor(std::make_shared<phi::DenseTensor>(paddle::experimental::Trans2Contiguous(*(std::dynamic_pointer_cast<phi::DenseTensor>({name}.impl())))), {name}.mutable_autograd_meta(), {name}.name()) : {name};"
)
self.inputs_call_list_tmp[pos] = (
self.inputs_call_list_tmp[pos] + '_tmp'
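For readability, here is a sketch of what the updated template expands to for a hypothetical forward input named `x` (the `{indent}` placeholder and the line wrapping are illustrative, not the literal generator output). The only change is that the contiguous temporary now forwards `x.name()` as the third constructor argument.

```cpp
// Sketch of the generated code for a hypothetical forward input `x`.
const auto& x_tmp =
    (require_any_grad && x.is_dense_tensor() &&
     !std::dynamic_pointer_cast<phi::DenseTensor>(x.impl())->meta().is_contiguous())
        ? paddle::Tensor(
              std::make_shared<phi::DenseTensor>(paddle::experimental::Trans2Contiguous(
                  *(std::dynamic_pointer_cast<phi::DenseTensor>(x.impl())))),
              x.mutable_autograd_meta(),
              x.name())  // new: the temporary keeps the original tensor's name
        : x;
```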
paddle/fluid/eager/to_static/run_program_op_func.h (3 changes: 2 additions & 1 deletion)
@@ -124,7 +124,8 @@ static std::vector<paddle::Tensor> Trans2ContiguousTensors(
std::make_shared<phi::DenseTensor>(
paddle::experimental::Trans2Contiguous(
*(std::dynamic_pointer_cast<phi::DenseTensor>(t.impl())))),
-t.mutable_autograd_meta());
+t.mutable_autograd_meta(),
+t.name());
} else {
res.emplace_back(t);
}
paddle/fluid/eager/to_static/run_program_op_node.h (2 changes: 1 addition & 1 deletion)
@@ -201,8 +201,8 @@ static void ShareTensorsIntoScopeWithName(
const std::vector<std::string> &tensor_names,
paddle::framework::Scope *scope) {
for (size_t i = 0; i < tensors.size(); ++i) {
VLOG(4) << "Share Tensor Into Scope: " << i;
auto name = tensor_names[i];
VLOG(4) << "Share Tensor Into Scope: " << name;
if (name == paddle::framework::kFakeVarName ||
name == paddle::framework::kEmptyVarName) {
continue;
paddle/fluid/pybind/eager_method.cc (3 changes: 2 additions & 1 deletion)
@@ -1831,7 +1831,8 @@ static PyObject* tensor__setitem_dygraph(TensorObject* self,
paddle::experimental::Trans2Contiguous(
*(std::dynamic_pointer_cast<phi::DenseTensor>(
transback_sub_tensor.impl())))),
-transback_sub_tensor.mutable_autograd_meta())
+transback_sub_tensor.mutable_autograd_meta(),
+transback_sub_tensor.name())
: transback_sub_tensor;

grad_node = std::shared_ptr<SetValueWithTensorGradNode>(
paddle/phi/api/include/tensor.h (8 changes: 5 additions & 3 deletions)
@@ -142,14 +142,16 @@ class PADDLE_API Tensor final {
explicit Tensor(const std::string& name) : name_(name) {}

/**
-* @brief Construct a new Tensor object by a TensorBase pointer and
-* autograd_meta
+* @brief Construct a new Tensor object by a TensorBase pointer, autograd meta
+* and name
*
* @param tensor_impl
* @param autograd_meta
+* @param name
*/
Tensor(std::shared_ptr<phi::TensorBase> tensor_impl,
std::shared_ptr<AbstractAutogradMeta> autograd_meta);
std::shared_ptr<AbstractAutogradMeta> autograd_meta,
const std::string& name);

/* Part 2: Dimension, DataType and DataLayout methods */

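Taken together with the call sites above, a minimal usage sketch of the updated constructor (mirroring the Trans2ContiguousTensors change earlier in this diff; `t` stands for any dense input tensor) looks like this: the contiguous copy now carries over both the autograd meta and the original name.

```cpp
// Minimal sketch of the new three-argument constructor; `t` is any dense
// paddle::Tensor whose layout is being made contiguous.
paddle::Tensor contiguous_copy(
    std::make_shared<phi::DenseTensor>(paddle::experimental::Trans2Contiguous(
        *(std::dynamic_pointer_cast<phi::DenseTensor>(t.impl())))),
    t.mutable_autograd_meta(),  // keep the autograd relationship
    t.name());                  // keep the original tensor name as well
```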
paddle/phi/api/lib/tensor.cc (7 changes: 5 additions & 2 deletions)
@@ -53,8 +53,11 @@ Tensor::Tensor(std::shared_ptr<phi::TensorBase> tensor_impl)
}

Tensor::Tensor(std::shared_ptr<phi::TensorBase> tensor_impl,
std::shared_ptr<AbstractAutogradMeta> autograd_meta)
: impl_(std::move(tensor_impl)), autograd_meta_(std::move(autograd_meta)) {
std::shared_ptr<AbstractAutogradMeta> autograd_meta,
const std::string &name)
: impl_(std::move(tensor_impl)),
autograd_meta_(std::move(autograd_meta)),
name_(name) {
Member Author (comment on this line):
@zyfncg please review this part: this constructor was introduced in #57352 and is only used for trans2contiguous. Can this API be changed directly, or would adding a new constructor be more appropriate if it cannot?

Contributor (reply):
This API can be changed directly; adding a new interface would be more costly.

PADDLE_ENFORCE_NOT_NULL(
impl_,
phi::errors::InvalidArgument("TensorImpl with nullptr is not supported"));
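Following the review thread above (the existing two-argument constructor is modified in place rather than a new overload being added), here is a small behavioral sketch, not taken from the PR, of what the extra argument changes. The member calls used (the name-only constructor, set_impl, impl, mutable_autograd_meta, name) are existing Tensor members shown or implied in this diff; the tensor name used is hypothetical.

```cpp
// Behavioral sketch (not part of the PR): with the third argument, the copy
// reports the same name as the source tensor instead of the default name the
// old two-argument constructor left it with.
paddle::Tensor src("linear_0.w_0");                   // name-only constructor
src.set_impl(std::make_shared<phi::DenseTensor>());   // give it a dense impl
paddle::Tensor copy(src.impl(), src.mutable_autograd_meta(), src.name());
// copy.name() == "linear_0.w_0"
```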