Skip to content

Commit 2682045

Browse files
committed
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into del_densetensor_mem_desc_2
2 parents 84095d9 + 36e1d02 commit 2682045

183 files changed

Lines changed: 4409 additions & 2196 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

.gitignore

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -108,5 +108,13 @@ paddle/fluid/pir/dialect/operator/ir/pd_api.*
108108
paddle/fluid/pir/dialect/operator/ir/op_decomp.cc
109109
paddle/fluid/pir/dialect/operator/ir/pd_op_vjp.cc
110110
paddle/fluid/pir/dialect/operator/ir/pd_op.*
111+
paddle/fluid/pir/dialect/operator/ir/pd_op_bwd.*
112+
paddle/fluid/pir/dialect/operator/ir/pd_op_fused.*
113+
paddle/fluid/pir/dialect/operator/ir/pd_op_fused_bwd.*
114+
paddle/fluid/pir/dialect/operator/ir/pd_pir_op.*
115+
paddle/fluid/pir/dialect/operator/ir/pd_pir_op_bwd.*
116+
paddle/fluid/pir/dialect/operator/ir/pd_pir_op_update.*
117+
paddle/fluid/pir/dialect/operator/ir/pd_op_info.*
111118
paddle/cinn/hlir/dialect/generated/ops.parsed.yaml
112119
paddle/cinn/hlir/dialect/operator/ir/cinn_op.*
120+
paddle/cinn/hlir/dialect/operator/ir/cinn_op_info.*

cmake/generic.cmake

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -833,7 +833,6 @@ function(hip_test TARGET_NAME)
833833
${hip_test_DEPS}
834834
paddle_gtest_main
835835
lod_tensor
836-
memory
837836
gtest
838837
glog
839838
phi
@@ -843,7 +842,6 @@ function(hip_test TARGET_NAME)
843842
${hip_test_DEPS}
844843
paddle_gtest_main
845844
lod_tensor
846-
memory
847845
gtest
848846
phi
849847
glog)
@@ -940,7 +938,6 @@ function(xpu_test TARGET_NAME)
940938
${xpu_test_DEPS}
941939
paddle_gtest_main
942940
lod_tensor
943-
memory
944941
gtest
945942
phi
946943
glog
@@ -950,7 +947,6 @@ function(xpu_test TARGET_NAME)
950947
${xpu_test_DEPS}
951948
paddle_gtest_main
952949
lod_tensor
953-
memory
954950
gtest
955951
phi
956952
glog)

paddle/cinn/hlir/dialect/operator/ir/CMakeLists.txt

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,9 @@ if(NOT CINN_ONLY)
3030
set(cinn_op_header_file_tmp ${cinn_op_header_file}.tmp)
3131
set(cinn_op_source_file_tmp ${cinn_op_source_file}.tmp)
3232

33+
set(cinn_op_info_file ${CINN_DIALECT_SOURCE_DIR}/cinn_op_info.cc)
34+
set(cinn_op_info_file_tmp ${cinn_op_info_file}.tmp)
35+
3336
execute_process(
3437
COMMAND ${CMAKE_COMMAND} -E make_directory ${parsed_op_dir}
3538
COMMAND ${PYTHON_EXECUTABLE} ${cinn_op_gen_parsed_yaml_file} --op_yaml_path
@@ -41,9 +44,11 @@ if(NOT CINN_ONLY)
4144
${cinn_op_parsed_yaml_files} --op_compat_yaml_file
4245
${cinn_op_compat_yaml_file} --namespaces ${cinn_op_namespace}
4346
--dialect_name ${cinn_op_dialect_name} --op_def_h_file
44-
${cinn_op_header_file_tmp} --op_def_cc_file ${cinn_op_source_file_tmp})
47+
${cinn_op_header_file_tmp} --op_info_file ${cinn_op_info_file_tmp}
48+
--op_def_cc_file ${cinn_op_source_file_tmp})
4549

46-
set(generated_files_cinn_op "${cinn_op_header_file}" "${cinn_op_source_file}")
50+
set(generated_files_cinn_op "${cinn_op_header_file}" "${cinn_op_info_file}"
51+
"${cinn_op_source_file}")
4752
foreach(generated_file ${generated_files_cinn_op})
4853
if(EXISTS "${generated_file}.tmp" AND EXISTS "${generated_file}")
4954
execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different
@@ -61,6 +66,7 @@ if(NOT CINN_ONLY)
6166
SRCS
6267
op_dialect.cc
6368
${cinn_op_source_file}
69+
${cinn_op_info_file}
6470
manual_op.cc
6571
op_attribute.cc
6672
DEPS

paddle/cinn/hlir/dialect/operator/ir/op_dialect.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ void OperatorDialect::initialize() {
3535
// paddle/cinn/hlir/dialect/CMakeLists.txt.
3636
RegisterOps<
3737
#define GET_OP_LIST
38-
#include "paddle/cinn/hlir/dialect/operator/ir/cinn_op.cc" // NOLINT
38+
#include "paddle/cinn/hlir/dialect/operator/ir/cinn_op_info.cc" // NOLINT
3939
>();
4040
RegisterOp<GroupOp>();
4141
RegisterOp<ConcatOp>();

paddle/common/errors.cc

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,9 @@ std::string error_name(ErrorCode code) {
5858
case ErrorCode::EXTERNAL:
5959
return "ExternalError";
6060
break;
61+
case ErrorCode::INVALID_TYPE:
62+
return "InvalidTypeError";
63+
break;
6164
default:
6265
throw std::invalid_argument("The error type is undefined.");
6366
break;

paddle/common/errors.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,10 @@ enum ErrorCode {
8585
// Third-party library error.
8686
// Error type string: "ExternalError"
8787
EXTERNAL = 12,
88+
89+
// Client specified a mismatched type.
90+
// Error type string: "InvalidTypeError"
91+
INVALID_TYPE = 13,
8892
};
8993

9094
class ErrorSummary {
@@ -140,6 +144,7 @@ REGISTER_ERROR(Unimplemented, ErrorCode::UNIMPLEMENTED)
140144
REGISTER_ERROR(Unavailable, ErrorCode::UNAVAILABLE)
141145
REGISTER_ERROR(Fatal, ErrorCode::FATAL)
142146
REGISTER_ERROR(External, ErrorCode::EXTERNAL)
147+
REGISTER_ERROR(InvalidType, ErrorCode::INVALID_TYPE)
143148

144149
#undef REGISTER_ERROR
145150

paddle/fluid/eager/CMakeLists.txt

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -17,14 +17,7 @@ if(NOT (NOT WITH_PYTHON AND ON_INFER))
1717
set(eager_deps ${eager_deps} accumulation_node prim_utils)
1818
endif()
1919

20-
set(fluid_deps
21-
tracer
22-
layer
23-
proto_desc
24-
operator
25-
op_registry
26-
variable_helper
27-
memcpy)
20+
set(fluid_deps tracer layer proto_desc operator op_registry variable_helper)
2821
set(generated_deps final_dygraph_function final_dygraph_node dygraph_function
2922
dygraph_node)
3023

@@ -73,7 +66,6 @@ cc_library(
7366
operator
7467
op_registry
7568
variable_helper
76-
memcpy
7769
generated_op
7870
autograd_meta
7971
hook_utils)

paddle/fluid/eager/to_static/run_program_op_func.h

Lines changed: 43 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,10 @@ static std::vector<paddle::Tensor> Trans2ContiguousTensors(
129129
return res;
130130
}
131131

132+
inline int64_t hash_int_value(int64_t value) {
133+
return value + 0x9e3779b9 + (value << 6) + (value >> 2);
134+
}
135+
132136
inline void run_program_ad_func(
133137
const std::vector<paddle::Tensor>& x,
134138
const std::vector<paddle::Tensor>& params,
@@ -155,7 +159,22 @@ inline void run_program_ad_func(
155159
auto params_tmp = Trans2ContiguousTensors(params);
156160
// Call forward function
157161
// if require_any_grad is False, don't save any middle vars.
158-
RunProgramAPI(x_tmp, params_tmp, out, step_scope, require_any_grad, attrs);
162+
std::vector<int64_t> place_hash_keys = std::vector<int64_t>();
163+
for (const paddle::Tensor& tensor : x) {
164+
int64_t device_type = static_cast<int64_t>(tensor.place().GetType());
165+
place_hash_keys.emplace_back(hash_int_value(device_type));
166+
}
167+
for (const paddle::Tensor& tensor : params) {
168+
int64_t device_type = static_cast<int64_t>(tensor.place().GetType());
169+
place_hash_keys.emplace_back(hash_int_value(device_type));
170+
}
171+
RunProgramAPI(x_tmp,
172+
params_tmp,
173+
out,
174+
step_scope,
175+
require_any_grad,
176+
attrs,
177+
place_hash_keys);
159178
VLOG(2) << "start run run_program grad";
160179
auto is_test = false;
161180
if (attrs.count("is_test")) {
@@ -168,6 +187,9 @@ inline void run_program_ad_func(
168187
// Create GradOpNode (1 means [out_grad], 2 means [x_grad, paramx_grad])
169188
auto grad_node = std::make_shared<GradNodeRunProgram>(1, 2);
170189

190+
// Set place hash keys for backward
191+
grad_node->SetPlaceHashKeys(place_hash_keys);
192+
171193
// Set Attributes
172194
grad_node->SetAttrMap(attrs);
173195

@@ -266,9 +288,27 @@ inline void pir_run_program_ad_func(
266288

267289
// Call forward function
268290
// if require_any_grad is False, don't save any middle vars.
269-
PirRunProgramAPI(
270-
x, params, out, middles, step_scope, require_any_grad, attrs);
291+
std::vector<int64_t> place_hash_keys = std::vector<int64_t>();
292+
for (const paddle::Tensor& tensor : x) {
293+
int64_t device_type = static_cast<int64_t>(tensor.place().GetType());
294+
place_hash_keys.emplace_back(hash_int_value(device_type));
295+
}
296+
for (const paddle::Tensor& tensor : params) {
297+
int64_t device_type = static_cast<int64_t>(tensor.place().GetType());
298+
place_hash_keys.emplace_back(hash_int_value(device_type));
299+
}
300+
PirRunProgramAPI(x,
301+
params,
302+
out,
303+
middles,
304+
step_scope,
305+
require_any_grad,
306+
attrs,
307+
place_hash_keys);
271308
if (!is_test && require_any_grad) {
309+
// Set place hash keys for backward
310+
grad_node->SetPlaceHashKeys(place_hash_keys);
311+
272312
// Set Attributes
273313
grad_node->SetAttrMap(attrs);
274314

0 commit comments

Comments
 (0)