
Commit d23f0f6

Fix cahce cache (#60935)
1 parent 1e60ba9 commit d23f0f6

10 files changed: 31 additions & 31 deletions

paddle/fluid/eager/amp_auto_cast.h

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@ static inline bool NeedCast(const paddle::Tensor& tensor,
       paddle::platform::is_cuda_pinned_place(place) ||
       paddle::platform::is_xpu_place(place) ||
       paddle::platform::is_custom_place(place)) {
-    // CudaPinndePlace is added for varbase created by dataloader
+    // CudaPinnedPlace is added for varbase created by dataloader
     if ((data_type == phi::DataType::FLOAT32 ||
          data_type == phi::DataType::FLOAT16 ||
          data_type == phi::DataType::BFLOAT16) &&
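For context, the predicate touched by this hunk decides whether a tensor is a candidate for AMP auto-cast based on its placement and dtype. Below is a minimal, self-contained sketch of that kind of check; the enum values and the function name are illustrative stand-ins, not Paddle's actual headers or API.

// Illustrative stand-ins for the real place/dtype enums used by NeedCast.
enum class Place { CPU, GPU, CUDAPinned, XPU, Custom };
enum class DataType { FLOAT32, FLOAT16, BFLOAT16, INT64 };

// Sketch: a tensor is a cast candidate only when it lives in memory that
// AMP handles (device, pinned, or custom place) and its dtype is one of
// the floating-point types AMP converts between.
static bool NeedCastSketch(Place place, DataType dtype) {
  const bool place_supported =
      place == Place::GPU || place == Place::CUDAPinned ||
      place == Place::XPU || place == Place::Custom;
  const bool dtype_supported =
      dtype == DataType::FLOAT32 || dtype == DataType::FLOAT16 ||
      dtype == DataType::BFLOAT16;
  return place_supported && dtype_supported;
}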

paddle/fluid/eager/autograd_meta.h

Lines changed: 1 addition & 1 deletion
@@ -45,7 +45,7 @@ using AbstractAutogradMeta = paddle::AbstractAutogradMeta;
  *
  * 4. overrided_stop_gradient_
  * This member is used to finish some auto-prune related work, which indicate
- * user set stop_gradient should overrided the result indicated by framework.
+ * user set stop_gradient should override the result indicated by framework.
  * All non-parameter tensor's stop_gradient properties should be true. We will
  * pass stop_gradient when we find one who need it.
  *

paddle/fluid/eager/backward.cc

Lines changed: 3 additions & 3 deletions
@@ -84,19 +84,19 @@ void EnforceGradNodeHasInput(GradNodeBase* node) {
 }
 
 void DuplicateCheck(const std::vector<paddle::Tensor>& inputs, bool is_input) {
-  std::unordered_set<AutogradMeta*> visisted_ins;
+  std::unordered_set<AutogradMeta*> visited_ins;
   std::string msg = is_input ? "inputs" : "outputs";
   for (auto const& in : inputs) {
     AutogradMeta* auto_grad_meta = EagerUtils::unsafe_autograd_meta(in);
     PADDLE_ENFORCE_EQ(
-        visisted_ins.count(auto_grad_meta),
+        visited_ins.count(auto_grad_meta),
         0,
         paddle::platform::errors::AlreadyExists(
             "%s contain duplicate tensor %s, please check %s carefully.",
             msg,
             in.name(),
             msg));
-    visisted_ins.insert(auto_grad_meta);
+    visited_ins.insert(auto_grad_meta);
   }
 }
 
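The hunk above contains the whole duplicate-detection helper; the idiom is simply an std::unordered_set of pointers that flags the second occurrence of the same autograd metadata. A generic, self-contained version of the same pattern, with illustrative names rather than Paddle's, looks like this:

#include <stdexcept>
#include <string>
#include <unordered_set>
#include <vector>

// Throws if the same pointer appears more than once in `items`.
// Mirrors the DuplicateCheck idiom: one pass over the inputs, one
// unordered_set of previously visited pointers.
template <typename T>
void ThrowOnDuplicates(const std::vector<T*>& items, const std::string& what) {
  std::unordered_set<T*> visited;
  for (T* item : items) {
    if (visited.count(item) != 0) {
      throw std::runtime_error(what + " contain a duplicate entry");
    }
    visited.insert(item);
  }
}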
paddle/fluid/eager/eager_amp_auto_cast.h

Lines changed: 2 additions & 2 deletions
@@ -29,8 +29,8 @@ static inline bool NeedCast(const paddle::Tensor& tensor,
       paddle::platform::is_xpu_place(place) ||
       paddle::platform::is_custom_place(place) ||
       paddle::platform::is_cpu_place(place)) {
-    // CudaPinndePlace is added for varbase created by dataloader
-    // Cpu place is for differnt place tensor, when input1 is cpu and input2 is
+    // CudaPinnedPlace is added for varbase created by dataloader
+    // Cpu place is for different place tensor, when input1 is cpu and input2 is
     // gpu
     if ((data_type == phi::DataType::FLOAT32 ||
          data_type == phi::DataType::FLOAT16 ||

paddle/fluid/eager/eager_layout_auto_tune.h

Lines changed: 5 additions & 5 deletions
@@ -135,7 +135,7 @@ inline std::shared_ptr<EagerLayoutTransformer> EagerLayoutAutotune(
       paddle::imperative::LayoutAutoTune::Instance().SetDefaultLayout(
           phi::DataLayout::NCHW);
     } else {
-      VLOG(4) << "DisableLayoutAutoTune accoding to Conv op"
+      VLOG(4) << "DisableLayoutAutoTune according to Conv op"
               << " dtype : " << data_type << " format : " << (*attr);
       egr::Controller::Instance().DisableLayoutAutoTune();
       return transposer;
@@ -209,15 +209,15 @@ inline std::shared_ptr<EagerLayoutTransformer> EagerLayoutAutotune<int, int>(
     int* start_axis,
     int* stop_axis) {
   if (DesiredLayout() == phi::DataLayout::UNDEFINED) {
-    VLOG(4) << "Optimze Layout was not started" << op_name;
+    VLOG(4) << "Optimize Layout was not started" << op_name;
     return std::make_shared<EagerLayoutTransformer>(
         op_name, tensors_vector, tensors_vector[0][0].layout());
   }
 
-  bool no_tranpose = tensors_vector[0][0].layout() == DesiredLayout();
+  bool no_transpose = tensors_vector[0][0].layout() == DesiredLayout();
   bool is_valid = ((*start_axis) == 1 && (*stop_axis) == 3);
   if (op_name == "flatten" || op_name == "flatten_contiguous_range") {
-    if (no_tranpose && is_valid) {
+    if (no_transpose && is_valid) {
       return std::make_shared<EagerFlattenOpTransformer>(op_name);
     }
   }
@@ -232,7 +232,7 @@ EagerLayoutAutotune<paddle::experimental::Scalar>(
         kSlotSmallVectorSize>& tensors_vector,
     paddle::experimental::Scalar* axis) {
   if (DesiredLayout() == phi::DataLayout::UNDEFINED) {
-    VLOG(4) << "Optimze Layout was not started" << op_name;
+    VLOG(4) << "Optimize Layout was not started" << op_name;
     return std::make_shared<EagerLayoutTransformer>(
         op_name, tensors_vector, tensors_vector[0][0].layout());
   }

paddle/fluid/eager/eager_layout_transformer.h

Lines changed: 1 addition & 1 deletion
@@ -129,7 +129,7 @@ class EagerLayoutTransformer {
     dim_size_ = in.shape().size();
     bool need_trans =
         !(final_layout_ == Layout::UNDEFINED || final_layout_ == in.layout());
-    // This is for Agnostic op when layout is differnet
+    // This is for Agnostic op when layout is different
     if (need_trans) {
       auto out_tensor = EagerTraceTransposeOp(final_layout_, in);
       phi::DenseTensorUtils::GetMutableMeta(

paddle/fluid/eager/eager_tensor.h

Lines changed: 3 additions & 3 deletions
@@ -199,7 +199,7 @@ inline bool IsVariableCompatTensor(const paddle::Tensor& tensor) {
  * **/
 class EagerVariable final {
  public:
-  /* Default constructor and name constructor should only be used for contruct
+  /* Default constructor and name constructor should only be used for construct
    * output and in fluid*/
   EagerVariable() = default;
 
@@ -293,7 +293,7 @@ class EagerVariable final {
         true,
         paddle::platform::errors::Fatal(
             "Tensor %s does not hold phi::SelectedRows or phi::DenseTensor. "
-            "Or it holds empty impl, this should not happend since we should "
+            "Or it holds empty impl, this should not happened since we should "
             "treat all kinds of tensor as what they are.",
             tensor.name()));
     *framework_tensor = *tensor_dense;
@@ -314,7 +314,7 @@ class EagerVariable final {
     PADDLE_ENFORCE_NOT_NULL(compat_tensor,
                             paddle::platform::errors::Fatal(
                                 "Tensor %s holds empty impl, this should not "
-                                "happend since we should "
+                                "happened since we should "
                                 "treat all kinds of tensor as what they are.",
                                 tensor.name()));
     *framework_holder = compat_tensor->Get<VarType>();

paddle/fluid/eager/grad_node_info.h

Lines changed: 2 additions & 2 deletions
@@ -293,14 +293,14 @@ class GradNodeBase {
 
   std::map<int64_t, std::tuple<size_t, size_t, std::shared_ptr<TensorHook>>>
   GetGradientHookFuntions() {
-    VLOG(7) << "GetGradientHookFuntions ";
+    VLOG(7) << "GetGradientHookFunctions ";
     return gradient_hooks_;
   }
 
   void SetGradientHookFuntions(
       std::map<int64_t, std::tuple<size_t, size_t, std::shared_ptr<TensorHook>>>
           hooks) {
-    VLOG(7) << "SetGradientHookFuntions ";
+    VLOG(7) << "SetGradientHookFunctions ";
     gradient_hooks_ = hooks;
   }
 
paddle/fluid/eager/to_static/run_program_op_node.h

Lines changed: 10 additions & 10 deletions
@@ -553,7 +553,7 @@ inline void PirRunProgramAPI(
     //     program_id, global_inner_scope, false, skip_eager_delete_vars);
   } else {
     paddle::platform::RecordEvent record_event(
-        "get_interpretercore_cahce",
+        "get_interpretercore_cache",
         paddle::platform::TracerEventType::UserDefined,
         1);
     VLOG(2) << "Get interpretercore cache by program:" << program_id;
@@ -710,7 +710,7 @@ inline void RunProgramAPI(
         "create_new_interpretercore",
         paddle::platform::TracerEventType::UserDefined,
         1);
-    VLOG(2) << "No interpretercore cahce, so create a new interpretercore "
+    VLOG(2) << "No interpretercore cache, so create a new interpretercore "
                "for program: "
             << program_id;
     // Step 1. share input_vars & parameters into scope
@@ -781,10 +781,10 @@ inline void RunProgramAPI(
     VLOG(2) << "Get skip GC vars size is: " << skip_eager_delete_vars.size();
   } else {
     paddle::platform::RecordEvent record_event(
-        "get_interpretercore_cahce",
+        "get_interpretercore_cache",
         paddle::platform::TracerEventType::UserDefined,
         1);
-    VLOG(2) << "Get interpretercore cahce by program:" << program_id;
+    VLOG(2) << "Get interpretercore cache by program:" << program_id;
     // Step 1. get cache interpretercore
     auto &cached_value =
         interpretercore_info_cache.GetMutable(program_id,
@@ -889,7 +889,7 @@ inline void RunProgramGradAPI(
         "create_new_interpretercore",
         paddle::platform::TracerEventType::UserDefined,
         1);
-    VLOG(2) << "No interpretercore cahce, so create a new interpretercore"
+    VLOG(2) << "No interpretercore cache, so create a new interpretercore"
                "for program: "
             << program_id;
     details::ShareTensorsIntoScope(out_grad, global_inner_scope);
@@ -968,10 +968,10 @@ inline void RunProgramGradAPI(
     VLOG(2) << "Get skip GC vars size is: " << skip_eager_delete_vars.size();
   } else {
     paddle::platform::RecordEvent record_event(
-        "get_interpretercore_cahce",
+        "get_interpretercore_cache",
         paddle::platform::TracerEventType::UserDefined,
         1);
-    VLOG(2) << "Get interpretercore cahce by program:" << program_id;
+    VLOG(2) << "Get interpretercore cache by program:" << program_id;
     auto &cached_value =
         interpretercore_info_cache.GetMutable(program_id,
                                               global_inner_scope,
@@ -1095,7 +1095,7 @@ inline void PirRunProgramGradAPI(
         "create_new_interpretercore",
         paddle::platform::TracerEventType::UserDefined,
         1);
-    VLOG(2) << "No interpretercore cahce, so create a new interpretercore";
+    VLOG(2) << "No interpretercore cache, so create a new interpretercore";
     // Step 1. share input_vars & parameters into scope
     auto passed_kernel_program =
         paddle::framework::ApplyIrPass(backward_program, place);
@@ -1152,10 +1152,10 @@ inline void PirRunProgramGradAPI(
     details::print_collection(skip_eager_delete_vars);
   } else {
     paddle::platform::RecordEvent record_event(
-        "get_interpretercore_cahce",
+        "get_interpretercore_cache",
         paddle::platform::TracerEventType::UserDefined,
         1);
-    VLOG(2) << "Get interpretercore cahce by program:" << program_id;
+    VLOG(2) << "Get interpretercore cache by program:" << program_id;
     auto &cached_value =
         interpretercore_info_cache.GetMutable(program_id,
                                               global_inner_scope,
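Most of the renamed strings above label the two branches of one recurring pattern: look up a cached interpreter core by program id and reuse it, or build a new one and cache it. A minimal, generic sketch of that create-or-reuse idiom follows; the types and names are illustrative placeholders, not Paddle's InterpreterCore API.

#include <cstdint>
#include <memory>
#include <unordered_map>

// Illustrative stand-in for the expensive object being cached.
struct ExecutorCore {
  explicit ExecutorCore(int64_t id) : program_id(id) {}
  int64_t program_id;
};

// Create-or-reuse: return the cached core for `program_id`, building and
// caching one only on the first request.
class ExecutorCoreCache {
 public:
  std::shared_ptr<ExecutorCore> GetOrCreate(int64_t program_id) {
    auto it = cache_.find(program_id);
    if (it != cache_.end()) {
      return it->second;  // the "get_interpretercore_cache" branch: reuse
    }
    // the "create_new_interpretercore" branch: build and remember
    auto core = std::make_shared<ExecutorCore>(program_id);
    cache_.emplace(program_id, core);
    return core;
  }

 private:
  std::unordered_map<int64_t, std::shared_ptr<ExecutorCore>> cache_;
};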

paddle/fluid/eager/utils.cc

Lines changed: 3 additions & 3 deletions
@@ -133,7 +133,7 @@ std::vector<AutogradMeta*> EagerUtils::autograd_meta(
   std::vector<AutogradMeta*> ret;
   ret.reserve(targets->size());
 
-  // for autograd_meta we can tolerent it has nullptr.
+  // for autograd_meta we can tolerate it has nullptr.
   for (auto& target : *targets) {
     auto* p_autograd_meta = autograd_meta(&target);
     ret.emplace_back(p_autograd_meta);
@@ -146,7 +146,7 @@ std::vector<AutogradMeta*> EagerUtils::autograd_meta(
   std::vector<AutogradMeta*> ret;
   ret.reserve(targets->size());
 
-  // for autograd_meta we can tolerent it has nullptr.
+  // for autograd_meta we can tolerate it has nullptr.
   for (auto& target : *targets) {
     auto* p_autograd_meta = autograd_meta(target);
     ret.emplace_back(p_autograd_meta);
@@ -506,7 +506,7 @@ std::shared_ptr<egr::GradNodeBase> EagerUtils::GetGradAccumulationNode(
     PADDLE_THROW(paddle::platform::errors::Fatal(
         "GetGradAccumulationNode should only be called on leaf tensor, but "
         "target tensor: %s has GradNode which is not a "
-        "GradNodeAccumulation, and this should not happend unless target "
+        "GradNodeAccumulation, and this should not happened unless target "
        "tensor is modified by some ops and calling set history for it.",
        tensor.name()));
   }
