Skip to content

Commit a186e60

Browse files
sanbuphy, Ligoml, SigureMo
authored
在文档中统一静态图模式与动态图模式的英文翻译 (#49170)
* 1219
* temporarily change the num_diff_files limit, test=document_fix
* Revert "temporarily change the num_diff_files limit, test=document_fix" — this reverts commit 8e70f00.
* for codestyle
* remove duplicate license
* `static mode` -> `static graph mode`
* Update hybrid_parallel_inference.py
* Update layer_function_generator.py
* Update manipulation.py
* reset

Co-authored-by: Ligoml <[email protected]>
Co-authored-by: SigureMo <[email protected]>
1 parent 162f8fe commit a186e60

File tree

106 files changed

+207
-184
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

106 files changed

+207
-184
lines changed

paddle/fluid/eager/autograd_meta.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ using AbstractAutogradMeta = paddle::experimental::AbstractAutogradMeta;
2323
*
2424
* AutogradMeta is what record the backward info for tensor. When we run
2525
* computation graph eagerly, we can not build a static paddle program like
26-
* static mode do, so we need a new method to record forward info to trace
26+
* static graph mode do, so we need a new method to record forward info to trace
2727
* backward when we finish all forward computation. This require our
2828
* AutogradMeta class record following main members
2929
*

paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -760,7 +760,7 @@ bool BuildOpFuncList(const platform::Place& place,
760760
new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
761761
phi_kernel_name, phi_cpu_kernel_key)));
762762
if (op_with_kernel->PhiKernel()->IsValid()) {
763-
VLOG(6) << "Static mode PrepareImpl - kernel name: "
763+
VLOG(6) << "Static graph mode PrepareImpl - kernel name: "
764764
<< phi_kernel_name
765765
<< " | kernel key: " << phi_cpu_kernel_key
766766
<< " | kernel: " << *(op_with_kernel->PhiKernel());

paddle/fluid/framework/operator.cc

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1679,12 +1679,12 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
16791679
phi_kernel_name, phi_kernel_key)));
16801680

16811681
if (phi_kernel_->IsValid()) {
1682-
VLOG(6) << "Static mode ChoosePhiKernel - kernel name: "
1682+
VLOG(6) << "Static graph mode ChoosePhiKernel - kernel name: "
16831683
<< phi_kernel_name << " | kernel key: " << phi_kernel_key
16841684
<< " | kernel: " << *phi_kernel_;
16851685
} else {
1686-
VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << phi_kernel_name
1687-
<< "` not found.";
1686+
VLOG(6) << "Static graph mode ChoosePhiKernel - kernel `"
1687+
<< phi_kernel_name << "` not found.";
16881688
}
16891689
} else {
16901690
phi_kernel_name = kernel_signature_->name;
@@ -1815,7 +1815,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
18151815

18161816
dev_ctx = pool.Get(platform::CPUPlace());
18171817
if (phi_kernel_->IsValid()) {
1818-
VLOG(6) << "Static mode PrepareImpl - kernel name: "
1818+
VLOG(6) << "Static graph mode PrepareImpl - kernel name: "
18191819
<< phi_kernel_name << " | kernel key: " << phi_cpu_kernel_key
18201820
<< " | kernel: " << *phi_kernel_;
18211821
run_phi_kernel_ = true;
@@ -2083,11 +2083,11 @@ phi::KernelKey OperatorWithKernel::ChoosePhiKernel(
20832083
phi_kernel_name, phi_kernel_key)));
20842084

20852085
if (phi_kernel_->IsValid()) {
2086-
VLOG(6) << "Static mode ChoosePhiKernel - kernel name: " << phi_kernel_name
2087-
<< " | kernel key: " << phi_kernel_key
2086+
VLOG(6) << "Static graph mode ChoosePhiKernel - kernel name: "
2087+
<< phi_kernel_name << " | kernel key: " << phi_kernel_key
20882088
<< " | kernel: " << *phi_kernel_;
20892089
} else {
2090-
VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << phi_kernel_name
2090+
VLOG(6) << "Static graph mode ChoosePhiKernel - kernel `" << phi_kernel_name
20912091
<< "` not found.";
20922092
}
20932093
return phi_kernel_key;

paddle/fluid/imperative/tracer.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -136,7 +136,7 @@ class Tracer {
136136
}
137137

138138
// Note(Aurelius84): The `tmp` is used as prefix key while naming a temporary
139-
// intermediate var both in imperative and static mode. But the
139+
// intermediate var both in imperative and static graph mode. But the
140140
// `UniqueNameGenerator` in C++ and `unique_name.py` in Python doesn't share
141141
// the same auto-increment id. It will create a variable repeatedly with same
142142
// name like `tmp_0` in some cases when transform dygraph into static layers.

paddle/fluid/inference/tensorrt/convert/c_allreduce_op.cc

Lines changed: 16 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,16 @@
1-
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2-
3-
Licensed under the Apache License, Version 2.0 (the "License");
4-
you may not use this file except in compliance with the License.
5-
You may obtain a copy of the License at
6-
7-
http://www.apache.org/licenses/LICENSE-2.0
8-
9-
Unless required by applicable law or agreed to in writing, software
10-
distributed under the License is distributed on an "AS IS" BASIS,
11-
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12-
See the License for the specific language governing permissions and
13-
limitations under the License. */
1+
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
1414

1515
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
1616
#include "paddle/fluid/inference/tensorrt/plugin/c_allreduce_op_plugin.h"
@@ -32,8 +32,9 @@ class CAllReduceOpConverter : public OpConverter {
3232
bool test_mode) override {
3333
VLOG(4) << "convert fluid callreduce op to tensorrt layer";
3434
if (!engine_->with_dynamic_shape()) {
35-
PADDLE_THROW(platform::errors::Fatal(
36-
"Unsupported static mode. Please set dynamic shape of inputs."));
35+
PADDLE_THROW(
36+
platform::errors::Fatal("Unsupported static graph mode. Please set "
37+
"dynamic shape of inputs."));
3738
}
3839
ReduceType red_type = op_to_reduce_type[op.type()];
3940
std::string name = op.type();

paddle/fluid/inference/tensorrt/convert/preln_residual_bias.cc

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,9 @@ class PrelnResidualBiasOpConverter : public OpConverter {
2828
bool test_mode) override {
2929
VLOG(4) << "convert fused preln_residual_bias op to tensorrt layer";
3030
if (!engine_->with_dynamic_shape()) {
31-
PADDLE_THROW(platform::errors::Fatal(
32-
"Unsupported static mode. Please set dynamic shape of inputs."));
31+
PADDLE_THROW(
32+
platform::errors::Fatal("Unsupported static graph mode. Please set "
33+
"dynamic shape of inputs."));
3334
}
3435
framework::OpDesc op_desc(op, nullptr);
3536
// Declare inputs

paddle/fluid/operators/run_program_op.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -288,8 +288,8 @@ class RunProgramOpKernel : public framework::OpKernel<T> {
288288
auto *out_scope_vec = ctx.Output<StepScopeVar>("OutScope");
289289
std::unique_ptr<framework::Scope> inner_scope{nullptr};
290290
if (out_scope_vec->size() == 0) {
291-
// For cuda graph under static mode usage.
292-
// For static mode, we cannot set value of a tensor before any run,
291+
// For cuda graph under static graph mode usage.
292+
// For static graph mode, we cannot set value of a tensor before any run,
293293
// the OutScope variable passed to the op actually contains nothing.
294294
// Just create a tmp scope to run the program.
295295
PADDLE_ENFORCE_EQ(

paddle/fluid/operators/set_value_op.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -145,7 +145,7 @@ class SetValueMaker : public framework::OpProtoAndCheckerMaker {
145145
AddAttr<std::vector<int64_t>>("shape", "(vector<int64_t>) Shape of values.")
146146
.SetDefault({});
147147
AddComment(R"DOC(SetValue operator.
148-
Assignment to a phi::DenseTensor in static mode.
148+
Assignment to a phi::DenseTensor in static graph mode.
149149
)DOC");
150150
}
151151
};

paddle/fluid/pybind/eager_legacy_op_function_generator.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -443,9 +443,9 @@ GenerateOpFunctions() {
443443
// In this case, output will reuse input varbase.
444444
// Dygraph mode needs to be aligned with the in-place strategy in static
445445
// mode, and the mapping relationships between output and input that have
446-
// been defined in static mode should be used in dygraph mode.
447-
// Find which ops need to use Inplace strategy in static mode, and get the
448-
// mapping relationship between Inplace output and input.
446+
// been defined in static graph mode should be used in dygraph mode.
447+
// Find which ops need to use Inplace strategy in static graph mode, and get
448+
// the mapping relationship between Inplace output and input.
449449
auto& infer_inplace =
450450
paddle::framework::OpInfoMap::Instance().Get(op_type).infer_inplace_;
451451
std::map<std::string, std::string> inplace_map;

paddle/fluid/pybind/eager_properties.cc

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,8 @@ PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
3939
EAGER_TRY
4040
// NOTE(dev): [why not use egr::Controller::Instance::GernerateUniqueName()?]
4141
// Beacause Controller must holder a tracer, but 'tensor.name' maybe called
42-
// everywhere such as static mode in @to_static, which means tracer is None.
42+
// everywhere such as static graph mode in @to_static, which means tracer is
43+
// None.
4344
static egr::UniqueNameGenerator name_generator;
4445
if (self->tensor.name().empty()) {
4546
self->tensor.set_name(name_generator.Generate());

0 commit comments

Comments (0)