2 changes: 1 addition & 1 deletion paddle/fluid/framework/feed_fetch_method.cc
@@ -97,7 +97,7 @@ FetchType& GetFetchVariable(const Scope& scope,
const std::string& var_name,
size_t index) {
// Since we want to fetch FetchType from a variable, the variable must
- // be created alreadly.
+ // be created already.
Variable* g_fetch_value = scope.FindVar(var_name);
PADDLE_ENFORCE_NOT_NULL(g_fetch_value,
platform::errors::NotFound(
2 changes: 1 addition & 1 deletion paddle/fluid/framework/infershape_utils.cc
@@ -859,7 +859,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
attr_names[i]));
}
} else {
- // do nothing, skip currnet attr
+ // do nothing, skip current attr
}
}
}
2 changes: 1 addition & 1 deletion paddle/fluid/framework/infershape_utils.h
@@ -85,7 +85,7 @@ class CompatMetaTensor : public phi::MetaTensor {
is_runtime_,
true,
platform::errors::Unavailable(
"Only can get phi::DenseTensor from MetaTensor in rumtime."));
"Only can get phi::DenseTensor from MetaTensor in runtime."));
auto* var = PADDLE_GET_CONST(Variable*, var_);
PADDLE_ENFORCE_EQ(
var->IsType<phi::SelectedRows>(),
4 changes: 2 additions & 2 deletions paddle/fluid/framework/lod_tensor.h
@@ -166,8 +166,8 @@ std::pair<LoD, std::pair<size_t, size_t>> GetSubLoDAndAbsoluteOffset(
const LoD& lod, size_t start_idx, size_t end_idx, size_t start_level);

/*
- * Serialize/Desiralize phi::DenseTensor to std::ostream
- * You can pass ofstream or ostringstream to serilize to file
+ * Serialize/Deserialize phi::DenseTensor to std::ostream
+ * You can pass ofstream or ostringstream to serialize to file
* or to a in memory string. GPU tensor will be copied to CPU.
*/
void SerializeToStream(std::ostream& os,
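Reviewer note: the comment in this hunk documents the stream-serialization helpers declared in lod_tensor.h. A minimal sketch of the round trip it describes follows; the trailing device-context parameters are an assumption (the declarations are truncated in this diff), so treat this as illustrative, not authoritative.

// Sketch only. Assumed signatures, based on the truncated declaration above:
//   void SerializeToStream(std::ostream&, const phi::DenseTensor&,
//                          const platform::DeviceContext&);
//   void DeserializeFromStream(std::istream&, phi::DenseTensor*,
//                              const platform::DeviceContext&);
#include <sstream>
#include <string>

void RoundTripExample(const phi::DenseTensor& tensor,
                      const platform::DeviceContext& dev_ctx) {
  std::ostringstream os;
  SerializeToStream(os, tensor, dev_ctx);  // a GPU tensor is copied to CPU
  const std::string buf = os.str();        // serialized bytes, in memory
  std::istringstream is(buf);
  phi::DenseTensor restored;
  DeserializeFromStream(is, &restored, dev_ctx);
}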
2 changes: 1 addition & 1 deletion paddle/fluid/framework/op_desc.cc
@@ -322,7 +322,7 @@ class CompileTimeInferShapeContext : public InferShapeContext {
PADDLE_ENFORCE_EQ(arg_names.size(),
1UL,
platform::errors::InvalidArgument(
"The iutput(%s) should hold only one element, but "
"The input(%s) should hold only one element, but "
"now it holds %d elements.",
name,
arg_names.size()));
2 changes: 1 addition & 1 deletion paddle/fluid/framework/op_version_proto.h
@@ -55,7 +55,7 @@ class OpVersionMap {
};

// get version id for operators with version id in paddle 2.4.2, this is used
- // for converting ProgramDesc in 2.4 comtabible format
+ // for converting ProgramDesc in 2.4 compatible format
const std::unordered_map<std::string, uint32_t>& GetLegacyOpVersions();

} // namespace pb
11 changes: 6 additions & 5 deletions paddle/fluid/framework/op_version_registry.h
@@ -378,11 +378,12 @@ class PassVersionCheckerRegistrar {
static PassVersionCheckerRegistrar& GetInstance();

PassVersionCheckers& Register(const std::string& pass_name) {
- PADDLE_ENFORCE_EQ(pass_version_checkers_map_.find(pass_name),
- pass_version_checkers_map_.end(),
- platform::errors::AlreadyExists(
- "PassVersionCheckers(%s) has alredy been registered.",
- pass_name.c_str()));
+ PADDLE_ENFORCE_EQ(
+ pass_version_checkers_map_.find(pass_name),
+ pass_version_checkers_map_.end(),
+ platform::errors::AlreadyExists(
+ "PassVersionCheckers(%s) has already been registered.",
+ pass_name.c_str()));
return pass_version_checkers_map_[pass_name];
}
bool IsPassCompatible(const std::string& fuse_pass_name) const {
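Reviewer note: the reformatted Register above follows a guard-then-default-construct idiom: fail loudly if the pass is already registered, then let operator[] create the map entry. A self-contained sketch of the same pattern in plain C++ (names here are illustrative, not Paddle's):

#include <cassert>
#include <string>
#include <unordered_map>

struct VersionCheckers {};  // stand-in for the real checker list

std::unordered_map<std::string, VersionCheckers> g_registry;

VersionCheckers& RegisterPass(const std::string& pass_name) {
  // Mirrors PADDLE_ENFORCE_EQ(find(...), end(), AlreadyExists(...)):
  // registering the same name twice is a hard error.
  assert(g_registry.find(pass_name) == g_registry.end());
  return g_registry[pass_name];  // operator[] default-constructs the entry
}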
16 changes: 8 additions & 8 deletions paddle/fluid/framework/operator.cc
@@ -2677,7 +2677,7 @@ Scope* OperatorWithKernel::PrepareData(
// target_kernel_type.
// Have a discussion with @Superjomn or the inference developers if some
// changes on this logic for this macro might not tested on the other
- // scenerios.
+ // scenarios.
// If this op is not called by an Executor or ParallelExecutor, it should
// called by a NaiveExecutor, the NaiveExecutor will cache the scopes and
// variables, that behavior a lot different.
@@ -3088,10 +3088,10 @@ static void SetDnnAttrIntoDeviceContext(
phi::DeviceContext* dev_ctx,
const Attribute& attr,
const std::string& attr_name,
- const operators::ExtraAttrPropertySet& attr_propertys) {
+ const operators::ExtraAttrPropertySet& attr_properties) {
#ifdef PADDLE_WITH_DNNL
if (phi::OneDNNContext::classof(dev_ctx) &&
- attr_propertys.Support(operators::ExtraAttrProperty::ONEDNN)) {
+ attr_properties.Support(operators::ExtraAttrProperty::ONEDNN)) {
VLOG(4) << "Runtime attr `" << attr_name << "` is passed to OneDNNContext.";
phi::OneDNNContext* one_dnn_ctx = static_cast<phi::OneDNNContext*>(dev_ctx);
switch (AttrTypeID(attr)) {
@@ -3124,7 +3124,7 @@ static void SetDnnAttrIntoDeviceContext(
#endif
#ifdef PADDLE_WITH_CUDA
if (phi::GPUContext::classof(dev_ctx) &&
- attr_propertys.Support(operators::ExtraAttrProperty::GPUDNN)) {
+ attr_properties.Support(operators::ExtraAttrProperty::GPUDNN)) {
VLOG(4) << "Runtime attr `" << attr_name << "` is passed to GPUDNNContext.";
phi::GPUContext* gpu_dnn_ctx = static_cast<phi::GPUContext*>(dev_ctx);
switch (AttrTypeID(attr)) {
@@ -3585,8 +3585,8 @@ void OperatorWithKernel::BuildPhiKernelContext(
for (const auto& attr_iter : runtime_attrs) {
auto& attr_name = attr_iter.first;
auto& attr = attr_iter.second;
- auto attr_propertys = paddle::operators::GetExtraAttrProperties(attr_name);
- SetDnnAttrIntoDeviceContext(dev_ctx, attr, attr_name, attr_propertys);
+ auto attr_properties = paddle::operators::GetExtraAttrProperties(attr_name);
+ SetDnnAttrIntoDeviceContext(dev_ctx, attr, attr_name, attr_properties);
}
// TODO(chenweihang): Since the pass will still `SetAttr` in the OpDesc,
// we try to add these Attrs to the RuntimeAttrs, but these OpDesc will lose
@@ -3600,8 +3600,8 @@ void OperatorWithKernel::BuildPhiKernelContext(
for (const auto& attr_iter : attrs) {
auto& attr_name = attr_iter.first;
auto& attr = attr_iter.second;
- auto attr_propertys = paddle::operators::GetExtraAttrProperties(attr_name);
- SetDnnAttrIntoDeviceContext(dev_ctx, attr, attr_name, attr_propertys);
+ auto attr_properties = paddle::operators::GetExtraAttrProperties(attr_name);
+ SetDnnAttrIntoDeviceContext(dev_ctx, attr, attr_name, attr_properties);
}
VLOG(4) << "Done runtime attributes";
#endif
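Reviewer note: the attr_properties parameter renamed throughout this file gates a two-condition dispatch: a runtime attribute is forwarded to a backend context only when the context is of that backend's type and the attribute's property set supports that backend. A standalone sketch of that gating (illustrative types, not Paddle's):

#include <cstdint>

enum class ExtraAttrProperty : uint32_t { ONEDNN = 1u << 0, GPUDNN = 1u << 1 };

struct ExtraAttrPropertySet {
  uint32_t bits = 0;
  bool Support(ExtraAttrProperty p) const {
    return (bits & static_cast<uint32_t>(p)) != 0;
  }
};

// Mirrors the checks in SetDnnAttrIntoDeviceContext: both the context
// type and the property set must agree before an attr is forwarded.
bool ShouldForwardToOneDNN(bool is_onednn_ctx,
                           const ExtraAttrPropertySet& props) {
  return is_onednn_ctx && props.Support(ExtraAttrProperty::ONEDNN);
}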
2 changes: 1 addition & 1 deletion paddle/fluid/framework/parallel_executor.cc
@@ -1314,7 +1314,7 @@ void ParallelExecutor::InitExecutorPrivateMemberInfo(
device_name = "XPU";
} else {
PADDLE_THROW(
platform::errors::Unavailable("Only CPU/CUDA/XPU is supportted. "
platform::errors::Unavailable("Only CPU/CUDA/XPU is supported. "
"please use CPU/CUDA/XPU backend."));
}

4 changes: 2 additions & 2 deletions paddle/fluid/framework/program_utils.cc
@@ -158,9 +158,9 @@ void ProgramProcessor::AddDepToBlockOp(const BlockDesc &block) {
} else if (op_type.compare("conditional_block") == 0) {
op_input_var_vec = &((*op_inputs)["kInputs"]);
} else {
- // Only support while_op and conditinal_block_op now
+ // Only support while_op and conditional_block_op now
LOG(WARNING)
- << "Currently, only support while_op and conditinal_block_op.\n";
+ << "Currently, only support while_op and conditional_block_op.\n";
continue;
}

2 changes: 1 addition & 1 deletion paddle/fluid/framework/proto_desc.h
@@ -26,7 +26,7 @@ constexpr int kNoneBlockIndex = -1;
constexpr int kNoneProcessMeshIndex = -1;

// If a attribute name has a certain suffix, it means that the
- // atrribute is a distributed-related attribute for auto parallel.
+ // attribute is a distributed-related attribute for auto parallel.
// e.g., "mesh_id@AUTO_PARALLEL".
constexpr char kAutoParallelSuffix[] = "@AUTO_PARALLEL";

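Reviewer note: the corrected comment describes a naming convention: an attribute is distributed-related for auto parallel when its name ends with "@AUTO_PARALLEL", e.g. "mesh_id@AUTO_PARALLEL". A sketch of the suffix check that convention implies (hypothetical helper, not part of this diff):

#include <string>

constexpr char kAutoParallelSuffix[] = "@AUTO_PARALLEL";

// True when attr_name ends with kAutoParallelSuffix, e.g.
// "mesh_id@AUTO_PARALLEL".
bool IsAutoParallelAttr(const std::string& attr_name) {
  const std::string suffix(kAutoParallelSuffix);
  return attr_name.size() >= suffix.size() &&
         attr_name.compare(attr_name.size() - suffix.size(),
                           suffix.size(), suffix) == 0;
}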
12 changes: 6 additions & 6 deletions paddle/fluid/framework/prune.cc
@@ -168,7 +168,7 @@ int GetOpRole(const proto::OpDesc& op_desc) {
}
}
// If attr op_role is not found, it may be operator created in c++ test, like
- // prune_test.cc. In that case, the op_role should be defaut value, which is
+ // prune_test.cc. In that case, the op_role should be default value, which is
// kNotSpecified.
return static_cast<int>(OpRole::kNotSpecified);
}
@@ -261,7 +261,7 @@ void prune_impl(const proto::ProgramDesc& input,
for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) {
auto& op_desc = *op_iter;

- // TODO(wanghaipeng03) reconstruct the follwing if/else block
+ // TODO(wanghaipeng03) reconstruct the following if/else block
// to extract common code
//
// bool should_run_flag = false;
@@ -281,7 +281,7 @@ void prune_impl(const proto::ProgramDesc& input,
//
// should_run.push_back(should_run_flag);
// if (should_run_flag) {
- // for (auto & var: op_desc.iputs()) {
+ // for (auto & var: op_desc.inputs()) {
// for (....) {
// if (.....) {
// dependent_vars->insert(argu);
@@ -304,7 +304,7 @@ void prune_impl(const proto::ProgramDesc& input,
add_dependent_var(argu);
}
}
- // NOTE(dev): All attibute with VarDesc type is considered as Input,
+ // NOTE(dev): All attribute with VarDesc type is considered as Input,
// so they shall be added into dependent_vars.
for (auto& attr : op_desc.attrs()) {
if (attr.type() == proto::AttrType::VAR) {
@@ -391,7 +391,7 @@ void prune_impl(const proto::ProgramDesc& input,
std::vector<int> sub_indices;
GetSubBlocksIndices(*op, &sub_indices);
for (auto& sub_index : sub_indices) {
- // create a copy of dependent_vars to avoid being overwrited by the
+ // create a copy of dependent_vars to avoid being overwritten by the
// other sub_block
std::unordered_set<std::string> dependent_vars_copy =
sub_block_dependent_vars;
@@ -438,7 +438,7 @@ void prune_impl(const proto::ProgramDesc& input,
add_var_names(arg);
}
}
- // NOTE(dev): All attibute with VarDesc type is considered as Input,
+ // NOTE(dev): All attribute with VarDesc type is considered as Input,
// so they shall be added into dependent_vars.
for (auto& attr : op.attrs()) {
if (attr.type() == proto::AttrType::VAR) {
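Reviewer note: the dependent_vars_copy comment fixed above reflects a deliberate design choice: each sub-block is pruned against its own copy of the dependent-variable set, so insertions made for one sub-block cannot leak into a sibling's pruning decisions. A minimal sketch of the idiom (illustrative names):

#include <string>
#include <unordered_set>
#include <vector>

void PruneSubBlocks(const std::vector<int>& sub_indices,
                    const std::unordered_set<std::string>& parent_deps) {
  for (int sub_index : sub_indices) {
    // Fresh copy per sub-block; a shared set would let one sub-block's
    // additions be overwritten by, or bleed into, the next iteration.
    std::unordered_set<std::string> deps_copy = parent_deps;
    // ... prune the sub-block at sub_index using deps_copy ...
    (void)sub_index;
    (void)deps_copy;
  }
}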