From 7ab8930e1ba56862d9d0c7305c6b048f106dc527 Mon Sep 17 00:00:00 2001
From: co63oc
Date: Sun, 3 Mar 2024 08:21:12 +0800
Subject: [PATCH] Fix typos

---
 paddle/phi/core/distributed/auto_parallel/dist_tensor.h   | 2 +-
 .../phi/core/distributed/auto_parallel/inferspmd_utils.h  | 2 +-
 paddle/phi/core/distributed/auto_parallel/proto_helper.cc | 8 ++++----
 paddle/phi/core/distributed/auto_parallel/proto_helper.h  | 4 ++--
 .../auto_parallel/reshard/nd_mesh_reshard_function.cc     | 2 +-
 .../auto_parallel/reshard/same_status_reshard_function.cc | 2 +-
 paddle/phi/core/sparse_coo_tensor.h                       | 4 ++--
 7 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/paddle/phi/core/distributed/auto_parallel/dist_tensor.h b/paddle/phi/core/distributed/auto_parallel/dist_tensor.h
index bf5b083aa6e6fc..5af868ef01f172 100644
--- a/paddle/phi/core/distributed/auto_parallel/dist_tensor.h
+++ b/paddle/phi/core/distributed/auto_parallel/dist_tensor.h
@@ -79,7 +79,7 @@ class DistTensor final
              const Placements& placements);
 
   /// \brief Construct a empty dist tensor (for infer spmd)
-  /// \param dims The global dimension of the currnet Tensor.
+  /// \param dims The global dimension of the current Tensor.
   /// \param dist_attr The distributed attributes of the current tensor.
   DistTensor(const DDim& dims, const TensorDistAttr& dist_attr);
 
diff --git a/paddle/phi/core/distributed/auto_parallel/inferspmd_utils.h b/paddle/phi/core/distributed/auto_parallel/inferspmd_utils.h
index 71395507a09519..d2c22bcd08db0b 100644
--- a/paddle/phi/core/distributed/auto_parallel/inferspmd_utils.h
+++ b/paddle/phi/core/distributed/auto_parallel/inferspmd_utils.h
@@ -107,7 +107,7 @@ struct InferSpmdFnImpl {
     }
   };
 
-  // for vecotr slot
+  // for vector slot
   template <typename... Tail>
   struct InferSpmdFnCallHelper<const std::vector<const DistMetaTensor*>&,
                                Tail...> {
diff --git a/paddle/phi/core/distributed/auto_parallel/proto_helper.cc b/paddle/phi/core/distributed/auto_parallel/proto_helper.cc
index e8e4197a63c08a..fad63c15d63bde 100644
--- a/paddle/phi/core/distributed/auto_parallel/proto_helper.cc
+++ b/paddle/phi/core/distributed/auto_parallel/proto_helper.cc
@@ -35,8 +35,8 @@ auto_parallel::ProcessMeshProto to_proto(const ProcessMesh& process_mesh) {
 }
 
 auto_parallel::DeviceCapabilityProto to_proto(
-    const auto_parallel::DeviceCapability& device_capibilty) {
-  TO_PROTO_HELPER(device_capibilty, auto_parallel::DeviceCapabilityProto);
+    const auto_parallel::DeviceCapability& device_capability) {
+  TO_PROTO_HELPER(device_capability, auto_parallel::DeviceCapabilityProto);
 }
 
 auto_parallel::DeviceProto to_proto(const auto_parallel::Device& device) {
@@ -44,8 +44,8 @@ auto_parallel::DeviceProto to_proto(const auto_parallel::Device& device) {
 }
 
 auto_parallel::LinkCapabilityProto to_proto(
-    const auto_parallel::LinkCapability& link_capibilty) {
-  TO_PROTO_HELPER(link_capibilty, auto_parallel::LinkCapabilityProto);
+    const auto_parallel::LinkCapability& link_capability) {
+  TO_PROTO_HELPER(link_capability, auto_parallel::LinkCapabilityProto);
 }
 
 auto_parallel::LinkProto to_proto(const auto_parallel::Link& link) {
diff --git a/paddle/phi/core/distributed/auto_parallel/proto_helper.h b/paddle/phi/core/distributed/auto_parallel/proto_helper.h
index 66bdf2af74406d..840c0eb95f89ec 100644
--- a/paddle/phi/core/distributed/auto_parallel/proto_helper.h
+++ b/paddle/phi/core/distributed/auto_parallel/proto_helper.h
@@ -30,10 +30,10 @@ auto_parallel::TensorDistAttrProto to_proto(const TensorDistAttr& dist_attr);
 auto_parallel::ProcessMeshProto to_proto(const ProcessMesh& dist_attr);
 auto_parallel::DeviceCapabilityProto to_proto(
-    const auto_parallel::DeviceCapability& device_capibilty);
+    const auto_parallel::DeviceCapability& device_capability);
 auto_parallel::DeviceProto to_proto(const auto_parallel::Device& device);
 auto_parallel::LinkCapabilityProto to_proto(
-    const auto_parallel::LinkCapability& link_capibilty);
+    const auto_parallel::LinkCapability& link_capability);
 auto_parallel::LinkProto to_proto(const auto_parallel::Link& link);
 auto_parallel::DeviceMeshProto to_proto(const auto_parallel::DeviceMesh& link);
 auto_parallel::DistributedMapperProto to_proto(
diff --git a/paddle/phi/core/distributed/auto_parallel/reshard/nd_mesh_reshard_function.cc b/paddle/phi/core/distributed/auto_parallel/reshard/nd_mesh_reshard_function.cc
index b7a6679590e639..7a044209677d30 100644
--- a/paddle/phi/core/distributed/auto_parallel/reshard/nd_mesh_reshard_function.cc
+++ b/paddle/phi/core/distributed/auto_parallel/reshard/nd_mesh_reshard_function.cc
@@ -228,7 +228,7 @@ void SameNdMeshReshardFunction::Eval(phi::DeviceContext* dev_ctx,
       bool is_partial = in_partial_status.count(out_mesh_axis) != 0;
 
       VLOG(3) << "Step4: out_mesh axis : " << out_mesh_axis
-              << "; paratial state :" << is_partial;
+              << "; partial state :" << is_partial;
       // 4.1 Calculate the dist_attr after this transform
       TensorDistAttr real_out_dist_attr(out->dist_attr());
       std::vector<int64_t> real_dims_mapping =
diff --git a/paddle/phi/core/distributed/auto_parallel/reshard/same_status_reshard_function.cc b/paddle/phi/core/distributed/auto_parallel/reshard/same_status_reshard_function.cc
index 2869951addffc9..0a86275203b51c 100644
--- a/paddle/phi/core/distributed/auto_parallel/reshard/same_status_reshard_function.cc
+++ b/paddle/phi/core/distributed/auto_parallel/reshard/same_status_reshard_function.cc
@@ -91,7 +91,7 @@ void SameStatusReshardFunction::Eval(phi::DeviceContext* dev_ctx,
     if (src == cur_global_rank) {
       VLOG(3) << "Send from src " << src << " to dst " << dst;
       int64_t dst_local_rank = GetLocalRankInParticipate(all_process_ids, dst);
-      // Sice send kernel only has input, so we don't need to infermeta
+      // Since send kernel only has input, so we don't need to infermeta
       // actually. According to this reason, just use the kernel directly.
       RESHARD_FUNCTOR_WITH_COMM(dev_ctx,
                                 PSendKernel,
diff --git a/paddle/phi/core/sparse_coo_tensor.h b/paddle/phi/core/sparse_coo_tensor.h
index d0759bedcf5574..61c8b0c3d2a5b0 100644
--- a/paddle/phi/core/sparse_coo_tensor.h
+++ b/paddle/phi/core/sparse_coo_tensor.h
@@ -127,7 +127,7 @@ class SparseCooTensor : public TensorBase,
   /// \brief Test whether the non_zero_elements_ storage is allocated.
   /// In special cases, when nnz=0, non_zero_elements_ will not need to be
-  /// initialized, but it is neccessary to return true here, otherwise the
+  /// initialized, but it is necessary to return true here, otherwise the
   /// gradient will be None. return Whether the non_zero_elements_ storage is
   /// allocated.
   bool initialized() const override {
@@ -189,7 +189,7 @@ class SparseCooTensor : public TensorBase,
   /// \brief get the sparse dim
   int32_t sparse_dim() const;
 
-  /// \brief get the dnese dim
+  /// \brief get the dense dim
   int32_t dense_dim() const;
 
   /// \brief Returns the meta information of the tensor.