Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -49,8 +49,8 @@ FastThreadedSSAGraphExecutor::FastThreadedSSAGraphExecutor(
/*disable_setting_default_stream_for_allocator=*/true,
/*stream_priority=*/0);
if (ir::IsTopologySortOperationsUnique(*graph_)) {
VLOG(10)
<< "Change thread number to 1 because the toposort order is unique";
VLOG(10) << "Change thread number to 1 because the topology sort order is "
"unique";
strategy_.num_threads_ = 1;
traced_ops_.clear();
for (auto *op_node : TopologySortOperations(*graph_)) {
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/framework/details/fetch_op_handle.cc
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ FetchOpHandle::~FetchOpHandle() = default;

void FetchOpHandle::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) {
PADDLE_THROW(platform::errors::PermissionDenied(
"No nodes need to wait FetchOp. Unexpceted Error."));
"No nodes need to wait FetchOp. Unexpected Error."));
}

static void CheckDims(const framework::DDim &tensor_dims,
Expand Down
10 changes: 5 additions & 5 deletions paddle/fluid/framework/operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2037,7 +2037,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
phi::KernelContext phi_kernel_context;
if (enable_cache_runtime_context_ && !need_prepare_phi_data_ &&
!need_prepare_data_) {
// TODO(inference): Now we only suppor dense_tensor cache, we may be
// TODO(inference): Now we only support dense_tensor cache, we may be
// support ScalarTensor, SparseTensor in future.
bool all_dense_tensor_input_{true};
for (auto& iter : Inputs()) {
Expand Down Expand Up @@ -2572,7 +2572,7 @@ Scope* OperatorWithKernel::PrepareData(
// for some situation like InferShape().
// In this situation We cannot skip Var analysis, as
// oneDNN shape of Var may differ from kNHWC Var
// In such situation corressponding resized Var
// In such situation corresponding resized Var
// has to be created and registered
if ((tensor_in->layout() == DataLayout::ONEDNN) &&
(var->IsType<phi::DenseTensor>() == true) &&
Expand Down Expand Up @@ -3192,7 +3192,7 @@ void OperatorWithKernel::BuildPhiKernelContext(
for (size_t i = 0; i < input_names.size(); ++i) {
auto it = ctx.inputs.find(input_names[i]);

// calcute the start and end index of the input tensors
// calculate the start and end index of the input tensors
size_t start_idx =
(i == 0 ? 0 : phi_kernel_context->InputRangeAt(i - 1).second);
// deal with optional here
Expand Down Expand Up @@ -3398,7 +3398,7 @@ void OperatorWithKernel::BuildPhiKernelContext(
attr_iter,
Attrs().end(),
platform::errors::NotFound("(%s) is not found in AttributeMap when "
"buildind static KernelContext.",
"building static KernelContext.",
attr_names[i]));
switch (AttrTypeID(attr_iter->second)) {
case proto::AttrType::INTS: {
Expand Down Expand Up @@ -3472,7 +3472,7 @@ void OperatorWithKernel::BuildPhiKernelContext(
RuntimeAttrs().end(),
platform::errors::NotFound(
"(%s) is not found in AttributeMap when "
"buildind static KernelContext.",
"building static KernelContext.",
attr_names[i]));
}

Expand Down
10 changes: 5 additions & 5 deletions paddle/fluid/framework/parallel_executor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -639,15 +639,15 @@ void InitP2P(const std::vector<platform::Place> &places) {
for (int i = 0; i < count; ++i) {
for (int j = 0; j < count; ++j) {
if (devices[i] == devices[j]) continue;
int can_acess = -1;
int can_access = -1;
#ifdef PADDLE_WITH_HIP
hipError_t ret =
hipDeviceCanAccessPeer(&can_acess, devices[i], devices[j]);
if (ret != hipSuccess || can_acess != 1) {
hipDeviceCanAccessPeer(&can_access, devices[i], devices[j]);
if (ret != hipSuccess || can_access != 1) {
#else
cudaError_t ret =
cudaDeviceCanAccessPeer(&can_acess, devices[i], devices[j]);
if (ret != cudaSuccess || can_acess != 1) {
cudaDeviceCanAccessPeer(&can_access, devices[i], devices[j]);
if (ret != cudaSuccess || can_access != 1) {
#endif
LOG(WARNING) << "Cannot enable P2P access from " << devices[i]
<< " to " << devices[j];
Expand Down
8 changes: 5 additions & 3 deletions paddle/fluid/framework/tensor_util.cc
Original file line number Diff line number Diff line change
Expand Up @@ -710,8 +710,9 @@ void TensorFromStream(std::istream& is,
PADDLE_THROW(platform::errors::Unimplemented(
"XPUPlace is not supported when not compiled with XPU"));
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"CutomPlace is not supported when not compiled with CustomDevice"));
PADDLE_THROW(
platform::errors::Unimplemented("CustomPlace is not supported when "
"not compiled with CustomDevice"));
}
#endif
} else {
Expand Down Expand Up @@ -887,7 +888,8 @@ std::ostream& print_tensor(std::ostream& os, const phi::DenseTensor& tensor) {
auto element_num = tensor.numel();

os << " - data: [";
// Note: int8_t && uint8_t is typedf of char, ostream unable to print properly
// Note: int8_t && uint8_t is typedef of char, ostream unable to print
// properly
if (typeid(int8_t) == typeid(T) || typeid(uint8_t) == typeid(T)) {
if (element_num > 0) {
os << signed(inspect[0]);
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/framework/trainer_factory.cc
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ namespace framework {

class TrainerBase;

typedef std::shared_ptr<TrainerBase> (*CreatetrainerFunction)();
typedef std::unordered_map<std::string, CreatetrainerFunction> trainerMap;
typedef std::shared_ptr<TrainerBase> (*CreateTrainerFunction)();
typedef std::unordered_map<std::string, CreateTrainerFunction> trainerMap;
trainerMap g_trainer_map;

#define REGISTER_TRAINER_CLASS(trainer_class) \
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/cvm_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ class CVMOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput("X",
"(LodTensor, default LodTensor<float>), a 2-D tensor with shape "
"[N x D],"
" where N is the batch size and D is the emebdding dim. ");
" where N is the batch size and D is the embedding dim. ");
AddInput("CVM",
"(Tensor), a 2-D Tensor with shape [N x 2], where N is the batch "
"size, 2 is show and click.");
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/platform/float16_test.cu
Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,7 @@ TEST(float16, compound_on_gpu) {
TestDivAssign(6, 2, 3);
}

TEST(float16, comparision_on_gpu) {
TEST(float16, comparison_on_gpu) {
TestEqual(1, 1, true);
TestEqual(1, 2, false);
TestNotEqual(2, 3, true);
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/prim/api/manual_prim/utils/utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,15 +29,15 @@ namespace prim {
// We put some api like utils here
template <typename T>
Tensor empty(const paddle::experimental::IntArray& shape,
phi::DataType dype,
phi::DataType dtype,
const paddle::Place& place);

template <typename T>
Tensor empty_like(const Tensor& x,
phi::DataType dtype,
const paddle::Place& place);

// copy tensor for output ptr, in static need use assigh op
// copy tensor for output ptr, in static need use assign op
template <typename T>
void by_pass(const Tensor& x, Tensor* out);

Expand Down Expand Up @@ -114,7 +114,7 @@ static std::vector<DST_T> unsafe_vector_cast(const std::vector<SRC_T>& src) {
return dst;
}

// This fucction compute unsqueeze dims for reshape to replace unsqueeze.
// This function compute unsqueeze dims for reshape to replace unsqueeze.
static std::vector<int64_t> get_unsqueeze_dims(
const Tensor& origin, const std::vector<int64_t>& axis) {
auto origin_dims = origin.shape();
Expand Down
20 changes: 10 additions & 10 deletions paddle/phi/kernels/prior_box_kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,25 +35,25 @@ void PriorBoxKernel(const Context& ctx,
DenseTensor* out,
DenseTensor* var);

// Expands a list of aspect ratios into the full set used for prior boxes:
// starts with the implicit ratio 1.0, appends each distinct input ratio
// (duplicates within `epsilon` are dropped), and, when `flip` is true, also
// appends the reciprocal 1/ar immediately after each newly added ratio.
//
// @param input_aspect_ratio  candidate aspect ratios (may contain duplicates)
// @param flip                whether to also emit 1/ar for each added ratio
// @param output_aspect_ratio output list; cleared before being filled
inline void ExpandAspectRatios(const std::vector<float>& input_aspect_ratio,
                               bool flip,
                               std::vector<float>* output_aspect_ratio) {
  // Ratios closer than this are treated as the same value.
  constexpr float epsilon = 1e-6f;
  output_aspect_ratio->clear();
  output_aspect_ratio->push_back(1.0f);
  for (size_t i = 0; i < input_aspect_ratio.size(); ++i) {
    float ar = input_aspect_ratio[i];
    // Skip ratios already present (within epsilon) in the output.
    bool already_exist = false;
    for (size_t j = 0; j < output_aspect_ratio->size(); ++j) {
      if (fabs(ar - output_aspect_ratio->at(j)) < epsilon) {
        already_exist = true;
        break;
      }
    }
    if (!already_exist) {
      output_aspect_ratio->push_back(ar);
      if (flip) {
        // Note: the flipped value is appended unconditionally, so it may
        // duplicate a later input ratio's entry only if that ratio itself
        // is new — matching the original behavior.
        output_aspect_ratio->push_back(1.0f / ar);
      }
    }
  }
}
Expand Down