Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions paddle/fluid/operators/pull_gpups_sparse_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ static void PullGpuPSSparseFunctor(const framework::ExecutionContext &ctx) {
auto embedding_size_vec = ctx.Attr<std::vector<int>>("size");
const auto slot_size = inputs.size();
std::vector<const uint64_t *> all_keys(slot_size);
// GpuPSPS only supports float now
// GpuPS only supports float now
std::vector<float *> all_values(slot_size);
std::vector<int64_t> slot_lengths(slot_size);
for (size_t i = 0; i < slot_size; i++) {
Expand Down Expand Up @@ -80,7 +80,7 @@ static void PushGpuPSSparseFunctor(const framework::ExecutionContext &ctx) {
cur_batch_size,
platform::errors::PreconditionNotMet(
"The batch size of all input slots should be same, "
"please cheack"));
"please check"));
}
const float *grad_value = d_output[i]->data<float>();
all_grad_values[i] = grad_value;
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/py_func_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ static void CallPythonFunc(py::object *callable,
out->ShareDataWith(*py_out_tensor);
} catch (py::cast_error &) {
PADDLE_THROW(platform::errors::InvalidArgument(
"py::cast to phi::DenseTensor error. The %d-th output expection is "
"py::cast to phi::DenseTensor error. The %d-th output exception is "
"phi::DenseTensor",
i));
}
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/operators/randperm_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ namespace paddle {
namespace operators {

template <typename T>
static inline void random_permate(T* data_ptr, int num, unsigned int seed) {
static inline void random_permute(T* data_ptr, int num, unsigned int seed) {
auto engine = phi::GetCPURandomEngine(seed);
for (int i = 0; i < num; ++i) {
data_ptr[i] = static_cast<T>(i);
Expand All @@ -50,13 +50,13 @@ class RandpermKernel : public framework::OpKernel<T> {

if (platform::is_cpu_place(ctx.GetPlace())) {
T* out_data = out_tensor->mutable_data<T>(platform::CPUPlace());
random_permate<T>(out_data, n, seed);
random_permute<T>(out_data, n, seed);

} else {
phi::DenseTensor tmp_tensor;
tmp_tensor.Resize(common::make_ddim({n}));
T* tmp_data = tmp_tensor.mutable_data<T>(platform::CPUPlace());
random_permate<T>(tmp_data, n, seed);
random_permute<T>(tmp_data, n, seed);
framework::TensorCopy(tmp_tensor, ctx.GetPlace(), out_tensor);
}
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/read_file_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ class ReadFileOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(
This operator read a file.
)DOC");
AddAttr<std::string>("filename", "Path of the file to be readed.")
AddAttr<std::string>("filename", "Path of the file to be read.")
.SetDefault({});
}
};
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/operators/repeat_interleave_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ class RepeatInterleaveOp : public framework::OperatorWithKernel {
} else if (repeats > 0) {
output_dim[dim] = input_dim[dim] * repeats;
}
VLOG(3) << "infershap out " << output_dim[dim];
VLOG(3) << "infershape out " << output_dim[dim];
ctx->SetOutputDim("Out", common::make_ddim(output_dim));
auto type = ctx->GetInputsVarType("X")[0];
if (type == framework::proto::VarType::LOD_TENSOR) {
Expand Down Expand Up @@ -124,7 +124,7 @@ class RepeatInterleaveOpMaker : public framework::OpProtoAndCheckerMaker {
void Make() override {
AddInput("X", "(Tensor) the input tensor.");
AddInput("RepeatsTensor",
"the 1-D tensor containing the repeats alongsize the axis.")
"the 1-D tensor containing the repeats alongside the axis.")
.AsDispensable();
AddOutput("Out", "the output tensor.");
AddAttr<int>("Repeats", "the number of repetitions for each element.")
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/reshape_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -581,7 +581,7 @@ class Reshape2CompositeGradOpMaker : public prim::CompositeGradOpMakerBase {

auto *dx_ptr = this->GetOutputPtr(&dx);
std::string dx_name = this->GetOutputName(dx);
VLOG(6) << "Runing reshape2_grad composite func";
VLOG(6) << "Running reshape2_grad composite func";
prim::reshape_grad<prim::DescTensor>(x, out_grad, dx_ptr);
this->RecoverOutputName(dx, dx_name);
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/split_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ class SplitCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
"We don't support dynamic index or sections from tensor for split "
"composite grad for now. "));
} else {
VLOG(6) << "Runing split_grad composite func";
VLOG(6) << "Running split_grad composite func";
prim::split_grad<prim::DescTensor>(out_grad, axis, dx_ptr);
this->RecoverOutputName(input_grad, dx_name);
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/sum_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker {
void Make() override {
AddInput(
"X",
"A Varaible list. The shape and data type of the list elements"
"A Variable list. The shape and data type of the list elements"
"should be consistent. Variable can be multi-dimensional Tensor"
"or phi::DenseTensor, and data types can be: float32, float64, int32, "
"int64.")
Expand Down
8 changes: 4 additions & 4 deletions paddle/fluid/operators/svd_helper.h
Original file line number Diff line number Diff line change
Expand Up @@ -271,7 +271,7 @@ struct DiagAndFillFunctor {

template <typename DeviceContext, typename T, typename ValueType = T>
struct DeviceIndependenceTensorOperations {
// 1. Device indenpendence, for kernel reuse.
// 1. Device independence, for kernel reuse.
// 2. Input and output is always tensor type.
// 3. output phi::DenseTensor is always allocated
// 4. Basic phi::DenseTensor operator is supported
Expand Down Expand Up @@ -315,7 +315,7 @@ struct DeviceIndependenceTensorOperations {
}

phi::DenseTensor Transpose(const phi::DenseTensor& x) {
// transpose the last two dimision
// transpose the last two dimension
phi::DenseTensor ret;
auto x_dim = x.dims();
auto x_vec = common::vectorize<int>(x_dim);
Expand Down Expand Up @@ -745,15 +745,15 @@ struct DeviceIndependenceTensorOperations {
const framework::AttributeMap& attrs,
std::vector<int> out_shape,
NameOutTensor out_str = {"Out"}) {
// varialble set dims must be phi::DenseTensor / SelectedRowTensor
// variable set dims must be phi::DenseTensor / SelectedRowTensor
framework::Scope& local_scope = context.scope().NewScope();
framework::VariableNameMap op_outputs;
for (auto out_name : out_str) {
local_scope.Var("tmp_" + out_name)->GetMutable<phi::DenseTensor>();
op_outputs[out_name].emplace_back("tmp_" + out_name);
}
auto out_var = local_scope.Var("tmp_Out"); // return the Out
// create Out phi::DenseTensor and allocat memory
// create Out phi::DenseTensor and allocate memory
out_var->GetMutable<phi::DenseTensor>()->mutable_data<T>(
common::make_ddim(out_shape), context.GetPlace());
// common::make_ddim(out_shape)
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/operators/tdm_sampler_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -214,9 +214,9 @@ void TDMSamplerInner(const framework::ExecutionContext &context,
label_vec[i * sample_res_length + offset] = 0;
mask_vec[i * sample_res_length + offset] = 1;
VLOG(3) << "TDM: node id: " << travel_data[start_offset + layer_idx]
<< " Res append negitive "
<< " Res append negative "
<< output_vec[i * sample_res_length + offset]
<< " Label append negitive "
<< " Label append negative "
<< label_vec[i * sample_res_length + offset]
<< " Mask append value "
<< mask_vec[i * sample_res_length + offset];
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,7 @@ class TeacherStudentSigmoidLossGradientOp
platform::errors::InvalidArgument(
"When Attr(soft_label) == false, the 2nd dimension of "
"Input(Label) should be 1. But received Input(Label)'s 2nd "
"dimemsion "
"dimension "
"is [%d]",
label_dims[1]));
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/tile_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ class TileCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
"We don't support RepeatTimes from tensor or repeat_times_tensor for "
"tile composite grad for now. "));
} else {
VLOG(6) << "Runing tile_grad composite func";
VLOG(6) << "Running tile_grad composite func";
prim::tile_grad<prim::DescTensor>(
x, out_grad, paddle::experimental::IntArray(repeat_times), dx_ptr);
this->RecoverOutputName(x_grad, dx_name);
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/top_k_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ class TopkKernel : public framework::OpKernel<T> {
T* output_data = output->mutable_data<T>(ctx.GetPlace());
int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace());

// reshape input to a flattern matrix(like flat_inner_dims)
// reshape input to a flatten matrix(like flat_inner_dims)
framework::DDim inputdims = input->dims();
const size_t row =
common::product(common::slice_ddim(inputdims, 0, inputdims.size() - 1));
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/top_k_op_xpu.cc
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ class TopkXPUKernel : public framework::OpKernel<T> {
int* indices_int_data = RAII_GUARD.alloc_l3_or_gm<int>(indices->numel());
PADDLE_ENFORCE_XDNN_NOT_NULL(indices_int_data);

// reshape input to a flattern matrix(like flat_inner_dims)
// reshape input to a flatten matrix(like flat_inner_dims)
framework::DDim inputdims = input->dims();
const size_t row =
common::product(common::slice_ddim(inputdims, 0, inputdims.size() - 1));
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/operators/transfer_layout_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ class TransferLayoutFunctor {
}
VLOG(4) << "TransDataLayoutFromOneDNN: " << in_layout << "->"
<< target_layout;
// Case2 - transfrom from ONEDNN OPKernel to Non-ONEDNN OPKernel
// Case2 - transform from ONEDNN OPKernel to Non-ONEDNN OPKernel
// Do transform via ONEDNN lib
phi::funcs::TransDataLayoutFromOneDNN(in_layout,
target_layout,
Expand All @@ -119,11 +119,11 @@ class TransferLayoutFunctor {
dev_ctx_.GetPlace());
}
} else {
// Case3 - transfrom between Non-ONEDNN OPKernels
// Case3 - transform between Non-ONEDNN OPKernels
TransDataLayout(dev_ctx_, in_tensor, &out_tensor);
}
#else
// Case3 - transfrom between Non-ONEDNN OPKernels
// Case3 - transform between Non-ONEDNN OPKernels
TransDataLayout(dev_ctx_, in_tensor, &out_tensor);
#endif
framework::SetTensorToVariable(*in_, out_tensor, out_);
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/transpose_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -202,7 +202,7 @@ class Transpose2CompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
std::string dx_name = this->GetOutputName(dx);
std::vector<int> axis =
static_cast<std::vector<int>>(this->Attr<std::vector<int>>("axis"));
VLOG(6) << "Runing transpose2_grad composite func";
VLOG(6) << "Running transpose2_grad composite func";
prim::transpose_grad<prim::DescTensor>(out_grad, axis, dx_ptr);
this->RecoverOutputName(dx, dx_name);
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/prim/utils/static/composite_grad_desc_maker.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ class CompositeGradOpMakerBase {
virtual ~CompositeGradOpMakerBase() = default;

virtual std::vector<std::unique_ptr<framework::OpDesc>> operator()() {
VLOG(3) << "Runing Composite Grad func for " << fwd_op_.Type() << "_grad ";
VLOG(3) << "Running Composite Grad func for " << fwd_op_.Type() << "_grad ";
this->Apply();
std::vector<std::unique_ptr<framework::OpDesc>> ops;
// TODO(jiabin): Support multiple blocks later
Expand Down