Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions paddle/framework/ddim.cc
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,14 @@ DDim make_ddim(const std::vector<int64_t>& dims) {
return result;
}

/// Builds a DDim from a vector of int extents by widening each element to
/// int64_t and delegating to the int64_t overload. The iterator-range
/// constructor performs the implicit int -> int64_t conversion directly,
/// replacing the previous reserve + std::transform + cast-lambda sequence.
DDim make_ddim(const std::vector<int>& dims) {
  return make_ddim(std::vector<int64_t>(dims.begin(), dims.end()));
}

/// @cond HIDDEN
// XXX For some reason, putting this in an anonymous namespace causes errors
class DynamicMutableIndexer : public boost::static_visitor<int64_t&> {
Expand Down
2 changes: 2 additions & 0 deletions paddle/framework/ddim.h
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,8 @@ DDim make_ddim(const std::vector<int64_t>& dims);
*/
DDim make_ddim(std::initializer_list<int64_t> dims);

DDim make_ddim(const std::vector<int>& dims);

int64_t get(const DDim& dim, int idx);
void set(DDim& dim, int idx, int val);

Expand Down
38 changes: 28 additions & 10 deletions paddle/framework/eigen.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,27 +14,43 @@ limitations under the License. */

#pragma once

#include <boost/variant.hpp>
#include "paddle/framework/ddim.h"
#include "paddle/framework/tensor.h"
#include "unsupported/Eigen/CXX11/Tensor"

namespace paddle {
namespace framework {

// EigenDim converts paddle::platform::DDim into Eigen::DSizes.
template <int D>
struct EigenDim {
using Type = Eigen::DSizes<Eigen::DenseIndex, D>;
template <int arity>
using EigenDim = Eigen::DSizes<Eigen::DenseIndex, arity>;

static Type From(const DDim& dims) {
PADDLE_ENFORCE(arity(dims) == D, "D must match arity(DDim)");
Type ret;
for (int64_t d = 0; d < arity(dims); d++) {
using EigenDDim = boost::variant<EigenDim<1>, EigenDim<2>, EigenDim<3>,
EigenDim<4>, EigenDim<5>, EigenDim<6>,
EigenDim<7>, EigenDim<8>, EigenDim<9>>;

// Visitor that converts whichever rank-N Dim a DDim currently holds into
// the matching Eigen::DSizes, wrapped in the EigenDDim variant. Intended
// to be dispatched through boost::apply_visitor (see DDimToEigenDDim).
struct EigenDDimConvertVisitor : public boost::static_visitor<EigenDDim> {
  template <typename DimType>
  EigenDDim operator()(const DimType& dims) const {
    constexpr int rank = DimType::dimensions;
    Eigen::DSizes<Eigen::DenseIndex, rank> sizes;
    for (int64_t idx = 0; idx < rank; ++idx) {
      sizes[idx] = dims[idx];
    }
    return sizes;
  }
};

// Maps a DDim (variant over Dim<1..9>) onto the corresponding EigenDDim
// (variant over EigenDim<1..9>), preserving the rank and every extent.
inline EigenDDim DDimToEigenDDim(const DDim& dims) {
  EigenDDimConvertVisitor convert;
  return dims.apply_visitor(convert);
}

template <typename Visitor>
inline auto VisitEigenDDim(Visitor visitor, const EigenDDim& ddim) ->
typename Visitor::result_type {
return boost::apply_visitor(visitor, ddim);
}

// Interpret paddle::platform::Tensor as EigenTensor and EigenConstTensor.
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
Expand All @@ -47,13 +63,15 @@ struct EigenTensor {
Eigen::TensorMap<Eigen::Tensor<const T, D, MajorType, IndexType>>;

static Type From(Tensor& tensor, DDim dims) {
return Type(tensor.data<T>(), EigenDim<D>::From(dims));
return Type(tensor.data<T>(),
boost::get<EigenDim<D>>(DDimToEigenDDim(dims)));
}

static Type From(Tensor& tensor) { return From(tensor, tensor.dims_); }

static ConstType From(const Tensor& tensor, DDim dims) {
return ConstType(tensor.data<T>(), EigenDim<D>::From(dims));
return ConstType(tensor.data<T>(),
boost::get<EigenDim<D>>(DDimToEigenDDim(dims)));
}

static ConstType From(const Tensor& tensor) {
Expand Down
35 changes: 30 additions & 5 deletions paddle/framework/eigen_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,36 @@
namespace paddle {
namespace framework {

TEST(EigenDim, From) {
EigenDim<3>::Type ed = EigenDim<3>::From(make_ddim({1, 2, 3}));
ASSERT_EQ(1, ed[0]);
ASSERT_EQ(2, ed[1]);
ASSERT_EQ(3, ed[2]);
// Converting a rank-3 DDim must yield the EigenDim<3> alternative with the
// same extents in the same order.
TEST(EigenDim, FromDDimToEigenDDim) {
  const auto converted = DDimToEigenDDim({1, 2, 3});
  const auto& dim3 = boost::get<EigenDim<3>>(converted);
  for (int i = 0; i < 3; ++i) {
    ASSERT_EQ(i + 1, dim3[i]);
  }
}

// Visitor that multiplies together every extent of an EigenDim<N>.
// (Template parameter renamed so it no longer shadows the EigenDim alias.)
struct ProductVisit : public boost::static_visitor<int64_t> {
  template <typename EigenDimType>
  int64_t operator()(const EigenDimType& dim) const {
    int64_t product = 1;
    for (const auto& extent : dim) {
      product *= extent;
    }
    return product;
  }
};

// VisitEigenDDim must apply the visitor to the held alternative: the
// product of the extents {1..5} equals 5! = 120.
TEST(EigenDim, Visit) {
  std::vector<int64_t> extents;
  int64_t expected = 1;
  for (int64_t v = 1; v <= 5; ++v) {
    extents.push_back(v);
    expected *= v;
  }
  auto eigen_ddim = DDimToEigenDDim(make_ddim(extents));

  ASSERT_EQ(expected, VisitEigenDDim(ProductVisit(), eigen_ddim));
}

TEST(Eigen, Tensor) {
Expand Down
103 changes: 103 additions & 0 deletions paddle/operators/expand_op.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/expand_op.h"

namespace paddle {
namespace operators {

using framework::Tensor;

// Forward expand operator: validates inputs/attributes and infers the
// output shape as x_dims[i] * expandTimes[i] for every dimension i.
class ExpandOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(const framework::InferShapeContext& ctx) const override {
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "X must be initialized.");
    std::vector<int> expand_times = Attr<std::vector<int>>("expandTimes");
    auto* x = ctx.Input<Tensor>("X");
    auto x_dims = x->dims();
    // Compute the rank once; previously the code mixed framework::arity()
    // and x_dims.size() for the same quantity.
    const int x_rank = framework::arity(x_dims);

    PADDLE_ENFORCE_EQ(static_cast<size_t>(x_rank), expand_times.size(),
                      "Number of attribute (expandTimes) value must be equal "
                      "to rank of X.");
    // The op documentation promises rank in [1, 6]; enforce both bounds.
    PADDLE_ENFORCE_GE(x_rank, 1, "Rank of X must be at least 1.");
    PADDLE_ENFORCE_LE(x_rank, 6, "Rank of X must not be greater than 6.");

    std::vector<int64_t> out_shape(x_rank);
    for (size_t i = 0; i < expand_times.size(); ++i) {
      // Every dimension must be tiled at least once.
      PADDLE_ENFORCE_GE(expand_times[i], 1,
                        "Each value of expand times should not be "
                        "less than 1.");
      out_shape[i] = x_dims[i] * expand_times[i];
    }
    auto* out = ctx.Output<Tensor>("Out");
    out->Resize(framework::make_ddim(out_shape));
  }
};

// Declares the inputs, output, attributes, and user-facing documentation of
// the expand operator for the op registry.
class ExpandOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  ExpandOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input tensor.");
    AddOutput("Out", "Expanded result by tiling input X.");
    AddAttr<std::vector<int>>("expandTimes",
                              "Expand times for each dimension.");
    // Doc string rewritten: the previous text ("Please draw an attention
    // that size of 'expandTimes' must be same with rank") was broken English.
    AddComment(R"DOC(
Expand operator tiles the input by the given times. You should set the times
number for each dimension by providing attribute 'expandTimes'. The rank of the
input tensor should be in [1, 6]. Note that the size of 'expandTimes' must be
the same as the rank of the input tensor.
)DOC");
  }
};

// Gradient of the expand operator: verifies that Out@GRAD has exactly the
// shape the forward pass produced and propagates X's shape to X@GRAD.
class ExpandGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(const framework::InferShapeContext& ctx) const override {
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "X must be initialized.");
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                            "Input(Out@GRAD) should not be null.");
    auto x_dims = ctx.Input<Tensor>("X")->dims();
    std::vector<int> expand_times = Attr<std::vector<int>>("expandTimes");
    auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
    auto* x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));

    // Guard the element-wise check below: without this, an attribute longer
    // than the rank of X would index x_dims/out_dims out of range.
    PADDLE_ENFORCE_EQ(static_cast<size_t>(framework::arity(x_dims)),
                      expand_times.size(),
                      "Number of attribute (expandTimes) value must be equal "
                      "to rank of X.");

    for (size_t i = 0; i < expand_times.size(); ++i) {
      // Error message typo fixed: "crroresponding" -> "corresponding".
      PADDLE_ENFORCE_EQ(x_dims[i] * expand_times[i], out_dims[i],
                        "Size of each dimension of Input(Out@GRAD) should be "
                        "equal to multiplication of corresponding sizes of "
                        "Input(X) and expandTimes.");
    }

    // X@GRAD is optional; only resize it when the caller requested it.
    if (x_grad) x_grad->Resize(x_dims);
  }
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
// Register the forward op, its gradient op, and their CPU kernels.
// Only a float kernel is provided for now.
REGISTER_OP(expand, ops::ExpandOp, ops::ExpandOpMaker, expand_grad,
ops::ExpandGradOp);
REGISTER_OP_CPU_KERNEL(expand,
ops::ExpandKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
expand_grad, ops::ExpandGradKernel<paddle::platform::CPUPlace, float>);
23 changes: 23 additions & 0 deletions paddle/operators/expand_op.cu
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

// Compile the Eigen expressions in expand_op.h for the GPU backend.
#define EIGEN_USE_GPU

#include "paddle/operators/expand_op.h"

namespace ops = paddle::operators;
// Register GPU kernels for the forward and gradient ops (float only).
REGISTER_OP_GPU_KERNEL(expand,
ops::ExpandKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
expand_grad, ops::ExpandGradKernel<paddle::platform::GPUPlace, float>);
Loading