Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 10 additions & 1 deletion paddle/fluid/pir/dialect/op_generator/op_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,10 @@
)
import gen as vjp_gen

# Note(Galaxy1458): need_export_symbol_op_list names the generated op classes
# that must export their symbols (TEST_API), because some unit tests link
# against the dynamic library and need to use these ops directly.
need_export_symbol_op_list = ['AbsOp', 'FullOp']

# =====================================
# String Template for h file code gen
# =====================================
Expand Down Expand Up @@ -89,7 +93,7 @@
"""

OP_DECLARE_TEMPLATE = """
class {op_name} : public pir::Op<{op_name}{interfaces}{traits}> {{
class {TEST_API} {op_name} : public pir::Op<{op_name}{interfaces}{traits}> {{
public:
using Op::Op;
static const char *name() {{ return "{dialect_op_name}"; }}
Expand Down Expand Up @@ -1351,8 +1355,12 @@ def AutoCodeGen(op_info_items, all_op_info_items, namespaces, dialect_name):
)

# gen op_declare_str/op_defined_str
TEST_API = ""
if op_class_name in need_export_symbol_op_list:
TEST_API = "TEST_API"
if len(op_non_mutable_attribute_name_list) == 0:
op_declare_str = OP_DECLARE_TEMPLATE.format(
TEST_API=TEST_API,
op_name=op_class_name,
dialect_op_name=op_dialect_name,
interfaces=op_interfaces_str,
Expand All @@ -1372,6 +1380,7 @@ def AutoCodeGen(op_info_items, all_op_info_items, namespaces, dialect_name):
op_defined_str = ""
else:
op_declare_str = OP_DECLARE_TEMPLATE.format(
TEST_API=TEST_API,
op_name=op_class_name,
dialect_op_name=op_dialect_name,
interfaces=op_interfaces_str,
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/common/data_type.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ using bfloat16 = ::phi::dtype::bfloat16;
using pstring = ::phi::dtype::pstring;

// The enum value are consistent with jit/property.proto
enum class DataType {
enum class TEST_API DataType {
UNDEFINED = 0,

BOOL,
Expand Down
9 changes: 5 additions & 4 deletions paddle/phi/common/int_array.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ template <typename T>
class IntArrayBase {
public:
// Constructor support implicit
IntArrayBase() = default;
TEST_API IntArrayBase() = default;

IntArrayBase(const std::vector<int64_t>& vec) : array_(vec) {} // NOLINT

Expand All @@ -58,12 +58,13 @@ class IntArrayBase {
explicit IntArrayBase(const common::DDim& dims);

// The Tensor must have one dim
IntArrayBase(const T& tensor); // NOLINT
TEST_API IntArrayBase(const T& tensor); // NOLINT

// The Tensor in vec must have only one element
IntArrayBase(const std::vector<T>& tensor_list); // NOLINT
TEST_API IntArrayBase(const std::vector<T>& tensor_list); // NOLINT

explicit IntArrayBase(const std::vector<phi::TensorRef>& tensor_ref_list);
TEST_API explicit IntArrayBase(
const std::vector<phi::TensorRef>& tensor_ref_list);

template <typename OtherT>
IntArrayBase(const IntArrayBase<OtherT>& other) : array_(other.GetData()) {}
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/core/infermeta_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ template const std::vector<std::string>& InferMetaContext::AttrAt(
template const Scalar& InferMetaContext::AttrAt(size_t idx) const;
template const std::vector<Scalar>& InferMetaContext::AttrAt(size_t idx) const;
template const IntArray& InferMetaContext::AttrAt(size_t idx) const;
template const DataType& InferMetaContext::AttrAt(size_t idx) const;
template TEST_API const DataType& InferMetaContext::AttrAt(size_t idx) const;
template const DataLayout& InferMetaContext::AttrAt(size_t idx) const;
template const Place& InferMetaContext::AttrAt(size_t idx) const;
template const TensorRef& InferMetaContext::AttrAt(size_t idx) const;
Expand Down
24 changes: 12 additions & 12 deletions paddle/phi/core/infermeta_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,32 +41,32 @@ class InferMetaContext {
const MetaConfig& GetMetaConfig() const;

void EmplaceBackInput(MetaTensor input);
void EmplaceBackOutput(MetaTensor output);
void EmplaceBackAttr(Attribute attr);
TEST_API void EmplaceBackOutput(MetaTensor output);
TEST_API void EmplaceBackAttr(Attribute attr);

void EmplaceBackInputs(
paddle::small_vector<MetaTensor, phi::kInputSmallVectorSize> inputs);
void EmplaceBackOutputs(
paddle::small_vector<MetaTensor, phi::kOutputSmallVectorSize> outputs);

virtual const MetaTensor& InputAt(size_t idx) const;
TEST_API virtual const MetaTensor& InputAt(size_t idx) const;

virtual std::vector<const MetaTensor*> InputsBetween(size_t start,
size_t end) const;
virtual paddle::optional<std::vector<const MetaTensor*>>
TEST_API virtual std::vector<const MetaTensor*> InputsBetween(
size_t start, size_t end) const;
TEST_API virtual paddle::optional<std::vector<const MetaTensor*>>
OptionalInputsBetween(size_t start, size_t end) const;

virtual MetaTensor* MutableOutputAt(size_t idx);
virtual std::vector<MetaTensor*> MutableOutputBetween(size_t start,
size_t end);
TEST_API virtual MetaTensor* MutableOutputAt(size_t idx);
TEST_API virtual std::vector<MetaTensor*> MutableOutputBetween(size_t start,
size_t end);

template <typename AttrType>
const AttrType& AttrAt(size_t idx) const;
TEST_API const AttrType& AttrAt(size_t idx) const;

const Attribute& AttrAt(size_t idx) const;
TEST_API const Attribute& AttrAt(size_t idx) const;

const std::pair<int, int>& InputRangeAt(size_t idx) const;
const std::pair<int, int>& OutputRangeAt(size_t idx) const;
TEST_API const std::pair<int, int>& OutputRangeAt(size_t idx) const;

virtual ~InferMetaContext() = default;

Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/core/meta_tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ struct MetaConfig {
is_run_mkldnn_kernel(is_run_mkldnn_kernel) {} // NOLINT
};

class MetaTensor {
class TEST_API MetaTensor {
public:
typedef void (*unspecified_bool_type)();

Expand Down
6 changes: 3 additions & 3 deletions paddle/phi/infermeta/binary.h
Original file line number Diff line number Diff line change
Expand Up @@ -211,9 +211,9 @@ void DropoutNdInferMeta(const MetaTensor& x,
MetaTensor* out,
MetaTensor* mask);

void ElementwiseInferMeta(const MetaTensor& x,
const MetaTensor& y,
MetaTensor* out);
TEST_API void ElementwiseInferMeta(const MetaTensor& x,
const MetaTensor& y,
MetaTensor* out);

void ElementwiseRawInferMeta(const MetaTensor& x_meta,
const MetaTensor& y_meta,
Expand Down
4 changes: 3 additions & 1 deletion paddle/phi/infermeta/nullary.h
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,9 @@ void CreateVecShapeInferMeta(const std::vector<int64_t>& shape,

void CreateArrayInferMeta(DataType dtype, MetaTensor* out);

void CreateInferMeta(const IntArray& shape, DataType dtype, MetaTensor* out);
TEST_API void CreateInferMeta(const IntArray& shape,
DataType dtype,
MetaTensor* out);

void CreateInferMetaBase(const std::vector<int64_t>& shape,
DataType dtype,
Expand Down
8 changes: 4 additions & 4 deletions paddle/phi/kernels/elementwise_add_kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,10 @@

namespace phi {
template <typename T, typename Context>
void AddKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
DenseTensor* out);
TEST_API void AddKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
DenseTensor* out);

template <typename T, typename Context>
DenseTensor Add(const Context& dev_ctx,
Expand Down
17 changes: 3 additions & 14 deletions test/cpp/pir/core/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,20 +4,9 @@ paddle_test(ir_value_test SRCS ir_value_test.cc)
paddle_test(ir_op_test SRCS ir_op_test.cc DEPS test_dialect)
paddle_test(ir_region_test SRCS ir_region_test.cc)
paddle_test(ir_builder_test SRCS ir_builder_test.cc)
cc_test_old(
ir_program_test
SRCS
ir_program_test.cc
DEPS
common
gtest
pir
op_dialect_vjp
phi)

cc_test_old(ir_infershape_test SRCS ir_infershape_test.cc DEPS common gtest)

cc_test_old(scalar_attribute_test SRCS scalar_attribute_test.cc DEPS gtest)
paddle_test(ir_program_test SRCS ir_program_test.cc)
paddle_test(ir_infershape_test SRCS ir_infershape_test.cc)
paddle_test(scalar_attribute_test SRCS scalar_attribute_test.cc)

file(
DOWNLOAD https://paddle-ci.gz.bcebos.com/ir_translator_test/resnet50_main.prog
Expand Down
165 changes: 0 additions & 165 deletions test/cpp/pir/core/ir_program_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -69,142 +69,6 @@ void AddOp::Build(pir::Builder &,
IR_DECLARE_EXPLICIT_TEST_TYPE_ID(AddOp)
IR_DEFINE_EXPLICIT_TYPE_ID(AddOp)

// End-to-end Program test: registers the builtin and operator dialects,
// stores two float32 DenseTensor parameters, materializes them as variables,
// executes a custom AddOp on CPU, converts the result back to a parameter,
// and checks that Print() and operator<< produce identical text.
TEST(program_test, program) {
  // (1) Init environment.
  pir::IrContext *ctx = pir::IrContext::Instance();
  pir::Dialect *builtin_dialect =
      ctx->GetOrRegisterDialect<pir::BuiltinDialect>();
  builtin_dialect->RegisterOp<AddOp>();
  pir::Dialect *paddle_dialect =
      ctx->GetOrRegisterDialect<paddle::dialect::OperatorDialect>();

  // (2) Create an empty program object.
  pir::Program program(ctx);

  // (3) Create a float32 DenseTensor type and two parameters "a" and "b".
  pir::Type fp32_dtype = pir::Float32Type::get(ctx);
  phi::DDim dims = {2, 2};
  phi::DataLayout data_layout = phi::DataLayout::NCHW;
  phi::LoD lod = {{0, 1, 2}};
  size_t offset = 0;
  pir::Type dense_tensor_dtype = paddle::dialect::DenseTensorType::get(
      ctx, fp32_dtype, dims, data_layout, lod, offset);

  std::vector<float> data_a = {1, 2, 3, 4};
  std::unique_ptr<pir::Parameter> parameter_a =
      std::make_unique<pir::Parameter>(reinterpret_cast<void *>(data_a.data()),
                                       4 * sizeof(float),
                                       dense_tensor_dtype);
  program.SetParameter("a", std::move(parameter_a));
  EXPECT_EQ(program.parameters_num(), 1u);

  std::vector<float> data_b = {5, 6, 7, 8};
  std::unique_ptr<pir::Parameter> parameter_b =
      std::make_unique<pir::Parameter>(reinterpret_cast<void *>(data_b.data()),
                                       4 * sizeof(float),
                                       dense_tensor_dtype);
  program.SetParameter("b", std::move(parameter_b));
  EXPECT_EQ(program.parameters_num(), 2u);

  // (4) Def a = ParameterOp("a"), and create a DenseTensor for a via the
  // dialect's ParameterConvertInterface.
  pir::Builder builder(ctx, program.block());
  auto op1 = builder.Build<pir::ParameterOp>("a", dense_tensor_dtype);

  EXPECT_EQ(&program, op1->GetParentProgram());
  EXPECT_EQ(op1->result_type(0).dialect().id(), paddle_dialect->id());
  using Interface = paddle::dialect::ParameterConvertInterface;
  Interface *a_interface =
      op1->result_type(0).dialect().GetRegisteredInterface<Interface>();
  std::shared_ptr<paddle::framework::Variable> a_var =
      a_interface->ParameterToVariable(program.GetParameter("a"));
  const phi::DenseTensor &a_tensor = a_var->Get<phi::DenseTensor>();
  EXPECT_EQ(a_tensor.numel(), 4);
  EXPECT_EQ(a_tensor.dims(), dims);
  EXPECT_EQ(a_tensor.dtype(), paddle::dialect::TransToPhiDataType(fp32_dtype));
  EXPECT_EQ(a_tensor.layout(), data_layout);
  EXPECT_EQ(a_tensor.lod(), lod);
  EXPECT_EQ(a_tensor.offset(), offset);
  for (int64_t i = 0; i < a_tensor.numel(); i++) {
    EXPECT_EQ(*(a_tensor.data<float>() + i), data_a[i]);
  }

  // (5) Def b = ParameterOp("b"), and create a DenseTensor for b.
  auto op2 = builder.Build<pir::ParameterOp>("b", dense_tensor_dtype);

  EXPECT_EQ(op2->result_type(0).dialect().id(), paddle_dialect->id());
  Interface *b_interface =
      op2->result_type(0).dialect().GetRegisteredInterface<Interface>();
  std::shared_ptr<paddle::framework::Variable> b_var =
      b_interface->ParameterToVariable(program.GetParameter("b"));
  const phi::DenseTensor &b_tensor = b_var->Get<phi::DenseTensor>();
  EXPECT_EQ(b_tensor.numel(), 4);
  EXPECT_EQ(b_tensor.dims(), dims);
  EXPECT_EQ(b_tensor.dtype(), paddle::dialect::TransToPhiDataType(fp32_dtype));
  EXPECT_EQ(b_tensor.layout(), data_layout);
  EXPECT_EQ(b_tensor.lod(), lod);
  EXPECT_EQ(b_tensor.offset(), offset);
  for (int64_t i = 0; i < b_tensor.numel(); i++) {
    EXPECT_EQ(*(b_tensor.data<float>() + i), data_b[i]);
  }

  // (6) Def c = AddOp(a, b), then execute the same computation eagerly with
  // the phi CPU kernel and keep the result in variable_c for step (8).
  auto op3 =
      builder.Build<AddOp>(op1->result(0), op2->result(0), dense_tensor_dtype);
  phi::CPUContext *dev_ctx = static_cast<phi::CPUContext *>(
      paddle::platform::DeviceContextPool::Instance().Get(
          paddle::platform::CPUPlace()));
  phi::DenseTensor c_tensor =
      phi::Add<float, phi::CPUContext>(*dev_ctx, a_tensor, b_tensor);
  std::shared_ptr<paddle::framework::Variable> variable_c =
      std::make_shared<paddle::framework::Variable>();
  auto *dst_tensor = variable_c->GetMutable<phi::DenseTensor>();
  *dst_tensor = c_tensor;
  EXPECT_EQ(dst_tensor->numel(), b_tensor.numel());
  EXPECT_EQ(dst_tensor->dims(), b_tensor.dims());
  EXPECT_EQ(dst_tensor->dtype(), b_tensor.dtype());
  EXPECT_EQ(dst_tensor->layout(), b_tensor.layout());
  EXPECT_EQ(dst_tensor->lod(), b_tensor.lod());
  EXPECT_EQ(dst_tensor->offset(), b_tensor.offset());
  for (int64_t i = 0; i < dst_tensor->numel(); i++) {
    EXPECT_EQ(*(dst_tensor->data<float>() + i), data_a[i] + data_b[i]);
  }

  // (7) Def AbsOp(a) and check its yaml-info interface reports input "x".
  // (The op is built on op1, i.e. parameter "a".)
  auto abs_op = builder.Build<paddle::dialect::AbsOp>(op1->result(0));
  paddle::dialect::OpYamlInfoInterface interface =
      abs_op->dyn_cast<paddle::dialect::OpYamlInfoInterface>();
  EXPECT_TRUE(std::get<0>(interface.GetOpInfo())[0].name == "x");

  // (8) Def SetParameterOp(c, "c") and round-trip the eager result back into
  // a pir::Parameter via VariableToParameter.
  auto op4 = builder.Build<pir::SetParameterOp>(op3->result(0), "c");

  EXPECT_EQ(op4->operand(0).type().dialect().id(), paddle_dialect->id());
  Interface *c_interface =
      op4->operand(0).type().dialect().GetRegisteredInterface<Interface>();
  std::unique_ptr<pir::Parameter> parameter_c =
      c_interface->VariableToParameter(variable_c.get());
  EXPECT_EQ(parameter_c->type(), dense_tensor_dtype);
  for (int64_t i = 0; i < dst_tensor->numel(); i++) {
    EXPECT_EQ(*(dst_tensor->data<float>() + i),
              *(static_cast<float *>(parameter_c->data()) + i));
  }
  program.SetParameter("c", std::move(parameter_c));

  // (9) Traverse the program: 5 ops (two ParameterOp, AddOp, AbsOp,
  // SetParameterOp) and 3 parameters; Print() and operator<< must agree.
  EXPECT_EQ(program.block()->size(), 5u);
  EXPECT_EQ(program.parameters_num(), 3u);

  std::stringstream ss;
  program.Print(ss);

  std::stringstream ss_ostream;
  ss_ostream << program;

  EXPECT_EQ(ss.str(), ss_ostream.str());
}

TEST(program_test, slice_combine_test) {
// (1) Init environment.
pir::IrContext *ctx = pir::IrContext::Instance();
Expand Down Expand Up @@ -261,32 +125,3 @@ TEST(program_test, slice_combine_test) {
// (8) Traverse Program
EXPECT_EQ(program.block()->size() == 4, true);
}

// Builder test: builds a FullOp and a ConstantOp into a fresh program and
// checks op counts, operand/result/attribute arity, and the output tensor
// type's offset and dims.
TEST(program_test, builder) {
  pir::IrContext *ctx = pir::IrContext::Instance();
  ctx->GetOrRegisterDialect<paddle::dialect::OperatorDialect>();
  pir::Program program(ctx);
  pir::Builder builder = pir::Builder(ctx, program.block());

  // full([2, 2], 1.5, float32, cpu) -> one op, no operands, one result.
  paddle::dialect::FullOp full_op = builder.Build<paddle::dialect::FullOp>(
      std::vector<int64_t>{2, 2}, 1.5, phi::DataType::FLOAT32, phi::CPUPlace());
  pir::Type full_op_output = full_op->result_type(0);
  EXPECT_EQ(program.block()->size(), 1u);
  EXPECT_EQ(program.block()->back(), *full_op.operation());
  EXPECT_EQ(full_op.num_operands(), 0u);
  EXPECT_EQ(full_op.num_results(), 1u);
  EXPECT_EQ(full_op.attributes().size(), 5u);
  // Hoist the dyn_cast so the DenseTensorType is resolved once.
  auto out_tensor_type =
      full_op_output.dyn_cast<paddle::dialect::DenseTensorType>();
  EXPECT_EQ(out_tensor_type.offset(), 0u);
  for (auto dim : common::vectorize(out_tensor_type.dims())) {
    EXPECT_EQ(dim, 2);
  }

  // constant(2 : i32) -> second op in the block, value round-trips.
  pir::ConstantOp constant = builder.Build<pir::ConstantOp>(
      pir::Int32Attribute::get(ctx, 2), pir::Int32Type::get(ctx));
  EXPECT_EQ(program.block()->size(), 2u);
  EXPECT_EQ(constant.value().dyn_cast<pir::Int32Attribute>().data(), 2);
}