Skip to content

Commit 6524fa8

Browse files
authored
Add CINN Compile Option (#36292)
Add a CINN compile option in CMake. You can now enable CINN in Paddle by passing `-DWITH_CINN=ON` when running `cmake`. To test it, run `make cinn_lib_test -j` and `ctest -R cinn_lib_test`. Notes: 1. Before running the test, set ``` export runtime_include_dir=${CINN_SOURCE_DIR}/cinn/runtime/cuda ``` where `${CINN_SOURCE_DIR}` should be set based on your CINN directory. 2. CINN is under active development, so you may have to change `CINN_GIT_TAG` to the git commit you need.
1 parent 4bd1977 commit 6524fa8

File tree

4 files changed

+271
-0
lines changed

4 files changed

+271
-0
lines changed

CMakeLists.txt

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -214,6 +214,7 @@ option(PY_VERSION "Compile PaddlePaddle with python3 support" ${PY_VER
214214
option(WITH_DGC "Use DGC(Deep Gradient Compression) or not" ${WITH_DISTRIBUTE})
215215
option(SANITIZER_TYPE "Choose the type of sanitizer, options are: Address, Leak, Memory, Thread, Undefined" OFF)
216216
option(WITH_LITE "Compile Paddle Fluid with Lite Engine" OFF)
217+
option(WITH_CINN "Compile PaddlePaddle with CINN" OFF)
217218
option(WITH_NCCL "Compile PaddlePaddle with NCCL support" ON)
218219
option(WITH_RCCL "Compile PaddlePaddle with RCCL support" ON)
219220
option(WITH_XPU_BKCL "Compile PaddlePaddle with BAIDU KUNLUN XPU BKCL" OFF)
@@ -299,6 +300,10 @@ if(WITH_GPU)
299300
endif()
300301
endif()
301302

303+
if(WITH_CINN)
304+
include(cinn)
305+
endif()
306+
302307
if(WITH_ROCM)
303308
include(hip)
304309
include(miopen) # set miopen libraries, must before configure

cmake/cinn.cmake

Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,112 @@
1+
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Builds CINN (Compiler Infrastructure for Neural Networks) as an external
# project and exposes it to the rest of Paddle as the imported shared
# library target `cinn`, plus imported targets for CINN's bundled
# dependencies (absl, isl, llvm).

if(NOT WITH_CINN)
  return()
endif()

# TODO(zhhsplendid): CINN has lots of warnings during early development.
# They will be treated as errors under paddle. We set no-error now and we will
# clean the code in the future.
add_definitions(-w)

######################################
# Build CINN from Git External Project
######################################
include(ExternalProject)
set(CINN_SOURCE_DIR ${THIRD_PARTY_PATH}/CINN)
# TODO(zhhsplendid): Modify git tag after we have release tag
set(CINN_GIT_TAG 3f004bfa3ed273ecf1de8e7b946433038c79b84f)
set(CINN_OPTIONAL_ARGS
    -DWITH_CUDA=${WITH_GPU}
    -DWITH_CUDNN=${WITH_GPU}
    -DPUBLISH_LIBS=ON)
# $(MAKE) (not ${CMAKE_COMMAND} --build) lets the outer make share its
# jobserver with the sub-build.
set(CINN_BUILD_COMMAND $(MAKE) cinncore -j && $(MAKE) cinnapi -j)
ExternalProject_Add(
  external_cinn
  ${EXTERNAL_PROJECT_LOG_ARGS}
  GIT_REPOSITORY "${GIT_URL}/PaddlePaddle/CINN.git"
  GIT_TAG ${CINN_GIT_TAG}
  PREFIX ${CINN_SOURCE_DIR}
  UPDATE_COMMAND ""
  BUILD_COMMAND ${CINN_BUILD_COMMAND}
  INSTALL_COMMAND ""
  CMAKE_ARGS ${CINN_OPTIONAL_ARGS})

# NOTE: this deliberately overwrites the CINN_SOURCE_DIR set above with the
# actual checkout/build locations reported by ExternalProject.
ExternalProject_Get_property(external_cinn BINARY_DIR)
ExternalProject_Get_property(external_cinn SOURCE_DIR)
set(CINN_BINARY_DIR ${BINARY_DIR})
set(CINN_SOURCE_DIR ${SOURCE_DIR})

message(STATUS "CINN BINARY_DIR: ${CINN_BINARY_DIR}")
message(STATUS "CINN SOURCE_DIR: ${CINN_SOURCE_DIR}")

#########################
# Add CINN's dependencies
#########################

# Add absl. libabsl_base.a is the IMPORTED_LOCATION; the remaining absl
# archives are attached as INTERFACE link libraries so consumers get all
# of them transitively.
set(ABSL_LIB_NAMES
    hash
    wyhash
    city
    strings
    throw_delegate
    bad_any_cast_impl
    bad_optional_access
    bad_variant_access
    raw_hash_set)
set(ABSL_LIB_DIR "${CINN_BINARY_DIR}/dist/third_party/absl/lib")
set(ABSL_INCLUDE_DIR "${CINN_BINARY_DIR}/dist/third_party/absl/include")
add_library(absl STATIC IMPORTED GLOBAL)
set_target_properties(absl PROPERTIES IMPORTED_LOCATION ${ABSL_LIB_DIR}/libabsl_base.a)
foreach(lib_name ${ABSL_LIB_NAMES})
  target_link_libraries(absl INTERFACE ${ABSL_LIB_DIR}/libabsl_${lib_name}.a)
endforeach()
include_directories(${ABSL_INCLUDE_DIR})

# Add isl
set(ISL_LIB_DIR "${CINN_BINARY_DIR}/dist/third_party/isl/lib")
set(ISL_INCLUDE_DIR "${CINN_BINARY_DIR}/dist/third_party/isl/include")
add_library(isl STATIC IMPORTED GLOBAL)
set_target_properties(isl PROPERTIES IMPORTED_LOCATION ${ISL_LIB_DIR}/libisl.a)
include_directories(${ISL_INCLUDE_DIR})

# Add LLVM. Same pattern as absl: libLLVMCore.a is the primary location,
# extra components listed in LLVM_LIB_NAMES are linked transitively.
set(LLVM_LIB_NAMES
    ExecutionEngine)
set(LLVM_LIB_DIR "${CINN_BINARY_DIR}/dist/third_party/llvm/lib")
set(LLVM_INCLUDE_DIR "${CINN_BINARY_DIR}/dist/third_party/llvm/include")
add_library(llvm STATIC IMPORTED GLOBAL)
set_target_properties(llvm PROPERTIES IMPORTED_LOCATION ${LLVM_LIB_DIR}/libLLVMCore.a)
foreach(lib_name ${LLVM_LIB_NAMES})
  target_link_libraries(llvm INTERFACE ${LLVM_LIB_DIR}/libLLVM${lib_name}.a)
endforeach()
include_directories(${LLVM_INCLUDE_DIR})

######################################################
# Put external_cinn and dependencies together as a lib
######################################################

set(CINN_LIB_NAME "libcinnapi.so")
set(CINN_LIB_LOCATION "${CINN_BINARY_DIR}/dist/cinn/lib")
set(CINN_INCLUDE_DIR "${CINN_BINARY_DIR}/dist/cinn/include")

add_library(cinn SHARED IMPORTED GLOBAL)
set_target_properties(cinn PROPERTIES IMPORTED_LOCATION "${CINN_LIB_LOCATION}/${CINN_LIB_NAME}")
include_directories(${CINN_INCLUDE_DIR})
# BUGFIX: Paddle's gflags target is named `gflags`, not `gflag`; with
# CMP0046 set to NEW a nonexistent dependency is a configure error.
add_dependencies(cinn external_cinn absl isl llvm glog gflags)

paddle/fluid/framework/ir/CMakeLists.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,9 @@ cc_test(pass_test SRCS pass_test.cc DEPS graph pass graph_helper)
143143
cc_test(graph_test SRCS graph_test.cc DEPS graph graph_helper op_registry)
144144
cc_test(graph_helper_test SRCS graph_helper_test.cc DEPS graph graph_helper op_registry)
145145
cc_test(graph_to_program_pass_test SRCS graph_to_program_pass_test.cc DEPS graph_to_program_pass)
146+
if (WITH_CINN)
147+
cc_test(cinn_lib_test SRCS cinn_lib_test.cc DEPS cinn)
148+
endif()
146149
cc_test(cost_model_test SRCS cost_model_test.cc DEPS cost_model op_registry)
147150
cc_test(test_graph_pattern_detector SRCS graph_pattern_detector_tester.cc DEPS graph_pattern_detector)
148151
cc_test(test_op_compat_sensible_pass SRCS op_compat_sensible_pass_tester.cc DEPS op_compat_sensible_pass)
Lines changed: 151 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,151 @@
1+
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License. */
14+
15+
#include <glog/logging.h>
16+
#include <gtest/gtest.h>
17+
18+
#include <algorithm>
19+
#include <memory>
20+
#include <random>
21+
#include <vector>
22+
23+
#ifdef PADDLE_WITH_CUDA
24+
#include <cuda_runtime.h>
25+
#endif
26+
27+
#include "cinn/common/target.h"
28+
#include "cinn/frontend/net_builder.h"
29+
#include "cinn/frontend/syntax.h"
30+
#include "cinn/hlir/framework/graph.h"
31+
#include "cinn/hlir/framework/graph_compiler.h"
32+
#include "cinn/hlir/framework/pass.h"
33+
#include "cinn/hlir/framework/tensor.h"
34+
#include "cinn/hlir/op/use_ops.h"
35+
#include "cinn/hlir/pass/use_pass.h"
36+
37+
namespace cinn {
38+
namespace frontend {
39+
40+
// Builds a small frontend Program: two chained elementwise adds over
// 32x24 float inputs (c = x + y, then x + c).
Program CreateAddProgram() {
  constexpr int kRows = 32;
  constexpr int kCols = 24;

  NetBuilder builder("net_builder");
  auto input_x = builder.CreateInput(Float(32), {kRows, kCols});
  auto input_y = builder.CreateInput(Float(32), {kRows, kCols});
  // Each add() records an instruction in the builder, so both calls are
  // required even though the second result is not read here.
  auto first_sum = builder.add(input_x, input_y);
  auto second_sum = builder.add(input_x, first_sum);

  return builder.Build();
}
53+
54+
// Fills `tensor` with uniform random floats in [0, 1).
// Values are generated on the host; under CUDA the pointer returned by
// mutable_data lives on the device, so the buffer is transferred with
// cudaMemcpy instead of a host-side copy.
void SetRandData(hlir::framework::Tensor tensor, Target target) {
  auto* data = tensor->mutable_data<float>(target);
  std::random_device seed;
  std::default_random_engine engine(seed());
  std::uniform_real_distribution<float> dist(0.f, 1.f);
  size_t num_ele = tensor->shape().numel();
  std::vector<float> random_data(num_ele);
  // std::generate replaces the hand-rolled index loop.
  std::generate(random_data.begin(), random_data.end(),
                [&]() { return dist(engine); });

#ifdef PADDLE_WITH_CUDA
  cudaMemcpy(data, random_data.data(), num_ele * sizeof(float),
             cudaMemcpyHostToDevice);
#else
  std::copy(random_data.begin(), random_data.end(), data);
#endif
}
72+
73+
// Smoke test: building the add program succeeds and every recorded
// instruction is printable.
TEST(net_build, basic) {
  auto program = CreateAddProgram();
  // Dump each instruction for manual inspection of the built program.
  for (size_t idx = 0; idx < program.size(); ++idx) {
    LOG(INFO) << "instruction: " << program[idx];
  }
}
80+
81+
// End-to-end check: compile the two-add program into a runtime program
// and execute it once with randomly initialized inputs.
TEST(net_build, program_execute_multi_elementwise_add) {
  auto program = CreateAddProgram();
#ifdef PADDLE_WITH_CUDA
  Target target = common::DefaultNVGPUTarget();
#else
  Target target = common::DefaultHostTarget();
#endif

  auto add_graph = std::make_shared<hlir::framework::Graph>(program, target);
  std::cout << "graph:\n" << add_graph->Visualize() << std::endl;

  auto exec_scope = BuildScope(target, add_graph);
  hlir::framework::GraphCompiler compiler(target, exec_scope, add_graph);
  auto executable = compiler.Build();

  // Declare the two program inputs in the scope, then randomize them.
  exec_scope->Var<hlir::framework::Tensor>("A");
  exec_scope->Var<hlir::framework::Tensor>("B");
  SetRandData(exec_scope->GetTensor("A"), target);
  SetRandData(exec_scope->GetTensor("B"), target);

  executable->Execute();
}
106+
107+
// End-to-end check for a fully-connected layer: mul(A, W) + bias over a
// batched input of shape {B, M, K}, compiled and executed once.
TEST(net_build, program_execute_fc) {
  constexpr int B = 10;  // batch size
  constexpr int M = 32;
  constexpr int K = 18;
  constexpr int N = 24;

  NetBuilder builder("net_builder");
  auto input = builder.CreateInput(Float(32), {B, M, K}, "A");
  auto weight = builder.CreateInput(Float(32), {N, K}, "W");  // weight
  auto bias = builder.CreateInput(Float(32), {N}, "B");       // bias

  auto mul_out = builder.mul(input, weight, 2, 1);
  auto add_out = builder.add(mul_out, bias);
  auto program = builder.Build();

#ifdef PADDLE_WITH_CUDA
  Target target = common::DefaultNVGPUTarget();
#else
  Target target = common::DefaultHostTarget();
#endif

  auto graph = std::make_shared<hlir::framework::Graph>(program, target);
  auto scope = BuildScope(target, graph);
  hlir::framework::GraphCompiler compiler(target, scope, graph);
  auto executable = compiler.Build();

  // Declare the input variables plus the mul intermediate in the scope.
  scope->Var<hlir::framework::Tensor>(std::string(input.id()));
  scope->Var<hlir::framework::Tensor>(std::string(weight.id()));
  scope->Var<hlir::framework::Tensor>(std::string(bias.id()));
  scope->Var<hlir::framework::Tensor>(std::string(mul_out->id));

  auto input_tensor = scope->GetTensor(std::string(input.id()));
  auto weight_tensor = scope->GetTensor(std::string(weight.id()));
  auto bias_tensor = scope->GetTensor(std::string(bias.id()));
  // Fetch the intermediate and output tensors too; their contents are not
  // inspected, but the lookups are kept to mirror the original behavior.
  auto mul_out_tensor = scope->GetTensor(std::string(mul_out->id));
  auto add_out_tensor = scope->GetTensor(std::string(add_out->id));
  SetRandData(input_tensor, target);
  SetRandData(weight_tensor, target);
  SetRandData(bias_tensor, target);

  executable->Execute();
}
149+
150+
} // namespace frontend
151+
} // namespace cinn

0 commit comments

Comments
 (0)