Commit 495e7f9

Update eigen version to f612df27 (#31832)
* update eigen version to f612df27, test=develop
* fix compilation error, test=develop
* remove patch command in eigen, test=develop
* fix compilation error caused by calling Eigen functions with float16 and bfloat16, test=develop
* fix unittest error, test=develop
* fix unittest error caused by precision, test=develop
* remove patch files used by old version eigen, test=develop
1 parent 587d99a commit 495e7f9

File tree

11 files changed: +73 -5888 lines changed


cmake/external/eigen.cmake

Lines changed: 3 additions & 45 deletions
@@ -14,11 +14,11 @@
 
 include(ExternalProject)
 
-# update eigen to the commit id 4da2c6b1 on 03/19/2020
+# update eigen to the commit id f612df27 on 03/16/2021
 set(EIGEN_PREFIX_DIR ${THIRD_PARTY_PATH}/eigen3)
 set(EIGEN_SOURCE_DIR ${THIRD_PARTY_PATH}/eigen3/src/extern_eigen3)
 set(EIGEN_REPOSITORY https://gitlab.com/libeigen/eigen.git)
-set(EIGEN_TAG 4da2c6b1974827b1999bab652a3d4703e1992d26)
+set(EIGEN_TAG f612df273689a19d25b45ca4f8269463207c4fee)
 
 cache_third_party(extern_eigen3
     REPOSITORY ${EIGEN_REPOSITORY}
@@ -27,48 +27,6 @@ cache_third_party(extern_eigen3
 
 if(WIN32)
     add_definitions(-DEIGEN_STRONG_INLINE=inline)
-    file(TO_NATIVE_PATH ${PADDLE_SOURCE_DIR}/patches/eigen/Half.h native_src)
-    file(TO_NATIVE_PATH ${EIGEN_SOURCE_DIR}/Eigen/src/Core/arch/CUDA/Half.h native_dst)
-    # For Windows
-    # which will cause a compilation error in Tensor:74:
-    # "can not open file 'unistd.h'"
-    # so use following patch to solve compilation error On Windows.
-    file(TO_NATIVE_PATH ${PADDLE_SOURCE_DIR}/patches/eigen/Tensor native_src2)
-    file(TO_NATIVE_PATH ${EIGEN_SOURCE_DIR}/unsupported/Eigen/CXX11/Tensor native_dst2)
-    # For VS2015
-    # which will cause a compilation error in TensorBlock.h:1028:
-    # "syntax error"
-    # so use following patch to solve compilation error On Windows.
-    file(TO_NATIVE_PATH ${PADDLE_SOURCE_DIR}/patches/eigen/TensorBlock.h native_src3)
-    file(TO_NATIVE_PATH ${EIGEN_SOURCE_DIR}/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h native_dst3)
-    set(EIGEN_PATCH_COMMAND copy ${native_src} ${native_dst} /Y && copy ${native_src2} ${native_dst2} /Y && copy ${native_src3} ${native_dst3} /Y)
-elseif(LINUX)
-    # For gxx=4.8, __GXX_ABI_VERSION is less than 1004
-    # which will cause a compilation error in Geometry_SSE.h:38:
-    # "no matching function for call to 'pmul(Eigen::internal::Packet4f&, __m128)"
-    # refer to: https://gitlab.com/libeigen/eigen/-/blob/4da2c6b1974827b1999bab652a3d4703e1992d26/Eigen/src/Core/arch/SSE/PacketMath.h#L33-60
-    # add -fabi-version=4 could avoid above error, but will cause "double free corruption" when compile with gcc8
-    # so use following patch to solve compilation error with different version of gcc.
-    file(TO_NATIVE_PATH ${PADDLE_SOURCE_DIR}/patches/eigen/Geometry_SSE.h native_src1)
-    file(TO_NATIVE_PATH ${EIGEN_SOURCE_DIR}/Eigen/src/Geometry/arch/Geometry_SSE.h native_dst1)
-    # The compiler fully support const expressions since c++14,
-    # but Eigen use some const expressions such as std::max and std::min, which are not supported in c++11
-    # add patch to avoid compilation error in c++11
-    file(TO_NATIVE_PATH ${PADDLE_SOURCE_DIR}/patches/eigen/MathFunctions.h native_src2)
-    file(TO_NATIVE_PATH ${EIGEN_SOURCE_DIR}/Eigen/src/Core/MathFunctions.h native_dst2)
-    if(WITH_ROCM)
-        # For HIPCC Eigen::internal::device::numeric_limits is not EIGEN_DEVICE_FUNC
-        # which will cause compiler error of using __host__ funciont in __host__ __device__
-        file(TO_NATIVE_PATH ${PADDLE_SOURCE_DIR}/patches/eigen/Meta.h native_src3)
-        file(TO_NATIVE_PATH ${EIGEN_SOURCE_DIR}/Eigen/src/Core/util/Meta.h native_dst3)
-        # For HIPCC Eigen::internal::scalar_sum_op<bool,bool> is not EIGEN_DEVICE_FUNC
-        # which will cause compiler error of using __host__ funciont in __host__ __device__
-        file(TO_NATIVE_PATH ${PADDLE_SOURCE_DIR}/patches/eigen/BinaryFunctors.h native_src4)
-        file(TO_NATIVE_PATH ${EIGEN_SOURCE_DIR}/Eigen/src/Core/functors/BinaryFunctors.h native_dst4)
-        set(EIGEN_PATCH_COMMAND cp ${native_src1} ${native_dst1} && cp ${native_src2} ${native_dst2} && cp ${native_src3} ${native_dst3} && cp ${native_src4} ${native_dst4})
-    else()
-        set(EIGEN_PATCH_COMMAND cp ${native_src1} ${native_dst1} && cp ${native_src2} ${native_dst2})
-    endif()
 endif()
 
 set(EIGEN_INCLUDE_DIR ${EIGEN_SOURCE_DIR})
@@ -82,7 +40,7 @@ ExternalProject_Add(
     PREFIX ${EIGEN_PREFIX_DIR}
     SOURCE_DIR ${EIGEN_SOURCE_DIR}
     UPDATE_COMMAND ""
-    PATCH_COMMAND ${EIGEN_PATCH_COMMAND}
+    PATCH_COMMAND ""
     CONFIGURE_COMMAND ""
     BUILD_COMMAND ""
     INSTALL_COMMAND ""

paddle/fluid/operators/activation_op.h

Lines changed: 2 additions & 2 deletions
@@ -400,7 +400,7 @@ struct HardShrinkFunctor : public BaseActivationFunctor<T> {
   void operator()(Device d, X x, Out out) const {
     auto temp1 = x < static_cast<T>(threshold * -1.f);
     auto temp2 = x > static_cast<T>(threshold);
-    out.device(d) = x * (temp1 + temp2 > 0).template cast<T>();
+    out.device(d) = x * (temp1 + temp2).template cast<T>();
   }
 };
 
@@ -417,7 +417,7 @@ struct HardShrinkGradFunctor : public BaseActivationFunctor<T> {
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
     auto temp1 = x < static_cast<T>(threshold * -1.f);
     auto temp2 = x > static_cast<T>(threshold);
-    dx.device(d) = dout * (temp1 + temp2 > 0).template cast<T>();
+    dx.device(d) = dout * (temp1 + temp2).template cast<T>();
   }
 
   static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
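
Why dropping the "> 0" comparison is safe: temp1 and temp2 are elementwise boolean masks for x < -threshold and x > threshold, which can never both be true, so their sum is already a 0/1 indicator. The extra comparison only produced an expression type that, per the commit message, no longer compiled for float16 and bfloat16 under the updated Eigen. A minimal scalar sketch of the equivalence (plain C++, mirroring the elementwise semantics; this is not Paddle's actual Eigen-expression kernel):

// Scalar sketch of HardShrink: zero inside [-threshold, threshold],
// identity outside. Mirrors the masks in the functor above.
#include <cstdio>

template <typename T>
T hard_shrink(T x, T threshold) {
  bool temp1 = x < -threshold;  // mask for the left tail
  bool temp2 = x > threshold;   // mask for the right tail
  // The masks are mutually exclusive, so temp1 + temp2 is already 0 or 1;
  // comparing the sum with 0 adds nothing.
  return x * static_cast<T>(temp1 + temp2);
}

int main() {
  std::printf("%g\n", hard_shrink(0.3f, 0.5f));   // 0 (inside the band)
  std::printf("%g\n", hard_shrink(-0.9f, 0.5f));  // -0.9 (passes through)
}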

paddle/fluid/platform/eigen_ext.h

Lines changed: 65 additions & 32 deletions
@@ -24,7 +24,6 @@
 
 namespace Eigen {
 
-using bfloat16 = paddle::platform::bfloat16;
 using complex64 = paddle::platform::complex64;
 using complex128 = paddle::platform::complex128;
 using float16 = paddle::platform::float16;
@@ -33,30 +32,31 @@ template <typename T>
 struct NumTraits;
 
 template <>
-struct NumTraits<bfloat16> : GenericNumTraits<bfloat16> {
+struct NumTraits<paddle::platform::bfloat16>
+    : GenericNumTraits<paddle::platform::bfloat16> {
   enum {
     IsSigned = true,
     IsInteger = false,
     IsComplex = false,
     RequireInitialization = false
   };
 
-  HOSTDEVICE static inline bfloat16 epsilon() {
+  HOSTDEVICE static inline paddle::platform::bfloat16 epsilon() {
     return paddle::platform::raw_uint16_to_bfloat16(0x3400);
   }
-  HOSTDEVICE static inline bfloat16 dummy_precision() {
-    return bfloat16(1e-5f);
+  HOSTDEVICE static inline paddle::platform::bfloat16 dummy_precision() {
+    return paddle::platform::bfloat16(1e-5f);
   }
-  HOSTDEVICE static inline bfloat16 highest() {
+  HOSTDEVICE static inline paddle::platform::bfloat16 highest() {
     return paddle::platform::raw_uint16_to_bfloat16(0x7f7f);
   }
-  HOSTDEVICE static inline bfloat16 lowest() {
+  HOSTDEVICE static inline paddle::platform::bfloat16 lowest() {
     return paddle::platform::raw_uint16_to_bfloat16(0xff7f);
   }
-  HOSTDEVICE static inline bfloat16 infinity() {
+  HOSTDEVICE static inline paddle::platform::bfloat16 infinity() {
     return paddle::platform::raw_uint16_to_bfloat16(0x7f80);
   }
-  HOSTDEVICE static inline bfloat16 quiet_NaN() {
+  HOSTDEVICE static inline paddle::platform::bfloat16 quiet_NaN() {
     return paddle::platform::raw_uint16_to_bfloat16(0xffc1);
  }
 };
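
The pattern in this hunk: the "using bfloat16 = paddle::platform::bfloat16" alias is gone, presumably because this Eigen revision ships its own Eigen::bfloat16 and the alias would now collide, so every specialization spells out the fully qualified type. For reference, a hedged sketch of how code consumes a NumTraits specialization (assumes an Eigen checkout on the include path; shown with float, whose traits Eigen already provides; a specialization like the bfloat16 one above exposes the same calls for a custom scalar):

// Querying Eigen's NumTraits interface for a scalar type.
#include <Eigen/Core>
#include <iostream>

int main() {
  std::cout << "epsilon: " << Eigen::NumTraits<float>::epsilon() << "\n";
  std::cout << "highest: " << Eigen::NumTraits<float>::highest() << "\n";
  std::cout << "lowest:  " << Eigen::NumTraits<float>::lowest() << "\n";
  // Generic Eigen algorithms call these instead of std::numeric_limits,
  // which is why custom scalars must provide their own specialization.
}
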
@@ -137,68 +137,91 @@ namespace numext {
 //////////// bfloat methods /////////////
 
 template <>
-HOSTDEVICE inline bool(isnan)(const bfloat16& a) {
+HOSTDEVICE inline bool(isnan)(const paddle::platform::bfloat16& a) {
   return (paddle::platform::isnan)(a);
 }
 
 template <>
-HOSTDEVICE inline bool(isinf)(const bfloat16& a) {
+HOSTDEVICE inline bool(isinf)(const paddle::platform::bfloat16& a) {
   return (paddle::platform::isinf)(a);
 }
 
 template <>
-HOSTDEVICE inline bool(isfinite)(const bfloat16& a) {
+HOSTDEVICE inline bool(isfinite)(const paddle::platform::bfloat16& a) {
   return (paddle::platform::isfinite)(a);
 }
 
 template <>
-HOSTDEVICE inline bfloat16 exp(const bfloat16& a) {
-  return bfloat16(::expf(static_cast<float>(a)));
+HOSTDEVICE inline paddle::platform::bfloat16 exp(
+    const paddle::platform::bfloat16& a) {
+  return paddle::platform::bfloat16(::expf(static_cast<float>(a)));
 }
 
 template <>
-HOSTDEVICE inline bfloat16 erf(const bfloat16& a) {
-  return bfloat16(::erff(static_cast<float>(a)));
+HOSTDEVICE inline paddle::platform::bfloat16 erf(
+    const paddle::platform::bfloat16& a) {
+  return paddle::platform::bfloat16(::erff(static_cast<float>(a)));
 }
 
 template <>
-HOSTDEVICE inline bfloat16 log(const bfloat16& a) {
-  return bfloat16(::logf(static_cast<float>(a)));
+HOSTDEVICE inline paddle::platform::bfloat16 log(
+    const paddle::platform::bfloat16& a) {
+  return paddle::platform::bfloat16(::logf(static_cast<float>(a)));
 }
 
 template <>
-HOSTDEVICE inline bfloat16 tanh(const bfloat16& a) {
-  return bfloat16(::tanhf(static_cast<float>(a)));
+HOSTDEVICE inline paddle::platform::bfloat16 tanh(
+    const paddle::platform::bfloat16& a) {
+  return paddle::platform::bfloat16(::tanhf(static_cast<float>(a)));
 }
 
 template <>
-HOSTDEVICE inline bfloat16 sqrt(const bfloat16& a) {
-  return bfloat16(::sqrtf(static_cast<float>(a)));
+HOSTDEVICE inline paddle::platform::bfloat16 sqrt(
+    const paddle::platform::bfloat16& a) {
+  return paddle::platform::bfloat16(::sqrtf(static_cast<float>(a)));
 }
 
 template <>
-HOSTDEVICE inline bfloat16 ceil(const bfloat16& a) {
-  return bfloat16(::ceilf(static_cast<float>(a)));
+HOSTDEVICE inline paddle::platform::bfloat16 ceil(
+    const paddle::platform::bfloat16& a) {
+  return paddle::platform::bfloat16(::ceilf(static_cast<float>(a)));
 }
 
 template <>
-HOSTDEVICE inline bfloat16 floor(const bfloat16& a) {
-  return bfloat16(::floorf(static_cast<float>(a)));
+HOSTDEVICE inline paddle::platform::bfloat16 floor(
+    const paddle::platform::bfloat16& a) {
+  return paddle::platform::bfloat16(::floorf(static_cast<float>(a)));
 }
 
 template <>
-HOSTDEVICE inline bfloat16 round(const bfloat16& a) {
-  return bfloat16(::roundf(static_cast<float>(a)));
+HOSTDEVICE inline paddle::platform::bfloat16 round(
+    const paddle::platform::bfloat16& a) {
+  return paddle::platform::bfloat16(::roundf(static_cast<float>(a)));
 }
 
 template <>
-HOSTDEVICE inline bfloat16 pow(const bfloat16& a, const bfloat16& b) {
-  return bfloat16(::powf(static_cast<float>(a), static_cast<float>(b)));
+HOSTDEVICE inline paddle::platform::bfloat16 pow(
+    const paddle::platform::bfloat16& a, const paddle::platform::bfloat16& b) {
+  return paddle::platform::bfloat16(
+      ::powf(static_cast<float>(a), static_cast<float>(b)));
 }
 
 template <>
-HOSTDEVICE inline bfloat16 abs(const bfloat16& a) {
-  return bfloat16(::fabs(static_cast<float>(a)));
+HOSTDEVICE inline paddle::platform::bfloat16 abs(
+    const paddle::platform::bfloat16& a) {
+  return paddle::platform::bfloat16(::fabs(static_cast<float>(a)));
+}
+
+template <>
+HOSTDEVICE inline paddle::platform::bfloat16 mini(
+    const paddle::platform::bfloat16& a, const paddle::platform::bfloat16& b) {
+  return b < a ? b : a;
+}
+
+template <>
+HOSTDEVICE inline paddle::platform::bfloat16 maxi(
+    const paddle::platform::bfloat16& a, const paddle::platform::bfloat16& b) {
+  return a < b ? b : a;
 }
 
 //////////// complex64 methods /////////////
@@ -398,5 +421,15 @@ HOSTDEVICE inline float16 abs(const float16& a) {
   return float16(::fabs(static_cast<float>(a)));
 }
 
+template <>
+HOSTDEVICE inline float16 mini(const float16& a, const float16& b) {
+  return b < a ? b : a;
+}
+
+template <>
+HOSTDEVICE inline float16 maxi(const float16& a, const float16& b) {
+  return a < b ? b : a;
+}
+
 } // namespace numext
 } // namespace Eigen
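
The new mini/maxi specializations follow one recipe for both half-precision types: implement the comparison with the type's own operator< only, since the generic Eigen::numext versions appear to route through std::min/std::max, which does not compile for these custom scalars in device code. A hedged sketch of the same pattern with a toy scalar (Fix8 is hypothetical, purely for illustration; assumes Eigen on the include path):

// Specializing Eigen::numext::mini/maxi for a custom scalar, using only
// the scalar's operator<, mirroring the float16/bfloat16 code above.
#include <Eigen/Core>
#include <iostream>

struct Fix8 {  // hypothetical custom scalar type
  int raw;
  explicit Fix8(int r) : raw(r) {}
};
inline bool operator<(const Fix8& a, const Fix8& b) { return a.raw < b.raw; }

namespace Eigen {
namespace numext {
template <>
inline Fix8 mini(const Fix8& a, const Fix8& b) { return b < a ? b : a; }
template <>
inline Fix8 maxi(const Fix8& a, const Fix8& b) { return a < b ? b : a; }
}  // namespace numext
}  // namespace Eigen

int main() {
  Fix8 x(3), y(7);
  std::cout << Eigen::numext::mini(x, y).raw << " "    // 3
            << Eigen::numext::maxi(x, y).raw << "\n";  // 7
}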
