Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions paddle/fluid/platform/cpu_info.cc
Original file line number Diff line number Diff line change
Expand Up @@ -164,6 +164,13 @@ bool MayIUse(const cpu_isa_t cpu_isa) {
// AVX512F: EBX Bit 16
int avx512f_mask = (1 << 16);
return (reg[1] & avx512f_mask) != 0;
} else if (cpu_isa == avx512_core) {
  // avx512_core requires the full AVX-512 "core" set: F + DQ + BW + VL
  // (CPUID leaf 7, sub-leaf 0, EBX bits 16, 17, 30 and 31).
  // Use unsigned literals: `1 << 31` shifts into the sign bit of a 32-bit
  // int, which is undefined behavior; `uint` is a POSIX typedef, not C++.
  const unsigned int avx512f_mask = (1U << 16);
  const unsigned int avx512dq_mask = (1U << 17);
  const unsigned int avx512bw_mask = (1U << 30);
  const unsigned int avx512vl_mask = (1U << 31);
  return ((reg[1] & avx512f_mask) && (reg[1] & avx512dq_mask) &&
          (reg[1] & avx512bw_mask) && (reg[1] & avx512vl_mask));
}
}
#endif
Expand Down
12 changes: 12 additions & 0 deletions paddle/fluid/pybind/pybind.cc
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,17 @@ bool IsCompiledWithMKLDNN() {
#endif
}

// Returns true when Paddle can run bfloat16 kernels on this host: the build
// must include MKL-DNN and the CPU must support the avx512_core ISA
// (checked via platform::MayIUse). Exposed to Python as `core.is_bfloat16`.
bool IsBfloat16() {
#ifndef PADDLE_WITH_MKLDNN
  return false;
#else
  // Return the predicate directly instead of `if (...) return true; else
  // return false;`.
  return platform::MayIUse(platform::cpu_isa_t::avx512_core);
#endif
}

bool IsCompiledWithBrpc() {
#ifndef PADDLE_WITH_DISTRIBUTE
return false;
Expand Down Expand Up @@ -1661,6 +1672,7 @@ All parameter, weight, gradient are variables in Paddle.
m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
m.def("is_compiled_with_xpu", IsCompiledWithXPU);
m.def("is_compiled_with_mkldnn", IsCompiledWithMKLDNN);
m.def("is_bfloat16", IsBfloat16);
m.def("is_compiled_with_brpc", IsCompiledWithBrpc);
m.def("is_compiled_with_dist", IsCompiledWithDIST);
m.def("_cuda_synchronize", [](const platform::CUDAPlace &place) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
import struct

import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2dOp


Expand Down Expand Up @@ -205,4 +205,5 @@ def init_group(self):


if __name__ == '__main__':
    # These are bfloat16 conv2d tests: they only make sense when the runtime
    # reports bfloat16 support (MKL-DNN build + avx512_core CPU), so the whole
    # suite is gated on core.is_bfloat16().
    # NOTE(review): when the gate is False the script exits silently with
    # status 0 and CI logs show no tests at all — consider unittest.skipIf on
    # the test classes instead so the skip is visible. Also assumes `unittest`
    # is imported earlier in this file (not visible in this hunk) — verify.
    if core.is_bfloat16():
        unittest.main()