From a64ca4c9c904fed11dd4f893c094fdbe173a156b Mon Sep 17 00:00:00 2001 From: wenbin Date: Fri, 23 Jul 2021 02:49:41 +0000 Subject: [PATCH 01/10] tile op --- .../fluid/inference/api/analysis_predictor.cc | 1 + .../inference/tensorrt/convert/CMakeLists.txt | 1 + .../inference/tensorrt/convert/tile_op.cc | 69 +++++++++++++++++++ paddle/fluid/inference/tensorrt/op_teller.cc | 15 ++++ .../ir/inference/test_trt_tile_op.py | 52 ++++++++++++++ 5 files changed, 138 insertions(+) create mode 100644 paddle/fluid/inference/tensorrt/convert/tile_op.cc create mode 100644 python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index d32ec581ce94b4..b31b5f906b9b9b 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -1256,6 +1256,7 @@ USE_TRT_CONVERTER(reshape); USE_TRT_CONVERTER(reduce_sum); USE_TRT_CONVERTER(gather_nd); USE_TRT_CONVERTER(reduce_mean); +USE_TRT_CONVERTER(tile); #endif namespace paddle_infer { diff --git a/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt b/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt index 2e4a175566a7a1..63d9114e1acda0 100644 --- a/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt +++ b/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt @@ -15,6 +15,7 @@ nv_library(tensorrt_converter reshape_op.cc reduce_op.cc gather_nd_op.cc + tile_op.cc DEPS tensorrt_engine tensorrt_plugin operator scope framework_proto op_registry) nv_test(test_op_converter SRCS test_op_converter.cc DEPS diff --git a/paddle/fluid/inference/tensorrt/convert/tile_op.cc b/paddle/fluid/inference/tensorrt/convert/tile_op.cc new file mode 100644 index 00000000000000..ea871c63659f88 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/convert/tile_op.cc @@ -0,0 +1,69 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
+
+namespace paddle {
+namespace framework {
+class Scope;
+namespace proto {
+class OpDesc;
+}  // namespace proto
+}  // namespace framework
+}  // namespace paddle
+
+namespace paddle {
+namespace inference {
+namespace tensorrt {
+
+/*
+ * TileOp
+ */
+class TileOpConverter : public OpConverter {
+ public:
+  void operator()(const framework::proto::OpDesc& op,
+                  const framework::Scope& scope, bool test_mode) override {
+    framework::OpDesc op_desc(op, nullptr);
+    // Declare inputs
+    auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
+    nvinfer1::Dims input_shape = input->getDimensions();
+    std::vector<int> repeat_times =
+        BOOST_GET_CONST(std::vector<int>, op_desc.GetAttr("repeat_times"));
+    int nbDims_num = repeat_times.size();
+    nvinfer1::Dims output_dim = input_shape;
+    nvinfer1::Dims output_stride;
+    // If input_dims.nbDims + 1 < repeat_times.size() means we
+    // should add expand 1 on batchsize. trt doesn't support this behavior.
+ assert(input_shape.nbDims + 1 >= repeat_times.size()); + int diff = input_shape.nbDims + 1 - repeat_times.size(); + if (diff > 0) repeat_times.insert(repeat_times.begin(), diff, 1); + + // Can't expand on batchsize + assert(repeat_times[0] == 1); + output_stride.nbDims = input_shape.nbDims; + for (int i = 0; i < input_shape.nbDims; i++) { + output_dim.d[i] = output_dim.d[i] * repeat_times[i + 1]; + output_stride.d[i] = 1; + } + + auto* layer = TRT_ENGINE_ADD_LAYER(engine_, Slice, *input, input_shape, + output_dim, output_stride); + layer->setMode(nvinfer1::SliceMode::kWRAP); + auto output_name = op_desc.Output("Out")[0]; + RreplenishLayerAndOutput(layer, "tile", {output_name}, test_mode); + } +}; + +} // namespace tensorrt +} // namespace inference +} // namespace paddle + +REGISTER_TRT_OP_CONVERTER(tile, TileOpConverter); diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc index 6c6006065435f4..51a0d46dc7cde5 100644 --- a/paddle/fluid/inference/tensorrt/op_teller.cc +++ b/paddle/fluid/inference/tensorrt/op_teller.cc @@ -131,6 +131,7 @@ struct SimpleOpTypeSetTeller : public Teller { "anchor_generator", "reduce_sum", "reduce_mean", + "tile", }; }; @@ -730,6 +731,20 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, } } + if (op_type == "tile") { + // Paddle-TRT does not support the input tensors. 
+ auto inputs = desc.InputArgumentNames(); + for (auto& input : inputs) { + if (input == "repeat_times_tensor" && + desc.Input("repeat_times_tensor").size() > 0) + return false; + if (input == "RepeatTimes" && desc.Input("RepeatTimes").size() > 0) + return false; + } + if (with_dynamic_shape) return false; + if (!with_dynamic_shape && !desc.HasAttr("repeat_times")) return false; + } + if ((*teller)(op_type, desc, use_no_calib_int8)) return true; } diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py new file mode 100644 index 00000000000000..d0115cd86ffdc9 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py @@ -0,0 +1,52 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import unittest +import numpy as np +from inference_pass_test import InferencePassTest +import paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +from paddle.fluid.core import PassVersionChecker +from paddle.fluid.core import AnalysisConfig + + +class TRTTileTest(InferencePassTest): + def setUp(self): + with fluid.program_guard(self.main_program, self.startup_program): + data = fluid.data( + name="data", shape=[4, 3, 224, 256], dtype="float32") + tile_out = paddle.tile(x=data, repeat_times=[1, 1, 1, 1]) + out = fluid.layers.batch_norm(tile_out, is_test=True) + + self.feeds = { + "data": np.random.random([4, 3, 224, 256]).astype("float32"), + } + self.enable_trt = True + self.trt_parameters = TRTTileTest.TensorRTParam( + 1 << 30, 16, 1, AnalysisConfig.Precision.Float32, False, False) + self.fetch_list = [out] + + def test_check_output(self): + if core.is_compiled_with_cuda(): + use_gpu = True + self.check_output_with_option(use_gpu, flatten=True) + self.assertTrue( + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + + +if __name__ == "__main__": + unittest.main() From c548804c3be8caac41828b80485c5f3e98b6c5fd Mon Sep 17 00:00:00 2001 From: wenbin Date: Mon, 26 Jul 2021 08:59:40 +0000 Subject: [PATCH 02/10] more uts --- .../ir/inference/test_trt_tile_op.py | 69 +++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py index d0115cd86ffdc9..cfdc9480aab48b 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py @@ -48,5 +48,74 @@ def test_check_output(self): PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) +class TRTTileExpandTest(InferencePassTest): + def setUp(self): + with fluid.program_guard(self.main_program, self.startup_program): + 
data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32") + tile_out = paddle.tile(x=data, repeat_times=[1, 4, 1080, 1920]) + out = fluid.layers.batch_norm(tile_out, is_test=True) + + self.feeds = { + "data": np.random.random([1, 1, 1, 1]).astype("float32"), + } + self.enable_trt = True + self.trt_parameters = TRTTileExpandTest.TensorRTParam( + 1 << 30, 1, 1, AnalysisConfig.Precision.Float32, False, False) + self.fetch_list = [out] + + def test_check_output(self): + if core.is_compiled_with_cuda(): + use_gpu = True + self.check_output_with_option(use_gpu, flatten=True) + self.assertTrue( + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + + +class TRTTileExpandStaticTest(InferencePassTest): + def setUp(self): + with fluid.program_guard(self.main_program, self.startup_program): + data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32") + tile_out = paddle.tile(x=data, repeat_times=[1, 4, 1080, 1920]) + out = fluid.layers.batch_norm(tile_out, is_test=True) + + self.feeds = { + "data": np.random.random([1, 1, 1, 1]).astype("float32"), + } + self.enable_trt = True + self.trt_parameters = TRTTileExpandStaticTest.TensorRTParam( + 1 << 30, 1, 1, AnalysisConfig.Precision.Float32, True, False) + self.fetch_list = [out] + + def test_check_output(self): + if core.is_compiled_with_cuda(): + use_gpu = True + self.check_output_with_option(use_gpu, flatten=True) + self.assertTrue( + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + + +class TRTTileExpandHalfTest(InferencePassTest): + def setUp(self): + with fluid.program_guard(self.main_program, self.startup_program): + data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32") + tile_out = paddle.tile(x=data, repeat_times=[1, 4, 1080, 1920]) + out = fluid.layers.batch_norm(tile_out, is_test=True) + + self.feeds = { + "data": np.random.random([1, 1, 1, 1]).astype("float32"), + } + self.enable_trt = True + self.trt_parameters = TRTTileExpandHalfTest.TensorRTParam( + 1 << 30, 1, 
1, AnalysisConfig.Precision.Half, False, False) + self.fetch_list = [out] + + def test_check_output(self): + if core.is_compiled_with_cuda(): + use_gpu = True + self.check_output_with_option(use_gpu, flatten=True) + self.assertTrue( + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + + if __name__ == "__main__": unittest.main() From cc6fa5c453cfc7e362db7b037e6445219cff19a7 Mon Sep 17 00:00:00 2001 From: wenbin Date: Mon, 26 Jul 2021 11:54:13 +0000 Subject: [PATCH 03/10] disable tile if trt6.0 --- paddle/fluid/inference/tensorrt/convert/tile_op.cc | 6 +++++- paddle/fluid/inference/tensorrt/op_teller.cc | 7 ++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/inference/tensorrt/convert/tile_op.cc b/paddle/fluid/inference/tensorrt/convert/tile_op.cc index ea871c63659f88..d3840dadfe356a 100644 --- a/paddle/fluid/inference/tensorrt/convert/tile_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/tile_op.cc @@ -31,13 +31,16 @@ class TileOpConverter : public OpConverter { public: void operator()(const framework::proto::OpDesc& op, const framework::Scope& scope, bool test_mode) override { +#if IS_TRT_VERSION_GE(7000) + VLOG(4) << "convert a fluid tile op to tensorrt tile layer"; + framework::OpDesc op_desc(op, nullptr); // Declare inputs auto* input = engine_->GetITensor(op_desc.Input("X")[0]); nvinfer1::Dims input_shape = input->getDimensions(); std::vector repeat_times = BOOST_GET_CONST(std::vector, op_desc.GetAttr("repeat_times")); - int nbDims_num = repeat_times.size(); + nvinfer1::Dims output_dim = input_shape; nvinfer1::Dims output_stride; // If input_dims.nbDims + 1 < repeat_times.size() means we @@ -59,6 +62,7 @@ class TileOpConverter : public OpConverter { layer->setMode(nvinfer1::SliceMode::kWRAP); auto output_name = op_desc.Output("Out")[0]; RreplenishLayerAndOutput(layer, "tile", {output_name}, test_mode); +#endif } }; diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc 
b/paddle/fluid/inference/tensorrt/op_teller.cc index 51a0d46dc7cde5..12fc1e72b488b9 100644 --- a/paddle/fluid/inference/tensorrt/op_teller.cc +++ b/paddle/fluid/inference/tensorrt/op_teller.cc @@ -730,7 +730,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, } } } - +#if IS_TRT_VERSION_GE(7000) if (op_type == "tile") { // Paddle-TRT does not support the input tensors. auto inputs = desc.InputArgumentNames(); @@ -744,6 +744,11 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, if (with_dynamic_shape) return false; if (!with_dynamic_shape && !desc.HasAttr("repeat_times")) return false; } +#elif + if (op_type == "tile") { + return false; + } +#endif if ((*teller)(op_type, desc, use_no_calib_int8)) return true; } From 1ea279e125acc823124888fea26ac0498614ea98 Mon Sep 17 00:00:00 2001 From: wenbin Date: Mon, 26 Jul 2021 12:05:55 +0000 Subject: [PATCH 04/10] typo --- paddle/fluid/inference/tensorrt/op_teller.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc index 12fc1e72b488b9..4793fb2c9f980e 100644 --- a/paddle/fluid/inference/tensorrt/op_teller.cc +++ b/paddle/fluid/inference/tensorrt/op_teller.cc @@ -744,7 +744,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, if (with_dynamic_shape) return false; if (!with_dynamic_shape && !desc.HasAttr("repeat_times")) return false; } -#elif +#else if (op_type == "tile") { return false; } From fc105dc040e1cfdc14b8f6b8566080104b7c1615 Mon Sep 17 00:00:00 2001 From: wenbin Date: Mon, 26 Jul 2021 12:47:51 +0000 Subject: [PATCH 05/10] fix timeout issue --- python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt b/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt index 281bbb078b74b6..45e392cd66e90a 
100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt @@ -37,4 +37,5 @@ set_tests_properties(test_trt_conv_pass PROPERTIES TIMEOUT 120) set_tests_properties(test_trt_dynamic_shape PROPERTIES TIMEOUT 120) set_tests_properties(test_trt_pool_op PROPERTIES ENVIRONMENT FLAGS_fraction_of_gpu_memory_to_use=0.1 TIMEOUT 45) set_tests_properties(test_trt_reduce_mean_op PROPERTIES TIMEOUT 60) +set_tests_properties(test_trt_tile_op PROPERTIES TIMEOUT 60) endif() From a26381e2e71b2e5356aefc7be48e048a7979be36 Mon Sep 17 00:00:00 2001 From: wenbin Date: Mon, 26 Jul 2021 22:18:26 +0000 Subject: [PATCH 06/10] opteller --- paddle/fluid/inference/tensorrt/op_teller.cc | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc index 4793fb2c9f980e..472565397624ce 100644 --- a/paddle/fluid/inference/tensorrt/op_teller.cc +++ b/paddle/fluid/inference/tensorrt/op_teller.cc @@ -51,6 +51,9 @@ struct SimpleOpTypeSetTeller : public Teller { #if IS_TRT_VERSION_GE(7130) teller_set.insert("group_norm"); #endif +#if IS_TRT_VERSION_GE(7000) + teller_set.insert("tile"); +#endif #if CUDA_VERSION >= 10020 teller_set.insert("reshape"); teller_set.insert("reshape2"); @@ -744,10 +747,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, if (with_dynamic_shape) return false; if (!with_dynamic_shape && !desc.HasAttr("repeat_times")) return false; } -#else - if (op_type == "tile") { - return false; - } #endif if ((*teller)(op_type, desc, use_no_calib_int8)) return true; From bc9be442bced9f07d7adc6ef75ded5d9e37fc066 Mon Sep 17 00:00:00 2001 From: wenbin Date: Mon, 26 Jul 2021 22:20:30 +0000 Subject: [PATCH 07/10] opteller remove duplicate code --- paddle/fluid/inference/tensorrt/op_teller.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc 
b/paddle/fluid/inference/tensorrt/op_teller.cc index 472565397624ce..637371fa81bf9f 100644 --- a/paddle/fluid/inference/tensorrt/op_teller.cc +++ b/paddle/fluid/inference/tensorrt/op_teller.cc @@ -134,7 +134,6 @@ struct SimpleOpTypeSetTeller : public Teller { "anchor_generator", "reduce_sum", "reduce_mean", - "tile", }; }; From 9fcc16ef77023a6cac4676218a8a76bc602d4081 Mon Sep 17 00:00:00 2001 From: wenbin Date: Wed, 28 Jul 2021 06:14:48 +0000 Subject: [PATCH 08/10] comments. test=document_fix --- paddle/fluid/inference/tensorrt/convert/tile_op.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/inference/tensorrt/convert/tile_op.cc b/paddle/fluid/inference/tensorrt/convert/tile_op.cc index d3840dadfe356a..d9a5ed9ee4536e 100644 --- a/paddle/fluid/inference/tensorrt/convert/tile_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/tile_op.cc @@ -44,13 +44,13 @@ class TileOpConverter : public OpConverter { nvinfer1::Dims output_dim = input_shape; nvinfer1::Dims output_stride; // If input_dims.nbDims + 1 < repeat_times.size() means we - // should add expand 1 on batchsize. trt doesn't support this behavior. - assert(input_shape.nbDims + 1 >= repeat_times.size()); + // should expand 1 on batchsize. trt doesn't support this behavior. + PADDLE_ENFORCE(input_shape.nbDims + 1 >= repeat_times.size()); int diff = input_shape.nbDims + 1 - repeat_times.size(); if (diff > 0) repeat_times.insert(repeat_times.begin(), diff, 1); // Can't expand on batchsize - assert(repeat_times[0] == 1); + PADDLE_ENFORCE(repeat_times[0] == 1); output_stride.nbDims = input_shape.nbDims; for (int i = 0; i < input_shape.nbDims; i++) { output_dim.d[i] = output_dim.d[i] * repeat_times[i + 1]; From a43d53dcf5dd5e56a5352ca3dd6dfa74b7e4ea65 Mon Sep 17 00:00:00 2001 From: wenbin Date: Wed, 28 Jul 2021 06:32:50 +0000 Subject: [PATCH 09/10] modify PADDLE_ENFORCE. 
--- paddle/fluid/inference/tensorrt/convert/tile_op.cc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/inference/tensorrt/convert/tile_op.cc b/paddle/fluid/inference/tensorrt/convert/tile_op.cc index d9a5ed9ee4536e..860d04f03ecf3f 100644 --- a/paddle/fluid/inference/tensorrt/convert/tile_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/tile_op.cc @@ -45,12 +45,17 @@ class TileOpConverter : public OpConverter { nvinfer1::Dims output_stride; // If input_dims.nbDims + 1 < repeat_times.size() means we // should expand 1 on batchsize. trt doesn't support this behavior. - PADDLE_ENFORCE(input_shape.nbDims + 1 >= repeat_times.size()); + PADDLE_ENFORCE_GE(input_shape.nbDims + 1, repeat_times.size(), + platform::errors::InvalidArgument( + "Can't change batchsize, please check repeat_times")); int diff = input_shape.nbDims + 1 - repeat_times.size(); if (diff > 0) repeat_times.insert(repeat_times.begin(), diff, 1); // Can't expand on batchsize - PADDLE_ENFORCE(repeat_times[0] == 1); + PADDLE_ENFORCE_EQ( + repeat_times[0], 1, + platform::errors::InvalidArgument( + "Can't expand on batchsize, please check repeat_times")); output_stride.nbDims = input_shape.nbDims; for (int i = 0; i < input_shape.nbDims; i++) { output_dim.d[i] = output_dim.d[i] * repeat_times[i + 1]; From f495c0edfb73c2eaaf7c02938d04c48dc0e79ebd Mon Sep 17 00:00:00 2001 From: wenbin Date: Wed, 28 Jul 2021 10:34:27 +0000 Subject: [PATCH 10/10] fix reduce_mean issue --- paddle/fluid/inference/tensorrt/op_teller.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc index 637371fa81bf9f..2829a740236d27 100644 --- a/paddle/fluid/inference/tensorrt/op_teller.cc +++ b/paddle/fluid/inference/tensorrt/op_teller.cc @@ -719,12 +719,14 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, VLOG(3) << "the " << op_type << " does not have attr 
(keep_dim or dim or "
                 "reduce_all)";
+      VLOG(3) << "attr " << desc.HasAttr("keep_dim") << " "
+              << desc.HasAttr("dim") << " " << desc.HasAttr("reduce_all");
       return false;
     }
 
     // The batch size dimension cannot be reduced if it's not dynamic shape.
     if (!with_dynamic_shape) {
-      if (desc.HasAttr("reduce_all")) return false;
+      if (BOOST_GET_CONST(bool, desc.GetAttr("reduce_all"))) return false;
       std::vector<int32_t> dim =
           BOOST_GET_CONST(std::vector<int32_t>, desc.GetAttr("dim"));
       for (auto x : dim) {