|
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
| 14 | + |
| 15 | +#include <NvInfer.h> |
| 16 | +#include <sys/types.h> |
| 17 | + |
| 18 | +#include <cstddef> |
| 19 | +#include <cstdint> |
| 20 | +#include <vector> |
| 21 | + |
| 22 | +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" |
| 23 | + |
| 24 | +namespace paddle { |
| 25 | +namespace framework { |
| 26 | +class Scope; |
| 27 | + |
| 28 | +namespace proto { |
| 29 | +class OpDesc; |
| 30 | +} // namespace proto |
| 31 | +} // namespace framework |
| 32 | +} // namespace paddle |
| 33 | + |
| 34 | +namespace paddle { |
| 35 | +namespace inference { |
| 36 | +namespace tensorrt { |
| 37 | + |
| 38 | +class ReduceSumOpConverter : public OpConverter { |
| 39 | + public: |
| 40 | + void operator()(const framework::proto::OpDesc& op, |
| 41 | + const framework::Scope& scope, bool test_mode) override { |
| 42 | + VLOG(4) << "convert a paddle reduce_sum op to tensorrt reduce layer"; |
| 43 | + framework::OpDesc op_desc(op, nullptr); |
| 44 | + |
| 45 | + auto* x = engine_->GetITensor(op_desc.Input("X").front()); |
| 46 | + nvinfer1::Dims input_shape = x->getDimensions(); |
| 47 | + int input_dims = input_shape.nbDims; |
| 48 | + |
| 49 | + bool keep_dim = BOOST_GET_CONST(bool, op_desc.GetAttr("keep_dim")); |
| 50 | + std::vector<int32_t> dim = |
| 51 | + BOOST_GET_CONST(std::vector<int32_t>, op_desc.GetAttr("dim")); |
| 52 | + bool reduce_all = BOOST_GET_CONST(bool, op_desc.GetAttr("reduce_all")); |
| 53 | + |
| 54 | + // Now we only support dynamic_shape mode. |
| 55 | + nvinfer1::IReduceLayer* layer = nullptr; |
| 56 | + if (reduce_all) { |
| 57 | + uint32_t reduce_dim = 0; |
| 58 | + for (int i = 0; i < input_dims; ++i) { |
| 59 | + reduce_dim |= 1 << i; |
| 60 | + } |
| 61 | + layer = TRT_ENGINE_ADD_LAYER(engine_, Reduce, *x, |
| 62 | + nvinfer1::ReduceOperation::kSUM, reduce_dim, |
| 63 | + keep_dim); |
| 64 | + } else { |
| 65 | + auto CvtToBitMask = [&](const std::vector<int32_t>& dims) -> uint32_t { |
| 66 | + uint32_t res = 0; |
| 67 | + for (auto x : dims) { |
| 68 | + if (x < 0) { |
| 69 | + res |= 1 << (x + input_dims); |
| 70 | + } else { |
| 71 | + res |= 1 << x; |
| 72 | + } |
| 73 | + } |
| 74 | + return res; |
| 75 | + }; |
| 76 | + layer = TRT_ENGINE_ADD_LAYER(engine_, Reduce, *x, |
| 77 | + nvinfer1::ReduceOperation::kSUM, |
| 78 | + CvtToBitMask(dim), keep_dim); |
| 79 | + } |
| 80 | + |
| 81 | + auto output_name = op_desc.Output("Out")[0]; |
| 82 | + RreplenishLayerAndOutput(layer, "reduce_sum", {output_name}, test_mode); |
| 83 | + } |
| 84 | +}; |
| 85 | + |
| 86 | +} // namespace tensorrt |
| 87 | +} // namespace inference |
| 88 | +} // namespace paddle |
| 89 | + |
// Register the converter so the framework dispatches Paddle's `reduce_sum`
// op to ReduceSumOpConverter during TensorRT engine construction.
REGISTER_TRT_OP_CONVERTER(reduce_sum, ReduceSumOpConverter);
0 commit comments