Commit ccf5b80

add test (#35710)

1 parent bda154d commit ccf5b80

File tree: 3 files changed, +161 -15 lines changed

paddle/fluid/inference/tensorrt/convert/pad_op.cc

Lines changed: 0 additions & 15 deletions

@@ -44,22 +44,7 @@ class PadOpConverter : public OpConverter {
     const std::vector<int> paddings =
         BOOST_GET_CONST(std::vector<int>, op_desc.GetAttr("paddings"));

-    nvinfer1::Dims input_shape = input->getDimensions();
-    int nbDims = input_shape.nbDims;
     int pad_size = static_cast<int>(paddings.size());
-    PADDLE_ENFORCE_GE(
-        nbDims, 2,
-        platform::errors::InvalidArgument(
-            "Input X[0]'s dimension should greater than or equal to 2. "
-            "But received %d.",
-            nbDims));
-    PADDLE_ENFORCE_EQ(
-        (nbDims + 1) * 2, pad_size,
-        platform::errors::InvalidArgument("Input X[0]'s dimension(nbDims for "
-                                          "short) should meet the condition:"
-                                          "(nbDims + 1) * 2 == pad_size. But "
-                                          "received nbDims:%d, pad_size:%d.",
-                                          nbDims, pad_size));

     nvinfer1::DimsHW pre_pad(paddings[pad_size - 4], paddings[pad_size - 2]);
     nvinfer1::DimsHW post_pad(paddings[pad_size - 3], paddings[pad_size - 1]);
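
For reference, the converter's trailing-index arithmetic maps the pad op's flat paddings attribute onto H/W pre/post pads. A minimal Python sketch of that mapping (illustration only, not part of the commit; the NCHW layout and the example values are assumptions):

# Assumed layout for a 4-D NCHW input:
# [n_pre, n_post, c_pre, c_post, h_pre, h_post, w_pre, w_post]
paddings = [0, 0, 0, 0, 1, 2, 3, 4]  # hypothetical example values
pad_size = len(paddings)

# Mirrors the nvinfer1::DimsHW pre_pad/post_pad construction above.
pre_pad = (paddings[pad_size - 4], paddings[pad_size - 2])   # (h_pre, w_pre)
post_pad = (paddings[pad_size - 3], paddings[pad_size - 1])  # (h_post, w_post)

print(pre_pad, post_pad)  # (1, 3) (2, 4)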

paddle/fluid/inference/tensorrt/op_teller.cc

Lines changed: 23 additions & 0 deletions

@@ -686,6 +686,29 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
         VLOG(3) << "The pad layer of TRT only support zero.";
         return false;
       }
+      std::vector<int64_t> shape;
+      auto* block = desc.Block();
+      for (auto& param_name : desc.Inputs()) {
+        for (auto& var_name : param_name.second) {
+          auto* var_desc = block->FindVar(var_name);
+          shape = var_desc->GetShape();
+        }
+      }
+      int nbDims = shape.size();
+      std::vector<int> paddings =
+          BOOST_GET_CONST(std::vector<int>, desc.GetAttr("paddings"));
+      int pad_size = paddings.size();
+      if (nbDims < 2) {
+        return false;
+      }
+      if (nbDims * 2 != pad_size) {
+        return false;
+      }
+      for (int i = 0; i < pad_size - 4; i++) {
+        if (paddings[i] != 0) {
+          return false;
+        }
+      }
     }

     if (op_type == "prelu") {
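
Taken together, the new teller logic admits a pad op only when the padding is confined to the last two (H/W) dims. A Python paraphrase of the rule (a sketch, not the repo's API; the zero pad_value requirement comes from the pre-existing check that emits the VLOG message above):

def pad_supported_by_trt(input_rank, paddings, pad_value=0.0):
    # Pre-existing check: only zero-valued padding is offloaded to TRT.
    if pad_value != 0.0:
        return False
    # New checks: rank >= 2, and exactly two pad entries per input dim.
    if input_rank < 2 or input_rank * 2 != len(paddings):
        return False
    # Every pad outside the trailing H/W block must be zero.
    return all(p == 0 for p in paddings[:-4])

assert pad_supported_by_trt(4, [0, 0, 0, 0, 1, 1, 1, 1])
assert not pad_supported_by_trt(4, [0, 0, 1, 1, 1, 1, 1, 1])
assert not pad_supported_by_trt(1, [1, 1])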
Lines changed: 138 additions & 0 deletions

@@ -0,0 +1,138 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest


class TrtConvertPadTest(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        attrs = [
            program_config.ops[i].attrs
            for i in range(len(program_config.ops))
        ]

        # Only zero-valued, non-negative padding is a valid program here.
        if attrs[0]['pad_value'] != 0.0:
            return False
        for x in attrs[0]['paddings']:
            if x < 0:
                return False

        return True

    def sample_program_configs(self):
        def generate_input1(attrs: List[Dict[str, Any]]):
            return np.ones([1, 3, 64, 64]).astype(np.float32)

        for pad_value in [0.0, 1.0, 2.0, -100, 100.0]:
            for paddings in [[0, 0, 0, 0, 1, 1, 1, 1],
                             [0, 0, 0, 0, 1, 2, 3, 4],
                             [0, 0, 1, 1, 1, 1, 1, 1],
                             [0, 0, 0, 0, -1, -1, 1, 1]]:
                dics = [{"pad_value": pad_value, "paddings": paddings}, {}]

                ops_config = [{
                    "op_type": "pad",
                    "op_inputs": {
                        "X": ["input_data"]
                    },
                    "op_outputs": {
                        "Out": ["pad_output_data"]
                    },
                    "op_attrs": dics[0]
                }]
                ops = self.generate_op_config(ops_config)

                program_config = ProgramConfig(
                    ops=ops,
                    weights={},
                    inputs={
                        "input_data":
                        TensorConfig(data_gen=partial(generate_input1, dics))
                    },
                    outputs=["pad_output_data"])

                yield program_config

    def sample_predictor_configs(
            self, program_config) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]}
            self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]}
            self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 64, 64]}

        def clear_dynamic_shape():
            self.dynamic_shape.min_input_shape = {}
            self.dynamic_shape.max_input_shape = {}
            self.dynamic_shape.opt_input_shape = {}

        def generate_trt_nodes_num(attrs, dynamic_shape):
            # Any nonzero pad outside the trailing H/W block falls back to
            # Paddle: expect 0 TRT engines and 3 Paddle ops instead of 1 and 2.
            for x in range(len(program_config.ops[0].attrs['paddings']) - 4):
                if program_config.ops[0].attrs['paddings'][x] != 0:
                    return 0, 3
            return 1, 2

        attrs = [
            program_config.ops[i].attrs
            for i in range(len(program_config.ops))
        ]

        # for static_shape
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False), 1e-2

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True), 1e-2

    def add_skip_trt_case(self):
        def teller1(program_config, predictor_config):
            for x in range(len(program_config.ops[0].attrs['paddings']) - 4):
                if program_config.ops[0].attrs['paddings'][x] != 0:
                    return True
            return False

        self.add_skip_case(
            teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
            "NOT Implemented: we need to add support for pad on dims other "
            "than h or w, such as paddings = [0, 0, 1, 1, 1, 1, 1, 1]")

    def test(self):
        self.add_skip_trt_case()
        self.run_test()


if __name__ == "__main__":
    unittest.main()
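
Note that teller1 and generate_trt_nodes_num share one condition: any nonzero pad outside the trailing H/W block makes the test expect (0, 3) instead of (1, 2) node counts, and the case is skipped as TRT_NOT_IMPLEMENTED. An illustrative helper naming that shared predicate (not in the commit):

def pads_outside_hw(paddings):
    # True when any entry before the trailing four (the H/W pads) is nonzero.
    return any(p != 0 for p in paddings[:-4])

assert pads_outside_hw([0, 0, 1, 1, 1, 1, 1, 1])
assert not pads_outside_hw([0, 0, 0, 0, 1, 2, 3, 4])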
