Merged
Changes from 3 commits
12 changes: 12 additions & 0 deletions paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc
@@ -18,6 +18,7 @@
#include <string>
#include <vector>
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/math/cpu_vec.h"
#include "paddle/fluid/platform/enforce.h"

@@ -225,3 +226,14 @@ REGISTER_PASS(conv_affine_channel_fuse_pass,
paddle::framework::ir::ConvAffineChannelFusePass);
REGISTER_PASS(conv_eltwiseadd_affine_channel_fuse_pass,
paddle::framework::ir::ConvEltwiseAddAffineChannelFusePass);
REGISTER_PASS_CAPABILITY(conv_affine_channel_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("conv2d", 0)
.EQ("affine_channel", 0));
REGISTER_PASS_CAPABILITY(conv_eltwiseadd_affine_channel_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("conv2d", 0)
.EQ("elementwise_add", 0)
.EQ("affine_channel", 0));
12 changes: 12 additions & 0 deletions paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
@@ -18,6 +18,7 @@
#include <string>
#include <vector>
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/math/cpu_vec.h"
#include "paddle/fluid/platform/enforce.h"

@@ -372,3 +373,14 @@ REGISTER_PASS(depthwise_conv_bn_fuse_pass,
paddle::framework::ir::DepthwiseConvBNFusePass);
REGISTER_PASS(depthwise_conv_eltwiseadd_bn_fuse_pass,
paddle::framework::ir::DepthwiseConvEltwiseAddBNFusePass);
REGISTER_PASS_CAPABILITY(conv_bn_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("conv2d", 0)
.EQ("batch_norm", 0));
REGISTER_PASS_CAPABILITY(conv_eltwiseadd_bn_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("conv2d", 0)
.EQ("elementwise_add", 0)
.EQ("batch_norm", 0));
10 changes: 10 additions & 0 deletions paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.cc
@@ -18,6 +18,7 @@ limitations under the License. */
#include <unordered_set>
#include <vector>
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_version_registry.h"

#define MAX_NUM_FC 10

@@ -174,6 +175,10 @@ void BuildRepeatedFCReluPattern(PDPattern* pattern,
if (x->outputs.size() <= 0 || x->inputs.size() <= 0U) {
return false;
}
if (x->IsVar() && x->Var() && x->Var()->GetShape().size() > 2) {
LOG(WARNING) << "repeated fc relu only supports input dims = 2";
return false;
}
int fc_idx = FindFCIdx(x);
if (fc_idx < 0) {
return false;
@@ -384,3 +389,8 @@ void RepeatedFCReluFusePass::ApplyImpl(ir::Graph* graph) const {

REGISTER_PASS(repeated_fc_relu_fuse_pass,
paddle::framework::ir::RepeatedFCReluFusePass);
REGISTER_PASS_CAPABILITY(repeated_fc_relu_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("fc", 0)
.EQ("relu", 0));
8 changes: 8 additions & 0 deletions paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc
@@ -16,6 +16,7 @@

#include "paddle/fluid/framework/ir/graph_viz_pass.h"
#include "paddle/fluid/framework/ir/shuffle_channel_detect_pass.h"
#include "paddle/fluid/framework/op_version_registry.h"

namespace paddle {
namespace framework {
@@ -34,6 +35,8 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const {
const std::string pattern_name = "shufflechannel_pattern";
FusePassBase::Init(pattern_name, graph);

LOG(WARNING) << "There is fluid.layers.shuffle_channel API already, you can "
"use it instead of (reshape + transpose +reshape)";
GraphPatternDetector gpd;
auto* x = gpd.mutable_pattern()
->NewNode("x")
@@ -93,3 +96,8 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const {

REGISTER_PASS(shuffle_channel_detect_pass,
paddle::framework::ir::ShuffleChannelDetectPass);
REGISTER_PASS_CAPABILITY(shuffle_channel_detect_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("reshape2", 0)
.EQ("transpose2", 0));
6 changes: 4 additions & 2 deletions paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.cc
@@ -25,8 +25,10 @@ PluginTensorRT* PluginFactoryTensorRT::createPlugin(const char* layer_name,
const char* plugin_type;
DeserializeValue(&serial_data, &serial_length, &plugin_type);

PADDLE_ENFORCE(Has(plugin_type),
"trt plugin type %s does not exists, check it.", plugin_type);
PADDLE_ENFORCE_EQ(
Has(plugin_type), true,
platform::errors::NotFound(
"trt plugin type %s does not exists, check it.", plugin_type));
auto plugin = plugin_registry_[plugin_type](serial_data, serial_length);
owned_plugins_.emplace_back(plugin);

7 changes: 6 additions & 1 deletion paddle/fluid/inference/tensorrt/plugin/trt_plugin_utils.h
@@ -103,7 +103,12 @@ struct Serializer<std::vector<T>,
DeserializeValue(buffer, buffer_size, &size);
value->resize(size);
size_t nbyte = value->size() * sizeof(T);
PADDLE_ENFORCE_GE(*buffer_size, nbyte);
PADDLE_ENFORCE_GE(
*buffer_size, nbyte,
platform::errors::InvalidArgument("Expect buffer size >= value size in "
"trt plugin deserialization, but got "
"buffer size = %d, value size = %d.",
*buffer_size, nbyte));
std::memcpy(value->data(), *buffer, nbyte);
reinterpret_cast<char const*&>(*buffer) += nbyte;
*buffer_size -= nbyte;
@@ -0,0 +1,222 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker


class ConvAffineChannelFusePassExplicitPaddingTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 3, 64, 64], dtype="float32")
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=3,
                filter_size=3,
                groups=3,
                padding=[1, 1, 1, 1],
                bias_attr=False,
                act=None)
            input_scale = fluid.layers.create_parameter(
                shape=[3], dtype="float32")
            input_bias = fluid.layers.create_parameter(
                shape=[3], dtype="float32")
            ac_out = fluid.layers.affine_channel(
                x=conv_out, scale=input_scale, bias=input_bias)

        self.feeds = {
            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
        }
        self.fetch_list = [ac_out]

    def test_check_output(self):
        self.check_output()

        PassVersionChecker.IsCompatible('conv_affine_channel_fuse_pass')
Reviewer comment (Member): An assertion needs to be added here, e.g. wrap the compatibility check in self.assertTrue (see the sketch after this comment).
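
A minimal sketch of the change being requested, wrapping the compatibility check in an assertion (the same applies to the other test classes flagged below):

    def test_check_output(self):
        self.check_output()

        # Fail the test when the pass compatibility check does not hold,
        # instead of discarding the returned boolean.
        self.assertTrue(
            PassVersionChecker.IsCompatible('conv_affine_channel_fuse_pass'))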



class ConvAffineChannelFusePassValidPaddingTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 3, 64, 64], dtype="float32")
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=3,
                filter_size=3,
                groups=3,
                padding='VALID',
                bias_attr=False,
                act=None)
            input_scale = fluid.layers.create_parameter(
                shape=[3], dtype="float32")
            input_bias = fluid.layers.create_parameter(
                shape=[3], dtype="float32")
            ac_out = fluid.layers.affine_channel(
                x=conv_out, scale=input_scale, bias=input_bias)

        self.feeds = {
            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
        }
        self.fetch_list = [ac_out]

    def test_check_output(self):
        self.check_output()

        PassVersionChecker.IsCompatible('conv_affine_channel_fuse_pass')
Reviewer comment (Member): Same as above.



class ConvAffineChannelFusePassSamePaddingTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 3, 64, 64], dtype="float32")
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=3,
                filter_size=3,
                groups=3,
                padding='SAME',
                bias_attr=False,
                act=None)
            input_scale = fluid.layers.create_parameter(
                shape=[3], dtype="float32")
            input_bias = fluid.layers.create_parameter(
                shape=[3], dtype="float32")
            ac_out = fluid.layers.affine_channel(
                x=conv_out, scale=input_scale, bias=input_bias)

        self.feeds = {
            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
        }
        self.fetch_list = [ac_out]

    def test_check_output(self):
        self.check_output()

        PassVersionChecker.IsCompatible('conv_affine_channel_fuse_pass')
Reviewer comment (Member): Same as above; please fix all the similar cases below in the same way.



class ConvEltwiseAddAffineChannelFusePassExplicitPaddingTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 3, 64, 64], dtype="float32")
            param_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Xavier(uniform=False),
                learning_rate=0.001)
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=3,
                filter_size=3,
                groups=3,
                padding=[1, 1, 1, 1],
                bias_attr=param_attr,
                act=None)
            input_scale = fluid.layers.create_parameter(
                shape=[3], dtype="float32")
            input_bias = fluid.layers.create_parameter(
                shape=[3], dtype="float32")
            ac_out = fluid.layers.affine_channel(
                x=conv_out, scale=input_scale, bias=input_bias)

        self.feeds = {
            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
        }
        self.fetch_list = [ac_out]

    def test_check_output(self):
        self.check_output()

        PassVersionChecker.IsCompatible(
            'conv_eltwiseadd_affine_channel_fuse_pass')


class ConvEltwiseAddAffineChannelFusePassValidPaddingTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 3, 64, 64], dtype="float32")
            param_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Xavier(uniform=False),
                learning_rate=0.001)
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=3,
                filter_size=3,
                groups=3,
                padding='VALID',
                bias_attr=param_attr,
                act=None)
            input_scale = fluid.layers.create_parameter(
                shape=[3], dtype="float32")
            input_bias = fluid.layers.create_parameter(
                shape=[3], dtype="float32")
            ac_out = fluid.layers.affine_channel(
                x=conv_out, scale=input_scale, bias=input_bias)

        self.feeds = {
            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
        }
        self.fetch_list = [ac_out]

    def test_check_output(self):
        self.check_output()

        PassVersionChecker.IsCompatible(
            'conv_eltwiseadd_affine_channel_fuse_pass')


class ConvEltwiseAddAffineChannelFusePassSamePaddingTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 3, 64, 64], dtype="float32")
            param_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Xavier(uniform=False),
                learning_rate=0.001)
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=3,
                filter_size=3,
                groups=3,
                padding='Same',
                bias_attr=param_attr,
                act=None)
            input_scale = fluid.layers.create_parameter(
                shape=[3], dtype="float32")
            input_bias = fluid.layers.create_parameter(
                shape=[3], dtype="float32")
            ac_out = fluid.layers.affine_channel(
                x=conv_out, scale=input_scale, bias=input_bias)

        self.feeds = {
            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
        }
        self.fetch_list = [ac_out]

    def test_check_output(self):
        self.check_output()

        PassVersionChecker.IsCompatible(
            'conv_eltwiseadd_affine_channel_fuse_pass')


if __name__ == "__main__":
    unittest.main()