2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/activation_op.cc
@@ -112,7 +112,7 @@ class ActivationOpConverter : public OpConverter {

auto output_name = op_desc.Output("Out")[0];

- RreplenishLayerAndOutput(layer, op_type_, {output_name}, test_mode);
+ ReplenishLayerAndOutput(layer, op_type_, {output_name}, test_mode);
}

protected:
@@ -72,7 +72,7 @@ class AffineChannelOpConverter : public OpConverter {
power_weights.get(),
channel_axis);

RreplenishLayerAndOutput(layer, "affine_channel", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "affine_channel", {output_name}, test_mode);
}
};

@@ -80,7 +80,7 @@ class AnchorGeneratorOpConverter : public OpConverter {
anchor_generator_inputs.size(),
*anchor_generator_plugin);

- RreplenishLayerAndOutput(
+ ReplenishLayerAndOutput(
anchor_generator_layer, "anchor_generator", output_names, test_mode);
}
};
10 changes: 5 additions & 5 deletions paddle/fluid/inference/tensorrt/convert/arg_max_op.cc
@@ -42,10 +42,10 @@ class ArgMaxOpConverter : public OpConverter {
auto output_name = op_desc.Output("Out")[0];
bool keepdims = PADDLE_GET_CONST(bool, op_desc.GetAttr("keepdims"));
if (keepdims) {
- RreplenishLayerAndOutput(topk_layer,
- "arg_max",
- {output_name + "_value", output_name},
- test_mode);
+ ReplenishLayerAndOutput(topk_layer,
+ "arg_max",
+ {output_name + "_value", output_name},
+ test_mode);
} else {
auto squeeze_layer =
TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *topk_layer->getOutput(1));
@@ -55,7 +55,7 @@
dims.d[i] = dims.d[i + 1];
}
squeeze_layer->setReshapeDimensions(dims);
- RreplenishLayerAndOutput(
+ ReplenishLayerAndOutput(
squeeze_layer, "arg_max", {output_name}, test_mode);
}
}
10 changes: 5 additions & 5 deletions paddle/fluid/inference/tensorrt/convert/arg_min_op.cc
@@ -42,10 +42,10 @@ class ArgMinOpConverter : public OpConverter {
auto output_name = op_desc.Output("Out")[0];
bool keepdims = PADDLE_GET_CONST(bool, op_desc.GetAttr("keepdims"));
if (keepdims) {
- RreplenishLayerAndOutput(topk_layer,
- "arg_min",
- {output_name + "_value", output_name},
- test_mode);
+ ReplenishLayerAndOutput(topk_layer,
+ "arg_min",
+ {output_name + "_value", output_name},
+ test_mode);
} else {
auto squeeze_layer =
TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *topk_layer->getOutput(1));
@@ -55,7 +55,7 @@
dims.d[i] = dims.d[i + 1];
}
squeeze_layer->setReshapeDimensions(dims);
- RreplenishLayerAndOutput(
+ ReplenishLayerAndOutput(
squeeze_layer, "arg_min", {output_name}, test_mode);
}
}
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/assign_op.cc
@@ -28,7 +28,7 @@ class AssignOpConverter : public OpConverter {
auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
auto* layer = TRT_ENGINE_ADD_LAYER(engine_, Identity, *input);
auto output_name = op_desc.Output("Out")[0];
RreplenishLayerAndOutput(layer, "assign", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "assign", {output_name}, test_mode);
}
};

4 changes: 2 additions & 2 deletions paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc
@@ -170,10 +170,10 @@ class BatchNormOpConverter : public OpConverter {
squeeze_layer =
TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *(layer->getOutput(0)));
squeeze_layer->setReshapeDimensions(squeeze_shape);
- RreplenishLayerAndOutput(
+ ReplenishLayerAndOutput(
squeeze_layer, "batchnorm_add_scale", {output_name}, test_mode);
} else {
- RreplenishLayerAndOutput(
+ ReplenishLayerAndOutput(
layer, "batchnorm_add_scale", {output_name}, test_mode);
}
}
@@ -148,7 +148,7 @@ class BilinearInterpolateV2OpConverter : public OpConverter {
layer->setScales(scales.data(), scales.size());
}

- RreplenishLayerAndOutput(
+ ReplenishLayerAndOutput(
layer, "bilinear_interp_v2", {output_name}, test_mode);
}
};
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/bitwise_and_op.cc
@@ -49,7 +49,7 @@ class BitwiseAndConverter : public OpConverter {
}

auto output_name = op_desc.Output("Out")[0];
RreplenishLayerAndOutput(layer, "bitwise_and", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "bitwise_and", {output_name}, test_mode);
}
};

2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/bitwise_not_op.cc
@@ -69,7 +69,7 @@ class BitwiseNotConverter : public OpConverter {
}

auto output_name = op_desc.Output("Out")[0];
RreplenishLayerAndOutput(layer, "bitwise_not", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "bitwise_not", {output_name}, test_mode);
}
};

2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/bitwise_or_op.cc
@@ -49,7 +49,7 @@ class BitwiseOrConverter : public OpConverter {
}

auto output_name = op_desc.Output("Out")[0];
RreplenishLayerAndOutput(layer, "bitwise_or", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "bitwise_or", {output_name}, test_mode);
}
};

2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/bmm_op.cc
@@ -38,7 +38,7 @@ class BMMOpConverter : public OpConverter {
*input2,
nvinfer1::MatrixOperation::kNONE);

RreplenishLayerAndOutput(layer, "bmm", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "bmm", {output_name}, test_mode);
}
};

2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/c_allreduce_op.cc
@@ -84,7 +84,7 @@ class CAllReduceOpConverter : public OpConverter {
#endif
auto output_name = op_desc.Output("Out")[0];

- RreplenishLayerAndOutput(layer, name, {output_name}, test_mode);
+ ReplenishLayerAndOutput(layer, name, {output_name}, test_mode);
}
};

2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/cast_op.cc
@@ -57,7 +57,7 @@ class CastOpConverter : public OpConverter {
}

auto output_name = op_desc.Output("Out")[0];
RreplenishLayerAndOutput(layer, "cast", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "cast", {output_name}, test_mode);
}
};

2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/celu_op.cc
@@ -78,7 +78,7 @@ class CeluOpConverter : public OpConverter {
nvinfer1::ElementWiseOperation::kSUM);

auto output_name = op_desc.Output("Out")[0];
RreplenishLayerAndOutput(layer, "celu", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "celu", {output_name}, test_mode);
}
};

2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/clip_op.cc
@@ -39,7 +39,7 @@ class ClipOpConverter : public OpConverter {
layer->setBeta(max);

auto output_name = op_desc.Output("Out")[0];
RreplenishLayerAndOutput(layer, "clip", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "clip", {output_name}, test_mode);
#else
PADDLE_THROW(
platform::errors::Fatal("clip TRT converter is only supported on TRT "
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/concat_op.cc
@@ -49,7 +49,7 @@ class ConcatOpConverter : public OpConverter {
engine_, Concatenation, itensors.data(), itensors.size());
layer->setAxis(axis);
auto output_name = op_desc.Output("Out")[0];
RreplenishLayerAndOutput(layer, "concat", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "concat", {output_name}, test_mode);
}
};

@@ -265,7 +265,7 @@ class CrossMultiheadMatMulOpConverter : public OpConverter {
("shuffle_last_multihead_matmul(Output: " + output_name + ")").c_str());
// return
layer = reshape_after_mha_layer;
- RreplenishLayerAndOutput(
+ ReplenishLayerAndOutput(
layer, "cross_multihead_matmul", {output_name}, test_mode);
}
};
4 changes: 2 additions & 2 deletions paddle/fluid/inference/tensorrt/convert/cumsum_op.cc
@@ -45,7 +45,7 @@ class CumsumOpConverter : public OpConverter {
cumsum_dim.d[0] = 1;
}
layer->setReshapeDimensions(cumsum_dim);
RreplenishLayerAndOutput(layer, "cumsum", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "cumsum", {output_name}, test_mode);
} else {
int axis = 0;
if (op_desc.HasAttr("axis")) {
@@ -161,7 +161,7 @@ class CumsumOpConverter : public OpConverter {
nvinfer1::ILoopOutputLayer* loopOut =
loop->addLoopOutput(*curSum->getOutput(0), reverseFlag, axis);
loopOut->setInput(1, *tripLimit);
- RreplenishLayerAndOutput(loopOut, "cumsum", {output_name}, test_mode);
+ ReplenishLayerAndOutput(loopOut, "cumsum", {output_name}, test_mode);
}
#else
VLOG(3) << "Cumsum is not supported when TensorRT < 7.2.2";
4 changes: 2 additions & 2 deletions paddle/fluid/inference/tensorrt/convert/deformable_conv_op.cc
@@ -105,7 +105,7 @@ class DeformableConvOpConverter : public OpConverter {
std::vector<std::string> output_names;
output_names.push_back(op_desc.Output("Output").front());

- RreplenishLayerAndOutput(
+ ReplenishLayerAndOutput(
deformable_conv_layer, "deformable_conv", output_names, test_mode);
} else {
auto* deformable_conv_plugin = new plugin::DeformableConvPluginDynamic(
@@ -133,7 +133,7 @@ class DeformableConvOpConverter : public OpConverter {
std::vector<std::string> output_names;
output_names.push_back(op_desc.Output("Output").front());

- RreplenishLayerAndOutput(
+ ReplenishLayerAndOutput(
deformable_conv_layer, "deformable_conv", output_names, test_mode);
}
}
@@ -49,7 +49,7 @@ class DequantizeLinearOpConverter : public OpConverter {
layer->setAxis(axis);
}
auto output_name = op_desc.Output("Y")[0];
- RreplenishLayerAndOutput(
+ ReplenishLayerAndOutput(
layer, "dequantize_linear", {output_name}, test_model);
#else
PADDLE_THROW(
4 changes: 2 additions & 2 deletions paddle/fluid/inference/tensorrt/convert/dropout_op.cc
@@ -43,7 +43,7 @@ class DropoutOpConverter : public OpConverter {
downgrade_in_infer == "upscale_in_train") {
auto* layer = TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *input1);
auto output_name = op_desc.Output("Out")[0];
RreplenishLayerAndOutput(layer, "dropout", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "dropout", {output_name}, test_mode);
return;
}

@@ -75,7 +75,7 @@ class DropoutOpConverter : public OpConverter {
std::move(weight_tensor));
auto output_name = op_desc.Output("Out")[0];

RreplenishLayerAndOutput(layer, "dropout", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "dropout", {output_name}, test_mode);
}
};

2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/einsum_op.cc
@@ -39,7 +39,7 @@ class EinsumOpConverter : public OpConverter {
engine_, Einsum, input_tensors.data(), input_num, equation.c_str());

auto output_name = op_desc.Output("Out")[0];
RreplenishLayerAndOutput(layer, "einsum", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "einsum", {output_name}, test_mode);
#else
VLOG(3) << "Einsum is not supported when TensorRT < 8.2.0";
#endif
10 changes: 5 additions & 5 deletions paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
@@ -162,7 +162,7 @@ class ElementwiseTensorOpConverter : public OpConverter {
*(less_layer->getOutput(0)),
*(equal_layer->getOutput(0)),
nvinfer1::ElementWiseOperation::kOR);
RreplenishLayerAndOutput(layer, "elementwise", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "elementwise", {output_name}, test_mode);
} else if (op_type_ == "greater_equal") {
auto* greater_layer =
TRT_ENGINE_ADD_LAYER(engine_,
@@ -181,7 +181,7 @@
*(greater_layer->getOutput(0)),
*(equal_layer->getOutput(0)),
nvinfer1::ElementWiseOperation::kOR);
RreplenishLayerAndOutput(layer, "elementwise", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "elementwise", {output_name}, test_mode);
} else if (op_type_ == "mod") {
auto* div_layer =
TRT_ENGINE_ADD_LAYER(engine_,
@@ -203,7 +203,7 @@
*(mul_layer->getOutput(0)),
nvinfer1::ElementWiseOperation::kSUB);
SupportFP32MixPrecision(output_name, op_desc.Type(), layer);
RreplenishLayerAndOutput(layer, "elementwise", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "elementwise", {output_name}, test_mode);
} else {
auto op_pair = ops.find(op_type_);
PADDLE_ENFORCE_NE(
@@ -217,7 +217,7 @@
auto* layer = TRT_ENGINE_ADD_LAYER(
engine_, ElementWise, *X, *reshape_y_tensor, op_pair->second);
SupportFP32MixPrecision(output_name, op_desc.Type(), layer);
RreplenishLayerAndOutput(layer, "elementwise", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "elementwise", {output_name}, test_mode);
}
}

@@ -350,7 +350,7 @@ class PowOpConverter : public OpConverter {
auto* layer = TRT_ENGINE_ADD_LAYER(
engine_, ElementWise, *X, *Y, nvinfer1::ElementWiseOperation::kPOW);
SupportFP32MixPrecision(output_name, op_desc.Type(), layer);
RreplenishLayerAndOutput(layer, "elementwise", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "elementwise", {output_name}, test_mode);
}
};

@@ -40,10 +40,10 @@ class ElementwiseaddTransposeOpConverter : public OpConverter {
engine_->AddDynamicPlugin(inputs.data(), 2, plugin);
std::vector<std::string> output_names;
output_names.emplace_back(op_desc.Output("Out").front());
- RreplenishLayerAndOutput(elementwise_layer,
- "fuse_elementwiseadd_transpose",
- output_names,
- test_mode);
+ ReplenishLayerAndOutput(elementwise_layer,
+ "fuse_elementwiseadd_transpose",
+ output_names,
+ test_mode);
}
}
};
14 changes: 7 additions & 7 deletions paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc
@@ -171,12 +171,12 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
} else {
layer = plugin_layer;
auto output_name = op_desc.Output("Out")[0];
- RreplenishLayerAndOutput(layer,
- "ManyEmbLayerNormVarlenPluginDynamicV1",
- {output_name,
- std::string("qkv_plugin_mask"),
- std::string("max_seqlen_tensor")},
- test_mode);
+ ReplenishLayerAndOutput(layer,
+ "ManyEmbLayerNormVarlenPluginDynamicV1",
+ {output_name,
+ std::string("qkv_plugin_mask"),
+ std::string("max_seqlen_tensor")},
+ test_mode);
}
} else {
for (int i = 0; i < input_num; i++) {
@@ -247,7 +247,7 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
}
layer = plugin_layer;
auto output_name = op_desc.Output("Out")[0];
- RreplenishLayerAndOutput(
+ ReplenishLayerAndOutput(
layer, "ManyEmbLayerNormPluginDynamicV1", {output_name}, test_mode);
}
}
4 changes: 2 additions & 2 deletions paddle/fluid/inference/tensorrt/convert/equal_op.cc
@@ -68,7 +68,7 @@ class EqualOpConverter : public OpConverter {

layer = TRT_ENGINE_ADD_LAYER(
engine_, ElementWise, *X, *Y, nvinfer1::ElementWiseOperation::kEQUAL);
RreplenishLayerAndOutput(layer, "equal", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "equal", {output_name}, test_mode);
}
};

@@ -125,7 +125,7 @@ class NotEqualOpConverter : public OpConverter {
layer = TRT_ENGINE_ADD_LAYER(
engine_, Unary, *layer->getOutput(0), nvinfer1::UnaryOperation::kNOT);

RreplenishLayerAndOutput(layer, "not_equal", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "not_equal", {output_name}, test_mode);
}
};

2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/expand_v2_op.cc
@@ -113,7 +113,7 @@ class ExpandOpConverter : public OpConverter {
layer->setInput(2, *sizes_tensor);
layer->setInput(3, *strides_tensor);

- RreplenishLayerAndOutput(layer, op_type_, {output_name}, test_mode);
+ ReplenishLayerAndOutput(layer, op_type_, {output_name}, test_mode);
}

protected:
@@ -77,7 +77,7 @@ class FillAnyLikeOpConverter : public OpConverter {
layer->setInput(2, *sizes_tensor);
layer->setInput(3, *strides_tensor);

RreplenishLayerAndOutput(layer, "fill_any_like", {output_name}, test_mode);
ReplenishLayerAndOutput(layer, "fill_any_like", {output_name}, test_mode);
}
};
