Skip to content

Commit 1057b18

Browse files
committed
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into grid_api
test=develop
2 parents dd46209 + 1f82c0c commit 1057b18

59 files changed

Lines changed: 7420 additions & 991 deletions

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

paddle/fluid/framework/op_version_registry.h

Lines changed: 10 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -48,11 +48,15 @@ struct ModifyAttr : OpUpdateRecord {
4848
boost::any default_value_;
4949
};
5050
struct NewAttr : OpUpdateRecord {
51-
NewAttr(const std::string& name, const std::string& remark)
52-
: OpUpdateRecord({Type::kNewAttr, remark}), name_(name) {}
51+
NewAttr(const std::string& name, const std::string& remark,
52+
boost::any default_value)
53+
: OpUpdateRecord({Type::kNewAttr, remark}),
54+
name_(name),
55+
default_value_(default_value) {}
5356

5457
private:
5558
std::string name_;
59+
boost::any default_value_;
5660
};
5761

5862
class OpVersionDesc {
@@ -64,9 +68,10 @@ class OpVersionDesc {
6468
return *this;
6569
}
6670

67-
OpVersionDesc& NewAttr(const std::string& name, const std::string& remark) {
68-
infos_.push_back(
69-
std::shared_ptr<OpUpdateRecord>(new compatible::NewAttr(name, remark)));
71+
OpVersionDesc& NewAttr(const std::string& name, const std::string& remark,
72+
boost::any default_value) {
73+
infos_.push_back(std::shared_ptr<OpUpdateRecord>(
74+
new compatible::NewAttr(name, remark, default_value)));
7075
return *this;
7176
}
7277

paddle/fluid/framework/op_version_registry_test.cc

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,15 +32,17 @@ TEST(test_operator_version, test_operator_version) {
3232
"Increased from the original one method to two.", -1)
3333
.NewAttr("size",
3434
"In order to represent a two-dimensional rectangle, the "
35-
"parameter size is added."))
35+
"parameter size is added.",
36+
0))
3637
.AddCheckpoint(
3738
R"ROC(
3839
Add a new attribute [height]
3940
)ROC",
4041
framework::compatible::OpVersionDesc().NewAttr(
4142
"height",
4243
"In order to represent a two-dimensional rectangle, the "
43-
"parameter height is added."));
44+
"parameter height is added.",
45+
0));
4446
}
4547
} // namespace compatible
4648
} // namespace framework

paddle/fluid/operators/pixel_shuffle_op.cc

Lines changed: 65 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -28,40 +28,59 @@ class PixelShuffleOp : public framework::OperatorWithKernel {
2828
"Output(Out) of PixelShuffleOp should not be null."));
2929

3030
auto input_dims = ctx->GetInputDim("X");
31-
PADDLE_ENFORCE_EQ(
32-
input_dims.size(), 4,
33-
platform::errors::InvalidArgument(
34-
"Input should be a 4-D tensor of format [N, C, H, W], but got %u.",
35-
input_dims.size()));
31+
PADDLE_ENFORCE_EQ(input_dims.size(), 4,
32+
platform::errors::InvalidArgument(
33+
"Input should be a 4-D tensor of format [N, C, H, W] "
34+
"or [N, H, W, C], but got %u.",
35+
input_dims.size()));
3636

3737
auto upscale_factor = ctx->Attrs().Get<int>("upscale_factor");
3838

39-
PADDLE_ENFORCE_EQ(input_dims[1] % (upscale_factor * upscale_factor), 0,
40-
platform::errors::InvalidArgument(
41-
"The square of upscale_factor[%u] should divide the "
42-
"number of channel[%u]",
43-
input_dims[1], upscale_factor * upscale_factor));
44-
39+
const std::string data_format =
40+
ctx->Attrs().Get<std::string>("data_format");
41+
const bool channel_last = (data_format == "NHWC");
42+
43+
if (!channel_last) {
44+
PADDLE_ENFORCE_EQ(
45+
input_dims[1] % (upscale_factor * upscale_factor), 0,
46+
platform::errors::InvalidArgument(
47+
"The square of upscale_factor[%u] should divide the "
48+
"number of channel[%u]",
49+
input_dims[1], upscale_factor * upscale_factor));
50+
} else {
51+
PADDLE_ENFORCE_EQ(
52+
input_dims[3] % (upscale_factor * upscale_factor), 0,
53+
platform::errors::InvalidArgument(
54+
"The square of upscale_factor[%u] should divide the "
55+
"number of channel[%u]",
56+
input_dims[3], upscale_factor * upscale_factor));
57+
}
4558
auto output_dims = input_dims;
4659
output_dims[0] = input_dims[0];
47-
output_dims[1] = input_dims[1] / (upscale_factor * upscale_factor);
48-
output_dims[2] = input_dims[2] * upscale_factor;
49-
output_dims[3] = input_dims[3] * upscale_factor;
60+
if (!channel_last) {
61+
output_dims[1] = input_dims[1] / (upscale_factor * upscale_factor);
62+
output_dims[2] = input_dims[2] * upscale_factor;
63+
output_dims[3] = input_dims[3] * upscale_factor;
64+
} else {
65+
output_dims[1] = input_dims[1] * upscale_factor;
66+
output_dims[2] = input_dims[2] * upscale_factor;
67+
output_dims[3] = input_dims[3] / (upscale_factor * upscale_factor);
68+
}
5069
ctx->SetOutputDim("Out", output_dims);
5170
}
5271
};
5372

5473
class PixelShuffleOpMaker : public framework::OpProtoAndCheckerMaker {
5574
public:
5675
void Make() override {
57-
AddInput(
58-
"X",
59-
"(Tensor, default Tensor<float>), "
60-
"the input feature data of PixelShuffleOp, the layout is [N C H W].");
61-
AddOutput(
62-
"Out",
63-
"(Tensor, default Tensor<float>), the output of "
64-
"PixelShuffleOp. The layout is [N,C/factor^2,H*factor,W*factor].");
76+
AddInput("X",
77+
"(Tensor, default Tensor<float>), "
78+
"the input feature data of PixelShuffleOp, the layout is [N, C, "
79+
"H, W] or [N, H, W, C].");
80+
AddOutput("Out",
81+
"(Tensor, default Tensor<float>), the output of "
82+
"PixelShuffleOp. The layout is [N, C/factor^2, H*factor, "
83+
"W*factor] or [N, H*factor, W*factor, C/factor^2].");
6584
AddAttr<int>("upscale_factor",
6685
"the factor to increase spatial resolution by.")
6786
.SetDefault(1)
@@ -70,6 +89,11 @@ class PixelShuffleOpMaker : public framework::OpProtoAndCheckerMaker {
7089
platform::errors::InvalidArgument(
7190
"upscale_factor should be larger than 0."));
7291
});
92+
AddAttr<std::string>(
93+
"data_format",
94+
"An optional string from: \"NHWC\", \"NCHW\". "
95+
"Defaults to \"NCHW\". Specify the data format of the input data.")
96+
.SetDefault("NCHW");
7397

7498
AddComment(R"DOC(
7599
Pixel Shuffle operator
@@ -114,19 +138,30 @@ class PixelShuffleGradOp : public framework::OperatorWithKernel {
114138
platform::errors::NotFound("Output(X@Grad) should not be null"));
115139

116140
auto do_dims = ctx->GetInputDim(framework::GradVarName("Out"));
117-
PADDLE_ENFORCE_EQ(
118-
do_dims.size(), 4,
119-
platform::errors::InvalidArgument(
120-
"Input should be a 4-D tensor of format [N, C, H, W], but got %u.",
121-
do_dims.size()));
141+
PADDLE_ENFORCE_EQ(do_dims.size(), 4,
142+
platform::errors::InvalidArgument(
143+
"Input should be a 4-D tensor of format [N, C, H, W] "
144+
"or [N, H, W, C], but got %u.",
145+
do_dims.size()));
122146

123147
auto upscale_factor = ctx->Attrs().Get<int>("upscale_factor");
124148

149+
const std::string data_format =
150+
ctx->Attrs().Get<std::string>("data_format");
151+
const bool channel_last = (data_format == "NHWC");
152+
125153
auto dx_dims = do_dims;
126154
dx_dims[0] = do_dims[0];
127-
dx_dims[1] = do_dims[1] * (upscale_factor * upscale_factor);
128-
dx_dims[2] = do_dims[2] / upscale_factor;
129-
dx_dims[3] = do_dims[3] / upscale_factor;
155+
156+
if (!channel_last) {
157+
dx_dims[1] = do_dims[1] * (upscale_factor * upscale_factor);
158+
dx_dims[2] = do_dims[2] / upscale_factor;
159+
dx_dims[3] = do_dims[3] / upscale_factor;
160+
} else {
161+
dx_dims[1] = do_dims[1] / upscale_factor;
162+
dx_dims[2] = do_dims[2] / upscale_factor;
163+
dx_dims[3] = do_dims[3] * (upscale_factor * upscale_factor);
164+
}
130165
ctx->SetOutputDim(framework::GradVarName("X"), dx_dims);
131166
}
132167
};

paddle/fluid/operators/pixel_shuffle_op.h

Lines changed: 32 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ limitations under the License. */
1111

1212
#pragma once
1313
#include <algorithm>
14+
#include <string>
1415
#include <vector>
1516
#include "paddle/fluid/framework/op_registry.h"
1617
#include "paddle/fluid/operators/math/math_function.h"
@@ -24,23 +25,33 @@ class PixelShuffleOpKernel : public framework::OpKernel<T> {
2425
void Compute(const framework::ExecutionContext& ctx) const override {
2526
auto* in = ctx.Input<framework::Tensor>("X");
2627
auto* out = ctx.Output<framework::Tensor>("Out");
28+
2729
out->mutable_data<T>(ctx.GetPlace());
2830

2931
int factor = ctx.Attr<int>("upscale_factor");
3032

33+
std::string data_format = ctx.Attr<std::string>("data_format");
34+
bool channel_last = (data_format == "NHWC");
35+
3136
auto in_dims = in->dims();
3237
auto o_dims = out->dims();
3338

3439
framework::Tensor t;
3540
t.ShareDataWith(*in);
36-
t.Resize({in_dims[0], o_dims[1], factor, factor, in_dims[2], in_dims[3]});
37-
41+
if (!channel_last) {
42+
t.Resize({in_dims[0], o_dims[1], factor, factor, in_dims[2], in_dims[3]});
43+
} else {
44+
t.Resize({in_dims[0], in_dims[1], in_dims[2], o_dims[3], factor, factor});
45+
}
3846
std::vector<int> axis = {0, 1, 4, 2, 5, 3};
3947

4048
framework::Tensor o;
4149
o.ShareDataWith(*out);
42-
o.Resize({in_dims[0], o_dims[1], in_dims[2], factor, in_dims[3], factor});
43-
50+
if (!channel_last) {
51+
o.Resize({in_dims[0], o_dims[1], in_dims[2], factor, in_dims[3], factor});
52+
} else {
53+
o.Resize({in_dims[0], in_dims[1], factor, in_dims[2], factor, o_dims[3]});
54+
}
4455
math::Transpose<DeviceContext, T, 6> trans;
4556
auto& dev_ctx = ctx.template device_context<DeviceContext>();
4657
trans(dev_ctx, t, &o, axis);
@@ -58,19 +69,32 @@ class PixelShuffleGradOpKernel : public framework::OpKernel<T> {
5869

5970
int factor = ctx.Attr<int>("upscale_factor");
6071

72+
std::string data_format = ctx.Attr<std::string>("data_format");
73+
bool channel_last = (data_format == "NHWC");
74+
6175
auto do_dims = dout->dims();
6276
auto dx_dims = dx->dims();
6377

6478
framework::Tensor t;
6579
t.ShareDataWith(*dout);
66-
t.Resize({do_dims[0], do_dims[1], dx_dims[2], factor, dx_dims[3], factor});
67-
80+
if (!channel_last) {
81+
t.Resize(
82+
{do_dims[0], do_dims[1], dx_dims[2], factor, dx_dims[3], factor});
83+
} else {
84+
t.Resize(
85+
{do_dims[0], dx_dims[1], factor, dx_dims[2], factor, do_dims[3]});
86+
}
6887
std::vector<int> axis = {0, 1, 3, 5, 2, 4};
6988

7089
framework::Tensor o;
7190
o.ShareDataWith(*dx);
72-
o.Resize({do_dims[0], do_dims[1], factor, factor, dx_dims[2], dx_dims[3]});
73-
91+
if (!channel_last) {
92+
o.Resize(
93+
{do_dims[0], do_dims[1], factor, factor, dx_dims[2], dx_dims[3]});
94+
} else {
95+
o.Resize(
96+
{do_dims[0], dx_dims[1], dx_dims[2], do_dims[3], factor, factor});
97+
}
7498
math::Transpose<DeviceContext, T, 6> trans;
7599
auto& dev_ctx = ctx.template device_context<DeviceContext>();
76100
trans(dev_ctx, t, &o, axis);

paddle/fluid/pybind/CMakeLists.txt

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,8 +72,23 @@ if(WITH_PYTHON)
7272
set(tmp_impl_file ${impl_file}.tmp)
7373

7474
if(WIN32)
75+
file(WRITE ${CMAKE_BINARY_DIR}/paddle/fluid/pybind/op_function_generator_retry.bat ""
76+
"set build_times=1\n"
77+
":retry\n"
78+
"ECHO op_function_generator run %build_times% time\n"
79+
"${CMAKE_BINARY_DIR}/paddle/fluid/pybind/${CMAKE_BUILD_TYPE}/op_function_generator ${impl_file}\n"
80+
"if %ERRORLEVEL% NEQ 0 (\n"
81+
" set /a build_times=%build_times%+1\n"
82+
" if %build_times% GTR 100 (\n"
83+
" exit /b 1\n"
84+
" ) else (\n"
85+
" goto :retry\n"
86+
" )\n"
87+
")\n"
88+
"exit /b 0")
89+
7590
add_custom_command(TARGET op_function_generator POST_BUILD
76-
COMMAND ${CMAKE_BINARY_DIR}/paddle/fluid/pybind/${CMAKE_BUILD_TYPE}/op_function_generator ${impl_file}
91+
COMMAND ${CMAKE_BINARY_DIR}/paddle/fluid/pybind/op_function_generator_retry.bat
7792
)
7893

7994
if(${CBLAS_PROVIDER} STREQUAL MKLML)

paddle/fluid/pybind/op_function_generator.cc

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@ std::map<std::string, std::set<std::string>> op_ins_map = {
4141
{"fake_quantize_dequantize_moving_average_abs_max",
4242
{"X", "InScale", "InAccum", "InState"}},
4343
{"nll_loss", {"X", "Label", "Weight"}},
44+
{"bilinear_tensor_product", {"X", "Y", "Weight", "Bias"}},
4445
{"gather", {"X", "Index", "Axis"}},
4546
};
4647

paddle/scripts/paddle_build.bat

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -144,7 +144,7 @@ call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd6
144144
set build_times=1
145145
:build_tp
146146
echo Build third_party for %build_times% time:
147-
msbuild /m /p:Configuration=Release /verbosity:minimal third_party.vcxproj
147+
msbuild /m /p:Configuration=Release /verbosity:quiet third_party.vcxproj
148148
if %ERRORLEVEL% NEQ 0 (
149149
set /a build_times=%build_times%+1
150150
if %build_times% GTR 3 (
@@ -159,7 +159,7 @@ echo Build third_party successfully!
159159
set build_times=1
160160
:build_paddle
161161
echo Build Paddle for %build_times% time:
162-
msbuild /m /p:Configuration=Release /verbosity:quiet paddle.sln
162+
msbuild /m /p:Configuration=Release /verbosity:minimal paddle.sln
163163
if %ERRORLEVEL% NEQ 0 (
164164
set /a build_times=%build_times%+1
165165
if %build_times% GTR 2 (

python/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,7 @@ set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/)
9191
if (WITH_TESTING)
9292
add_subdirectory(paddle/reader/tests)
9393
add_subdirectory(paddle/dataset/tests)
94+
add_subdirectory(paddle/tests)
9495
add_subdirectory(paddle/fluid/tests)
9596
add_subdirectory(paddle/fluid/contrib/tests)
9697
add_subdirectory(paddle/fluid/contrib/slim/tests)

0 commit comments

Comments
 (0)