Complete smooth_l1_loss_op.#3913
Conversation
|
|
||
| #pragma once | ||
| #include "paddle/framework/eigen.h" | ||
| #include "paddle/framework/op_registry.h" |
There was a problem hiding this comment.
Please include this header file:
#include "paddle/platform/hostdevice.h"
paddle/operators/smooth_l1_loss_op.h
Outdated
|
|
||
| template <typename T> | ||
| struct SmoothL1LossFoward { | ||
| __host__ __device__ SmoothL1LossFoward(const T& sigma2) : sigma2(sigma2) {} |
There was a problem hiding this comment.
Please change
__host__ __device__
to
HOSTDEVICE
paddle/operators/smooth_l1_loss_op.h
Outdated
| struct SmoothL1LossFoward { | ||
| __host__ __device__ SmoothL1LossFoward(const T& sigma2) : sigma2(sigma2) {} | ||
|
|
||
| __host__ __device__ T operator()(const T& val) const { |
paddle/operators/smooth_l1_loss_op.h
Outdated
|
|
||
| template <typename T> | ||
| struct SmoothL1LossBackward { | ||
| __host__ __device__ SmoothL1LossBackward(const T& sigma2) : sigma2(sigma2) {} |
paddle/operators/smooth_l1_loss_op.h
Outdated
| struct SmoothL1LossBackward { | ||
| __host__ __device__ SmoothL1LossBackward(const T& sigma2) : sigma2(sigma2) {} | ||
|
|
||
| __host__ __device__ T operator()(const T& val) const { |
paddle/operators/smooth_l1_loss_op.h
Outdated
| using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>; | ||
|
|
||
| template <typename T> | ||
| struct SmoothL1LossFoward { |
There was a problem hiding this comment.
SmoothL1LossFoward --> SmoothL1LossForward
| }; | ||
|
|
||
| template <typename T> | ||
| struct SmoothL1LossBackward { |
There was a problem hiding this comment.
Please unify the name Backward and Grad
There was a problem hiding this comment.
SmoothL1LossBackward is just a callback function used in SmoothL1LossGradKernel
|
|
||
| #pragma once | ||
| #include "paddle/framework/eigen.h" | ||
| #include "paddle/framework/op_registry.h" |
paddle/operators/smooth_l1_loss_op.h
Outdated
|
|
||
| template <typename T> | ||
| struct SmoothL1LossFoward { | ||
| __host__ __device__ SmoothL1LossFoward(const T& sigma2) : sigma2(sigma2) {} |
paddle/operators/smooth_l1_loss_op.h
Outdated
| using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>; | ||
|
|
||
| template <typename T> | ||
| struct SmoothL1LossFoward { |
paddle/operators/smooth_l1_loss_op.h
Outdated
| struct SmoothL1LossFoward { | ||
| __host__ __device__ SmoothL1LossFoward(const T& sigma2) : sigma2(sigma2) {} | ||
|
|
||
| __host__ __device__ T operator()(const T& val) const { |
| }; | ||
|
|
||
| template <typename T> | ||
| struct SmoothL1LossBackward { |
There was a problem hiding this comment.
SmoothL1LossBackward is just a callback function used in SmoothL1LossGradKernel
paddle/operators/smooth_l1_loss_op.h
Outdated
|
|
||
| template <typename T> | ||
| struct SmoothL1LossBackward { | ||
| __host__ __device__ SmoothL1LossBackward(const T& sigma2) : sigma2(sigma2) {} |
paddle/operators/smooth_l1_loss_op.h
Outdated
| struct SmoothL1LossBackward { | ||
| __host__ __device__ SmoothL1LossBackward(const T& sigma2) : sigma2(sigma2) {} | ||
|
|
||
| __host__ __device__ T operator()(const T& val) const { |
| Tensor paddle_weights; | ||
| paddle_weights.mutable_data<T>(mat_dims, context.GetPlace()); | ||
| auto weights = EigenMatrix<T>::From(paddle_weights); | ||
| // initialize to 1.0 |
There was a problem hiding this comment.
Please remove 152-159, and change to:
weights.device(place) = weight.constant(static_cast<T>(1.0));
There was a problem hiding this comment.
I tried weights.device(place) = weight.constant(static_cast<T>(1.0)); before, but it didn't work for GPU. Thanks for your suggestion — it works fine for both GPU and CPU.
paddle/operators/smooth_l1_loss_op.h
Outdated
| SmoothL1LossBackward<T>(sigma2)); | ||
|
|
||
| auto* out0 = context.Output<Tensor>(framework::GradVarName("X")); | ||
| auto* out1 = context.Output<Tensor>(framework::GradVarName("Y")); |
There was a problem hiding this comment.
These two lines of code can be moved to line 172. It's better if the place of declaration is close to the place of use.
| AddOutput("diff", "Intermediate variable to cache Win*(X-Y).") | ||
| .AsIntermediate(); | ||
| AddOutput("Out", "Final smooth l1 loss of inputs."); | ||
| AddComment(R"DOC( |
There was a problem hiding this comment.
Put AddComment at the end?
| PADDLE_ENFORCE_EQ(x->dims(), y->dims(), | ||
| "Dimensions of SmoothL1LossOp's input and target " | ||
| "must be same."); | ||
| PADDLE_ENFORCE_GE(framework::arity(x->dims()), 2, |
| } | ||
|
|
||
| auto* diff = ctx.Output<framework::Tensor>("diff"); | ||
| auto* out = ctx.Output<framework::Tensor>("Out"); |
There was a problem hiding this comment.
auto* diff = ctx.Output<framework::LoDTensor>("diff");
auto* out = ctx.Output<framework::LoDTensor>("Out");
| The equation is: | ||
| loss = 0.5 * (sigma * (x - y)) ^ 2 if abs(x - y) < 1 / sigma^2 | ||
| abs(x - y) - 0.5 / sigma^2 otherwise | ||
| )DOC"); |
There was a problem hiding this comment.
Add a blank line after line 77 and a blank line before line 80, and indent the formula. The comment may be rendered as Markdown, and it looks better with the indentation.
| auto* x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X")); | ||
| auto* y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y")); | ||
|
|
||
| PADDLE_ENFORCE_GE(framework::arity(out_dims), 2, |
| AddInput("Y", "Target of SmoothL1LossOp."); | ||
| AddInput("InsideWeight", "Optional input to scale (X-Y)."); | ||
| AddInput("OutsideWeight", "Optinal input to scale smooth l1 loss."); | ||
| AddOutput("diff", "Intermediate variable to cache Win*(X-Y).") |
paddle/operators/smooth_l1_loss_op.h
Outdated
| diff.device(place) = diff * inside_weight; | ||
| } | ||
|
|
||
| auto in_counts = framework::product(in0->dims()); |
There was a problem hiding this comment.
auto in_counts = in0->numel();
Use tensor->numel() to replace framework::product.
paddle/operators/smooth_l1_loss_op.h
Outdated
| Tensor paddle_errors; | ||
| paddle_errors.mutable_data<T>({static_cast<int>(in_counts)}, | ||
| context.GetPlace()); | ||
| auto errors = EigenVector<T>::Flatten(paddle_errors); |
There was a problem hiding this comment.
Why is it named paddle_errors? :)
There was a problem hiding this comment.
It means "paddle tensor errors"; rename it to ptensor_errors.
paddle/operators/smooth_l1_loss_op.h
Outdated
| SmoothL1LossBackward<T>(sigma2)); | ||
|
|
||
| // compute weights | ||
| Tensor paddle_weights; |
| self.outputs = {'diff': diff, 'Out': loss} | ||
|
|
||
|
|
||
| class SmoothL1LossGradOpTest(GradientChecker): |
There was a problem hiding this comment.
Use the new unit testing framework.
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into fix-3789
paddle/operators/smooth_l1_loss_op.h
Outdated
| out1->mutable_data<T>(context.GetPlace()); | ||
| auto place = context.GetEigenDevice<Place>(); | ||
|
|
||
| auto sigma = static_cast<T>(context.op().Attr<AttrType>("sigma")); |
There was a problem hiding this comment.
context.op().Attr -> context.Attr
| self.check_grad( | ||
| ['X'], | ||
| 'Out', | ||
| max_relative_error=0.08, |
There was a problem hiding this comment.
Tune max_relative_error to be smaller — preferably smaller than 0.02.
resolves #3798