2 changes: 0 additions & 2 deletions paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc
@@ -49,8 +49,6 @@ REGISTER_OP_WITHOUT_GRADIENT(elementwise_floordiv, ops::ElementwiseOp,
 
 REGISTER_OP_CPU_KERNEL(
     elementwise_floordiv,
-    ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext, double>,
     ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext, int>,
     ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext,
                                    int64_t>);
2 changes: 0 additions & 2 deletions paddle/fluid/operators/elementwise/elementwise_floordiv_op.cu
@@ -19,7 +19,5 @@ namespace plat = paddle::platform;
 
 REGISTER_OP_CUDA_KERNEL(
     elementwise_floordiv,
-    ops::ElementwiseFloorDivKernel<plat::CUDADeviceContext, float>,
-    ops::ElementwiseFloorDivKernel<plat::CUDADeviceContext, double>,
    ops::ElementwiseFloorDivKernel<plat::CUDADeviceContext, int>,
    ops::ElementwiseFloorDivKernel<plat::CUDADeviceContext, int64_t>);
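
Side note on dtype coverage: with the `float` and `double` registrations gone, `elementwise_floordiv` keeps kernels only for `int` and `int64` on both CPU and CUDA, so floor division on floating-point tensors should now fail at kernel lookup. A minimal sketch of the resulting behavior, assuming a build with this patch (the exact exception surfaced on the failed lookup is an assumption):

```python
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    # int64 inputs: handled by the remaining kernel
    a = fluid.dygraph.to_variable(np.array([7, 8, 9]).astype('int64'))
    b = fluid.dygraph.to_variable(np.array([2, 2, 3]).astype('int64'))
    print((a // b).numpy())  # [3 4 3]

    # float inputs: no kernel is registered any more
    f = fluid.dygraph.to_variable(np.array([7.0, 8.0]).astype('float32'))
    g = fluid.dygraph.to_variable(np.array([2.0, 2.0]).astype('float32'))
    try:
        _ = f // g
    except Exception as e:  # assumed: the missing-kernel error lands here
        print(type(e).__name__)
```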
12 changes: 2 additions & 10 deletions paddle/fluid/operators/elementwise/elementwise_floordiv_op.h
@@ -14,7 +14,6 @@ limitations under the License. */
 
 #pragma once
 
-#include <math.h>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/operators/elementwise/elementwise_op.h"
 #include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
@@ -62,15 +61,8 @@ void elementwise_floor_div(const framework::ExecutionContext &ctx,
                            const framework::Tensor *x,
                            const framework::Tensor *y, framework::Tensor *z) {
   int axis = ctx.Attr<int>("axis");
-  auto x_dims = x->dims();
-  auto y_dims = y->dims();
-  if (x_dims.size() >= y_dims.size()) {
-    ElementwiseComputeEx<FloorDivFunctor<T>, DeviceContext, T>(
-        ctx, x, y, axis, FloorDivFunctor<T>(), z);
-  } else {
-    ElementwiseComputeEx<InverseFloorDivFunctor<T>, DeviceContext, T>(
-        ctx, x, y, axis, InverseFloorDivFunctor<T>(), z);
-  }
+  ElementwiseComputeEx<FloorDivFunctor<T>, DeviceContext, T>(
+      ctx, x, y, axis, FloorDivFunctor<T>(), z);
 }
 
 template <typename DeviceContext, typename T>
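
For context, the removed branch swapped in `InverseFloorDivFunctor` when `y` had higher rank than `x`, so broadcasting still produced `x // y`; after the revert, `ElementwiseComputeEx` is always invoked with `FloorDivFunctor` and assumes `x`'s rank is at least `y`'s. A rough NumPy sketch of the elementwise semantics on the integer dtypes the op still supports (rounding for negative operands is not asserted here):

```python
import numpy as np

def floor_div_like(x, y):
    # Broadcast the lower-rank operand, then divide elementwise;
    # for non-negative integers this matches C++ integer division.
    return np.floor_divide(x, y)

print(floor_div_like(np.array([[7, 8, 9]]), np.array([2])))  # [[3 4 4]]
```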
39 changes: 10 additions & 29 deletions python/paddle/fluid/dygraph/math_op_patch.py
@@ -19,7 +19,6 @@
 from ..layers.layer_function_generator import OpProtoHolder
 from ..layers import common_methods
 from . import to_variable, no_grad
-import paddle
 
 import numpy as np
 import six
@@ -163,26 +162,6 @@ def _scalar_mul_(var, value):
     def _scalar_div_(var, value):
         return _scalar_elementwise_op_(var, 1.0 / value, 0.0)
 
-    # TODO(shenliang03): currently, it supports divide, floor_divide, remainder
-    # for binary operator by using the api to achieve the type promotion
-    def _binary_method_creator_(op_type, reverse=False):
-        import paddle
-
-        def __impl__(self, other_var):
-            import paddle
-            op = getattr(paddle, op_type)
-            if reverse:
-                return op(other_var, self)
-            else:
-                return op(self, other_var)
-
-        __impl__.__doc__ = """
-
-        See paddle.{}""".format(op_type)
-        __impl__.__name__ = op_type
-
-        return __impl__
-
     # for binary operator such as elementwise, compare
     def _binary_creator_(method_name,
                          op_type,
@@ -281,20 +260,22 @@ def __impl__(self):
         ## a*b == b*a. Do not need to reverse explicitly
         ('__rmul__',
          _binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)),
+        ('__div__', _binary_creator_('__div__', 'elementwise_div', False,
+                                     _scalar_div_)),
+        ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
+                                         False, _scalar_div_)),
+        ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
+                                      None)),
+        ('__rtruediv__', _binary_creator_('rtruediv__', 'elementwise_div', True,
+                                          None)),
         ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
                                      None)),
         ('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True,
                                       None)),
-        # These binary use paddle.optype
-        ('__div__', _binary_method_creator_('divide', False)),
-        ('__truediv__', _binary_method_creator_('divide', False)),
-        ('__rtruediv__', _binary_method_creator_('divide', True)),
-        ('__rdiv__', _binary_method_creator_('divide', True)),
-        ('__floordiv__', _binary_method_creator_('floor_divide', False)),
-        ('__rfloordiv__', _binary_method_creator_('floor_divide', True)),
-        ('__mod__', _binary_method_creator_('remainder', False)),
+        ('__floordiv__', _binary_creator_('__floordiv__',
+                                          'elementwise_floordiv', False, None)),
+        ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False,
+                                     None)),
         ## for logical compare
         ('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
         ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
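
With `_binary_method_creator_` gone, the dygraph magic methods again dispatch straight to the elementwise ops instead of going through the `paddle.*` APIs, so no implicit type promotion happens and both operands must already share a dtype. A small sketch of the restored behavior, assuming a build with this patch:

```python
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.array([2., 3., 4.]).astype('float64'))
    y = fluid.dygraph.to_variable(np.array([1., 5., 2.]).astype('float64'))
    # Variable / Variable: __truediv__ -> elementwise_div
    print((x / y).numpy())  # [2.  0.6 2. ]
    # Variable / Python scalar: handled by _scalar_div_, i.e. a single
    # scale op (x * 0.5) rather than fill_constant + elementwise_div
    print((x / 2).numpy())  # [1.  1.5 2. ]
```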
41 changes: 12 additions & 29 deletions python/paddle/fluid/layers/math_op_patch.py
@@ -16,7 +16,6 @@
 
 import warnings
 import inspect
-import paddle
 
 from .. import core
 from ..framework import Variable, unique_name
@@ -46,7 +45,6 @@
     "__pow__": "A ** B",
     "__rpow__": "A **= B",
     "__floordiv__": "A //B",
-    "__rfloordiv__": "A //= B",
     "__mod__": "A % B",
     "__eq__": "A == B",
     "__ne__": "A != B",
@@ -235,25 +233,6 @@ def _scalar_mul_(var, value):
     def _scalar_div_(var, value):
         return _scalar_op_(var, 1.0 / value, 0.0)
 
-    # TODO(shenliang03): currently, it supports divide, floor_divide, remainder
-    # for binary operator by using the api to achieve the type promotion
-    def _binary_method_creator_(op_type, reverse=False):
-        import paddle
-
-        def __impl__(self, other_var):
-            op = getattr(paddle, op_type)
-            if reverse:
-                return op(other_var, self)
-            else:
-                return op(self, other_var)
-
-        __impl__.__doc__ = """
-
-        See paddle.{}""".format(op_type)
-        __impl__.__name__ = op_type
-
-        return __impl__
-
     def _binary_creator_(method_name,
                          op_type,
                          reverse=False,
@@ -360,18 +339,22 @@ def __impl__(self, other_var):
     # a*b == b*a. Do not need to reverse explicitly
     ('__rmul__',
      _binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)),
+    ('__div__', _binary_creator_('__div__', 'elementwise_div', False,
+                                 _scalar_div_)),
+    ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
+                                     False, _scalar_div_)),
+    ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
+                                  None)),
+    ('__rtruediv__', _binary_creator_('__rtruediv__', 'elementwise_div',
+                                      True, None)),
     ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
                                  None)),
     ('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True,
                                   None)),
-    # These binary use paddle.optype
-    ('__div__', _binary_method_creator_('divide', False)),
-    ('__rdiv__', _binary_method_creator_('divide', True)),
-    ('__truediv__', _binary_method_creator_('divide', False)),
-    ('__rtruediv__', _binary_method_creator_('divide', True)),
-    ('__floordiv__', _binary_method_creator_('floor_divide', False)),
-    ('__rfloordiv__', _binary_method_creator_('floor_divide', True)),
-    ('__mod__', _binary_method_creator_('remainder', False)),
+    ('__floordiv__', _binary_creator_('__floordiv__',
+                                      'elementwise_floordiv', False, None)),
+    ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False,
+                                 None)),
     # for logical compare
    ('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
    ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
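
The static-graph patch mirrors the dygraph one: `/` on a `Variable` now emits an `elementwise_div` op, and a Python-scalar divisor collapses to a single `scale` op via `_scalar_div_`, which is exactly why the transpiler test below expects `scale` in place of `fill_constant` + `elementwise_div`. A minimal sketch, assuming a build with this patch:

```python
import numpy as np
import paddle.fluid as fluid

x = fluid.data(name='x', shape=[3], dtype='float64')
y = fluid.data(name='y', shape=[3], dtype='float64')
z = x / y    # patched __truediv__ -> elementwise_div
w = x / 2.0  # scalar divisor -> a single scale op

exe = fluid.Executor(fluid.CPUPlace())
out_z, out_w = exe.run(fluid.default_main_program(),
                       feed={'x': np.array([2., 3., 4.]),
                             'y': np.array([1., 5., 2.])},
                       fetch_list=[z, w])
print(out_z)  # [2.  0.6 2. ]
print(out_w)  # [1.  1.5 2. ]
```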
@@ -113,8 +113,8 @@ def transpiler_test_impl(self):
                          ["listen_and_serv"])
         # block1: sum,cast,scale,floor,fill_constant,elementwise_pow,scale
         self.assertEqual([op.type for op in pserver.blocks[1].ops], [
-            "sum", "cast", "fill_constant", "elementwise_div", "floor",
-            "fill_constant", "elementwise_pow", "scale"
+            "sum", "cast", "scale", "floor", "fill_constant", "elementwise_pow",
+            "scale"
         ])
 
         # block1~2: optimize pass
131 changes: 16 additions & 115 deletions python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
@@ -240,124 +240,25 @@ def test_shape_with_batch_sizes(self):
         self.assertEqual((out_result == (2 / x)).all(), True)
 
 
-class TestDivideAPI(unittest.TestCase):
-    def setUp(self):
-        paddle.set_default_dtype("float64")
-        self.places = [fluid.CPUPlace()]
-        if core.is_compiled_with_cuda():
-            self.places.append(fluid.CUDAPlace(0))
-
-    def check_static_result(self, place):
-        # rule 1
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float64")
-            y = np.array([1, 2, 3])
-            self.assertRaises(TypeError, paddle.divide, x=x, y=y)
-
-        # rule 2: both the inputs are not Tensor
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            x = 2
-            y = 4
-            res = paddle.divide(x, y)
-            exe = fluid.Executor(place)
-            np_z = exe.run(fluid.default_main_program(),
-                           feed={},
-                           fetch_list=[res])
-            self.assertEqual(np_z[0] == 0.5, True)
-
-        # rule 3:
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float64")
-            y = fluid.data(name="y", shape=[3], dtype="float32")
-            self.assertRaises(TypeError, paddle.divide, x=x, y=y)
-
-        # rule 4: x is Tensor, y is scalar
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float64")
-            y = 2
-            exe = fluid.Executor(place)
-            res = x / y
-            np_z = exe.run(fluid.default_main_program(),
-                           feed={"x": np.array([2, 3, 4]).astype('float64')},
-                           fetch_list=[res])
-            z_expected = np.array([1., 1.5, 2.])
-            self.assertEqual((np_z[0] == z_expected).all(), True)
-
-        # rule 5: y is Tensor, x is scalar
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float64")
-            y = 2
-            exe = fluid.Executor(place)
-            res = y / x
-            np_z = exe.run(fluid.default_main_program(),
-                           feed={"x": np.array([2, 8, 4]).astype('float64')},
-                           fetch_list=[res])
-            z_expected = np.array([1., 0.25, 0.5])
-            self.assertEqual((np_z[0] == z_expected).all(), True)
-
-        # rule 6: y is Tensor, x is Tensor
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float64")
-            y = fluid.data(name="y", shape=[3], dtype="float64")
-            exe = fluid.Executor(place)
-            res = x / y
-            np_z = exe.run(fluid.default_main_program(),
-                           feed={
-                               "x": np.array([2, 3, 4]).astype('float64'),
-                               "y": np.array([1, 5, 2]).astype('float64')
-                           },
-                           fetch_list=[res])
-            z_expected = np.array([2., 0.6, 2.])
-            self.assertEqual((np_z[0] == z_expected).all(), True)
+class TestDivideOp(unittest.TestCase):
+    def test_name(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
+            y = fluid.data(name='y', shape=[2, 3], dtype='float32')
 
-    def test_static(self):
-        for place in self.places:
-            self.check_static_result(place=place)
+            y_1 = paddle.divide(x, y, name='div_res')
+            self.assertEqual(('div_res' in y_1.name), True)
 
     def test_dygraph(self):
-        for place in self.places:
-            with fluid.dygraph.guard(place):
-                # rule 1 : avoid numpy.ndarray
-                np_x = np.array([2, 3, 4])
-                np_y = np.array([1, 5, 2])
-                x = paddle.to_tensor(np_x)
-                self.assertRaises(TypeError, paddle.divide, x=x, y=np_y)
-
-                # rule 2: both the inputs are not Tensor
-                z = paddle.divide(3, 2)
-                self.assertEqual(z.numpy()[0] == 1.5, True)
-
-                # rule 3: both the inputs are Tensor
-                np_x = np.array([2, 3, 4])
-                np_y = np.array([1, 5, 2])
-                x = paddle.to_tensor(np_x, dtype="float32")
-                y = paddle.to_tensor(np_y, dtype="float64")
-                self.assertRaises(TypeError, paddle.divide, x=x, y=y)
-
-                # rule 4: x is Tensor, y is scalar
-                np_x = np.array([2, 3, 4])
-                x = paddle.to_tensor(np_x, dtype="int32")
-                y = 2
-                z = x / y
-                z_expected = np.array([1., 1.5, 2.])
-                self.assertEqual((z_expected == z.numpy()).all(), True)
-
-                # rule 5: y is Tensor, x is scalar
-                np_x = np.array([2, 1, 4])
-                x = paddle.to_tensor(np_x, dtype="int32")
-                y = 2
-                z = y / x
-                z_expected = np.array([1., 2., 0.5])
-                self.assertEqual((z_expected == z.numpy()).all(), True)
-
-                # rule 6: y is Tensor, x is Tensor
-                np_x = np.array([2, 3, 4])
-                np_y = np.array([1, 5, 2])
-                x = paddle.to_tensor(np_x)
-                y = paddle.to_tensor(np_y)
-                z = x / y
-                z_expected = np.array([2., 0.6, 2.])
-                self.assertEqual((z_expected == z.numpy()).all(), True)
+        with fluid.dygraph.guard():
+            np_x = np.array([2, 3, 4]).astype('float64')
+            np_y = np.array([1, 5, 2]).astype('float64')
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
+            z = paddle.divide(x, y)
+            np_z = z.numpy()
+            z_expected = np.array([2., 0.6, 2.])
+            self.assertEqual((np_z == z_expected).all(), True)
 
 
 if __name__ == '__main__':