Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 27 additions & 21 deletions python/paddle/fluid/optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -4876,29 +4876,35 @@ class LookaheadOptimizer(object):
import paddle
import paddle.fluid as fluid
import numpy as np
import numpy.random as random

x = fluid.layers.data(name='x', shape=[2], dtype='float32')
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
y = fluid.layers.fc(input=[x], size=2, act="softmax")
loss = fluid.layers.cross_entropy(input=y, label=label)
loss = fluid.layers.mean(x=loss)
sgd = fluid.optimizer.SGD(learning_rate=0.01)
optimizer = fluid.optimizer.LookaheadOptimizer(sgd,
alpha=0.5,
k=5)
optimizer.minimize(loss)
main_program = fluid.default_main_program()
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

feeder = fluid.DataFeeder(feed_list=[x, label], place=place)
paddle.enable_static()
x = fluid.layers.data(name='x', shape=[2], dtype='float32')
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
y = fluid.layers.fc(input=[x], size=2, act="softmax")
loss = fluid.layers.cross_entropy(input=y, label=label)
loss = fluid.layers.mean(x=loss)
sgd = fluid.optimizer.SGD(learning_rate=0.01)
optimizer = fluid.optimizer.LookaheadOptimizer(sgd,
alpha=0.5,
k=5)
optimizer.minimize(loss)
main_program = fluid.default_main_program()
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

step = 0
while(step < 10):
step += 1
exe.run(fluid.default_main_program(),
feed=feeder.feed(batch_data))
def train_reader(limit=5):
for i in range(limit):
yield random.random([2]).astype('float32'), random.random([1]).astype('int64')

feeder = fluid.DataFeeder(feed_list=[x, label], place=place)
reader = paddle.batch(paddle.reader.shuffle(train_reader, buf_size=50000),batch_size=1)

for batch_data in reader():
exe.run(fluid.default_main_program(),
feed=feeder.feed(batch_data))

"""

Expand Down
8 changes: 8 additions & 0 deletions python/paddle/fluid/tests/unittests/test_nonzero_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,14 @@ def test_nonzero_api(self):
expect_out = np.array([[0], [1]])
self.assertTrue(np.allclose(expect_out, np.array(res)))

def test_dygraph_api(self):
    """Check paddle.nonzero in dygraph (imperative) mode.

    For a boolean input, nonzero returns the coordinates of every True
    element as an int64 tensor of shape [num_true, rank].
    """
    data_x = np.array([[True, False], [False, True]])
    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(data_x)
        z = paddle.nonzero(x)
        np_z = z.numpy()
    # True elements sit at (0, 0) and (1, 1).
    expect_out = np.array([[0, 0], [1, 1]])
    # Bug fix: the original computed expect_out but asserted nothing,
    # so the test could never fail. Mirror the assertion style of the
    # static-graph test above.
    self.assertTrue(np.allclose(expect_out, np_z))


# Entry point: discover and run every unittest TestCase in this module
# when the file is executed directly (not when imported).
if __name__ == "__main__":
unittest.main()
14 changes: 6 additions & 8 deletions python/paddle/optimizer/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,19 +15,17 @@
__all__ = [
'Adadelta', 'AdadeltaOptimizer', 'Adagrad', 'AdagradOptimizer', 'Adam',
'Adamax', 'AdamW', 'DecayedAdagrad', 'DecayedAdagradOptimizer', 'Dpsgd',
'DpsgdOptimizer', 'Ftrl', 'FtrlOptimizer', 'LookaheadOptimizer',
'ModelAverage', 'Momentum', 'MomentumOptimizer', 'RMSProp', 'SGD',
'SGDOptimizer', 'Optimizer', '_LRScheduler', 'NoamLR', 'PiecewiseLR',
'NaturalExpLR', 'InverseTimeLR', 'PolynomialLR', 'LinearLrWarmup',
'ExponentialLR', 'MultiStepLR', 'StepLR', 'LambdaLR', 'ReduceLROnPlateau',
'CosineAnnealingLR'
'DpsgdOptimizer', 'Ftrl', 'FtrlOptimizer', 'Momentum', 'MomentumOptimizer',
'RMSProp', 'SGD', 'SGDOptimizer', 'Optimizer', '_LRScheduler', 'NoamLR',
'PiecewiseLR', 'NaturalExpLR', 'InverseTimeLR', 'PolynomialLR',
'LinearLrWarmup', 'ExponentialLR', 'MultiStepLR', 'StepLR', 'LambdaLR',
'ReduceLROnPlateau', 'CosineAnnealingLR'
]


from ..fluid.optimizer import Momentum, Adagrad, Dpsgd, DecayedAdagrad, Ftrl,\
AdagradOptimizer, DpsgdOptimizer, DecayedAdagradOptimizer, \
FtrlOptimizer, AdadeltaOptimizer, ModelAverage, \
LookaheadOptimizer
FtrlOptimizer, AdadeltaOptimizer

from .optimizer import Optimizer
from .adam import Adam
Expand Down
19 changes: 8 additions & 11 deletions python/paddle/tensor/search.py
Original file line number Diff line number Diff line change
Expand Up @@ -339,11 +339,8 @@ def index_select(x, index, axis=0, name=None):
return out


def nonzero(input, as_tuple=False):
def nonzero(x, as_tuple=False):
"""
:alias_main: paddle.nonzero
:alias: paddle.nonzero,paddle.tensor.nonzero,paddle.tensor.search.nonzero

Return a tensor containing the indices of all non-zero elements of the `input`
tensor. If as_tuple is True, return a tuple of 1-D tensors, one for each dimension
in `input`, each containing the indices (in that dimension) of all non-zero elements
Expand All @@ -353,17 +350,17 @@ def nonzero(input, as_tuple=False):
a 1-D tensor tuple of length `n`, and the shape of each 1-D tensor is [z, 1].

Args:
inputs (Variable): The input tensor variable.
x (Tensor): The input tensor variable.
as_tuple (bool): Return type, Tensor or tuple of Tensor.

Returns:
Variable. The data type is int64.
Tensor. The data type is int64.

Examples:

.. code-block:: python
import paddle

paddle.disable_static()
import paddle

x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
Expand Down Expand Up @@ -402,13 +399,13 @@ def nonzero(input, as_tuple=False):
#[]
"""
list_out = []
shape = input.shape
shape = x.shape
rank = len(shape)

if in_dygraph_mode():
outs = core.ops.where_index(input)
outs = core.ops.where_index(x)
else:
outs = layers.where(input)
outs = layers.where(x)

if not as_tuple:
return outs
Expand Down