@@ -251,16 +251,14 @@ def fluid_softmax_with_cross_entropy(
         .. code-block:: python
 
             import paddle
-            import numpy as np
-
-            data = np.random.rand(128).astype("float32")
-            label = np.random.rand(1).astype("int64")
-            data = paddle.to_tensor(data)
-            label = paddle.to_tensor(label)
-            linear = paddle.nn.Linear(128, 100)
-            x = linear(data)
-            out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
+
+            logits = paddle.to_tensor([0.4, 0.6, 0.9], dtype="float32")
+            label = paddle.to_tensor([1], dtype="int64")
+
+            out = paddle.nn.functional.softmax_with_cross_entropy(logits=logits, label=label)
             print(out)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [1.15328646])
     """
     if _non_static_mode():
         if core.is_compiled_with_npu():
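For reference on the value printed in the rewritten example: with hard labels, softmax_with_cross_entropy reduces to the negative log of the softmax probability at the label index, so the documented output can be checked by hand. A minimal sketch reusing the logits and label from the example above:

    import paddle

    logits = paddle.to_tensor([0.4, 0.6, 0.9], dtype="float32")
    label_index = 1  # the label used in the example

    # Hard-label cross entropy: -log(softmax(logits)[label_index])
    probs = paddle.nn.functional.softmax(logits)
    print(-paddle.log(probs[label_index]))  # ~1.15328646, matching the documented output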
@@ -1772,7 +1770,6 @@ def ctc_loss(
 
             # declarative mode
             import paddle.nn.functional as F
-            import numpy as np
             import paddle
 
             # length of the longest logit sequence
@@ -1784,8 +1781,7 @@ def ctc_loss(
             # class num
             class_num = 3
 
-            np.random.seed(1)
-            log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
+            log_probs = paddle.to_tensor([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
                                     [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]],
 
                                    [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01],
@@ -1798,30 +1794,30 @@ def ctc_loss(
                                    [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]],
 
                                    [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02],
-                                    [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32")
-            labels = np.array([[1, 2, 2],
-                               [1, 2, 2]]).astype("int32")
-            input_lengths = np.array([5, 5]).astype("int64")
-            label_lengths = np.array([3, 3]).astype("int64")
-
-            log_probs = paddle.to_tensor(log_probs)
-            labels = paddle.to_tensor(labels)
-            input_lengths = paddle.to_tensor(input_lengths)
-            label_lengths = paddle.to_tensor(label_lengths)
+                                    [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]],
+                                   dtype="float32")
+            labels = paddle.to_tensor([[1, 2, 2],
+                                       [1, 2, 2]], dtype="int32")
+            input_lengths = paddle.to_tensor([5, 5], dtype="int64")
+            label_lengths = paddle.to_tensor([3, 3], dtype="int64")
 
             loss = F.ctc_loss(log_probs, labels,
                               input_lengths,
                               label_lengths,
                               blank=0,
                               reduction='none')
-            print(loss) #[3.9179852 2.9076521]
+            print(loss)
+            # Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [3.91798496, 2.90765190])
 
             loss = F.ctc_loss(log_probs, labels,
                               input_lengths,
                               label_lengths,
                               blank=0,
                               reduction='mean')
-            print(loss) #[1.1376063]
+            print(loss)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [1.13760614])
 
     """
 
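A note on how the two printed values relate: for paddle.nn.functional.ctc_loss, reduction='mean' divides each per-sample loss by its label length and then averages over the batch, so the 'mean' result follows directly from the 'none' results shown above:

    import paddle

    # Per-sample losses and label lengths taken from the example above
    loss_none = paddle.to_tensor([3.91798496, 2.90765190])
    label_lengths = paddle.to_tensor([3.0, 3.0])

    # reduction='mean' is mean(loss / label_lengths)
    print((loss_none / label_lengths).mean())  # ~1.13760614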
@@ -2257,16 +2253,14 @@ def softmax_with_cross_entropy(
         .. code-block:: python
 
             import paddle
-            import numpy as np
-
-            data = np.random.rand(128).astype("float32")
-            label = np.random.rand(1).astype("int64")
-            data = paddle.to_tensor(data)
-            label = paddle.to_tensor(label)
-            linear = paddle.nn.Linear(128, 100)
-            x = linear(data)
-            out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
+
+            logits = paddle.to_tensor([0.4, 0.6, 0.9], dtype="float32")
+            label = paddle.to_tensor([1], dtype="int64")
+
+            out = paddle.nn.functional.softmax_with_cross_entropy(logits=logits, label=label)
             print(out)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [1.15328646])
     """
     return fluid_softmax_with_cross_entropy(
         logits,
@@ -4003,18 +3997,26 @@ def soft_margin_loss(input, label, reduction='mean', name=None):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
             input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
             label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
             output = paddle.nn.functional.soft_margin_loss(input, label)
+            print(output)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [0.64022040])
+
+            input = paddle.uniform(shape=(5, 5), dtype="float32", min=0.1, max=0.8)
+            label = paddle.randint(0, 2, shape=(5, 5), dtype="int64")
+            label[label==0]=-1
 
-            input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
-            label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
-            label_np[label_np==0]=-1
-            input = paddle.to_tensor(input_np)
-            label = paddle.to_tensor(label_np)
             output = paddle.nn.functional.soft_margin_loss(input, label, reduction='none')
+            print(output)
+            # Tensor(shape=[5, 5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [[1.09917796, 0.52613139, 0.56263304, 0.82736146, 0.38776723],
+            #  [1.07179427, 1.11924267, 0.49877715, 1.10026348, 0.46184641],
+            #  [0.84367639, 0.74795729, 0.44629076, 0.55123353, 0.77659678],
+            #  [0.39465919, 0.76651484, 0.54485321, 0.76609844, 0.77166790],
+            #  [0.51283568, 0.84757161, 0.78913331, 1.05268764, 0.45318675]])
     """
     if reduction not in ['sum', 'mean', 'none']:
         raise ValueError(
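Finally, the first printed value in the soft_margin_loss example can also be reproduced by hand: the loss is log(1 + exp(-label * input)) averaged over all elements under the default reduction='mean'. A minimal sketch using the fixed 2x3 inputs from the docstring (the 5x5 example uses random tensors, so its printed values are illustrative only):

    import paddle

    input = paddle.to_tensor([[0.5, 0.6, 0.7], [0.3, 0.5, 0.2]], 'float32')
    label = paddle.to_tensor([[1.0, -1.0, 1.0], [-1.0, 1.0, 1.0]], 'float32')

    # Element-wise soft margin loss, then the mean over all entries
    manual = paddle.log(1 + paddle.exp(-label * input)).mean()
    print(manual)  # ~0.64022, matching the documented output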