4 changes: 3 additions & 1 deletion python/paddle/tensor/compat.py
@@ -836,7 +836,9 @@ def max(
return ret


-MedianRetType = MinMaxRetType
+class MedianRetType(NamedTuple):
+    values: Tensor
+    indices: Tensor


@ForbidKeywordsDecorator(
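A note on the hunk above: it replaces the MedianRetType = MinMaxRetType alias with a dedicated NamedTuple, so median gets its own return type instead of borrowing MinMaxRetType's. A minimal standalone sketch of the difference (illustrative only; plain object fields stand in for paddle Tensor, this is not the Paddle source):

```python
# Sketch only: contrasts the old alias with the new dedicated NamedTuple.
from typing import NamedTuple


class MinMaxRetType(NamedTuple):
    values: object
    indices: object


# Old behaviour: MedianRetType was just another name for MinMaxRetType.
MedianRetTypeOld = MinMaxRetType


# New behaviour: a distinct class with the same (values, indices) fields.
class MedianRetType(NamedTuple):
    values: object
    indices: object


print(MedianRetTypeOld.__name__)  # "MinMaxRetType" -- the type name never mentioned median
print(MedianRetType.__name__)     # "MedianRetType" -- its own type, like torch's per-op return types
```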
13 changes: 9 additions & 4 deletions python/paddle/tensor/search.py
@@ -14,7 +14,7 @@

from __future__ import annotations

-from typing import TYPE_CHECKING, Literal
+from typing import TYPE_CHECKING, Literal, NamedTuple

import numpy as np
from typing_extensions import overload
@@ -1043,6 +1043,11 @@ def masked_select(x: Tensor, mask: Tensor, name: str | None = None) -> Tensor:
return out


+class TopKRetType(NamedTuple):
+    values: Tensor
+    indices: Tensor


@param_two_alias(["x", "input"], ["axis", "dim"])
def topk(
x: Tensor,
@@ -1053,7 +1058,7 @@ def topk(
name: str | None = None,
*,
out: tuple[Tensor, Tensor] | None = None,
Inline review thread:

Member:
Just to confirm: isn't the type of out here the same as the output type?

Member:
Same for median.

Contributor (author):
1. The out argument is passed in by the user, so it does not have to be a NamedTuple, but the returned value is always a NamedTuple.
2. The torch docs likewise only say that the out parameter must be (Tensor, Tensor); there is no further requirement.

Maybe writing out=None would be better?

Contributor @zhwesky2010 (Aug 28, 2025):
> Same for median.
They are not the same: median, sort, min, max, and topk each return two Tensors, and each wraps its return value in its own type, just like torch does.

-) -> tuple[Tensor, Tensor]:
+) -> TopKRetType:
"""
Return values and indices of the k largest or smallest at the optional axis.
If the input is a 1-D Tensor, finds the k largest or smallest values and indices.
@@ -1129,8 +1134,8 @@ def topk(
out_values, out_indices = out
out_values = paddle.assign(values, output=out_values)
out_indices = paddle.assign(indices, output=out_indices)
-return out_values, out_indices
-return values, indices
+return TopKRetType(values=out_values, indices=out_indices)
+return TopKRetType(values=values, indices=indices)
else:
helper = LayerHelper("top_k_v2", **locals())
inputs = {"X": [x]}
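To make the return-type change and the out= discussion above concrete, here is a hedged usage sketch. It assumes this PR is applied; the way the out tensors are pre-allocated below is illustrative and not taken from the diff:

```python
# Usage sketch for topk's TopKRetType return value (assumes this PR is applied).
import paddle

x = paddle.to_tensor([1.0, 4.0, 2.0, 3.0])

# Named-field access on the returned NamedTuple.
res = paddle.topk(x, k=2)
print(res.values)   # the two largest values
print(res.indices)  # their positions in x

# Plain tuple unpacking still works, because a NamedTuple is a tuple.
values, indices = paddle.topk(x, k=2)

# The out= form from the diff: results are copied into user-provided tensors via
# paddle.assign(..., output=...) and the same pair is returned wrapped in TopKRetType.
out_values = paddle.empty([2], dtype='float32')   # illustrative pre-allocation
out_indices = paddle.empty([2], dtype='int64')
ret = paddle.topk(x, k=2, out=(out_values, out_indices))
# ret.values / ret.indices hold the same data as out_values / out_indices.
```

The struct_return case added in the test below exercises exactly this named-field access path.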
11 changes: 10 additions & 1 deletion test/legacy_test/test_top_k_op.py
@@ -93,6 +93,10 @@ def run_case(case):
elif case == 'both_input_out':
_ = paddle.topk(x, k, out=(out_values, out_indices))
values, indices = out_values, out_indices
+elif case == 'struct_return':
+    res = paddle.topk(x, k)
+    values = res.values
+    indices = res.indices
else:
raise AssertionError

@@ -108,24 +112,29 @@ def run_case(case):
loss.backward()
return values.numpy(), indices.numpy(), x.grad.numpy()

-# run four scenarios
+# run five scenarios
v1, i1, g1 = run_case('return')
x.clear_gradient()
v2, i2, g2 = run_case('input_out')
x.clear_gradient()
v3, i3, g3 = run_case('both_return')
x.clear_gradient()
v4, i4, g4 = run_case('both_input_out')
+x.clear_gradient()
+v5, i5, g5 = run_case('struct_return')

np.testing.assert_allclose(v1, v2, rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(v1, v3, rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(v1, v4, rtol=1e-6, atol=1e-6)
+np.testing.assert_allclose(v1, v5, rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(i1, i2, rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(i1, i3, rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(i1, i4, rtol=1e-6, atol=1e-6)
+np.testing.assert_allclose(i1, i5, rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(g1, g2, rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(g1, g3, rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(g1, g4, rtol=1e-6, atol=1e-6)
+np.testing.assert_allclose(g1, g5, rtol=1e-6, atol=1e-6)

paddle.enable_static()
