Merged

Changes from 22 commits (39 commits total):
b8c2201  add type_hints for ci  (megemini, Apr 25, 2024)
5dab33d  add type_hints unittest  (megemini, Apr 26, 2024)
c11b093  tmp test for type hints  (megemini, Apr 26, 2024)
436175f  change mypy version  (megemini, Apr 26, 2024)
f6927c2  from __future__ import annotations  (megemini, Apr 26, 2024)
c4ba2bf  tmp math.py docstring trigger ci  (megemini, Apr 26, 2024)
9fba61a  tmp trigger ci  (megemini, Apr 26, 2024)
36ba294  tmp debug mypy  (megemini, Apr 26, 2024)
68354cb  fix paddle_build.sh  (megemini, Apr 27, 2024)
7c2a715  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…  (megemini, Apr 27, 2024)
9654c26  setup with pyi  (megemini, Apr 27, 2024)
732aff8  force reinstall  (megemini, Apr 27, 2024)
458802a  setup.py type hints  (megemini, Apr 27, 2024)
363ec84  restore math.py  (megemini, May 2, 2024)
41aa797  update print_signatures.py for trigger type annotation ci  (megemini, May 2, 2024)
c8775e3  update print_signatures.py member_dict for trigger type annotation ci  (megemini, May 2, 2024)
0dae862  restore print_signatures.py  (megemini, May 2, 2024)
5c996a0  get_api_md5 with ArgSpec & update unittest  (megemini, May 2, 2024)
ce613b2  change math.py type annotation  (megemini, May 2, 2024)
fb6cef6  change math.py type annotation return  (megemini, May 2, 2024)
edc4b23  change math.py type annotation scale & stanh  (megemini, May 2, 2024)
9857d86  update paddle_build.sh  (megemini, May 5, 2024)
a7ed18c  [Update] type checker  (megemini, May 8, 2024)
51fda55  tmp math.py, test=type_checking  (megemini, May 8, 2024)
98dc1df  tmp math.py, test=type_checking  (megemini, May 9, 2024)
eb1f468  tmp math.py, test=type_checking  (megemini, May 9, 2024)
c75c574  tmp math.py, test=type_checking  (megemini, May 9, 2024)
495d0b7  tmp math.py & fix paddle_build.sh, test=type_checking  (megemini, May 9, 2024)
40b66c7  type checking on title  (megemini, May 10, 2024)
0181f23  reduce log  (megemini, May 10, 2024)
e7be07d  change mypy cache dir abspath  (megemini, May 10, 2024)
618c3b9  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…  (megemini, May 20, 2024)
72067ed  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…  (megemini, May 29, 2024)
37ae0ab  [Change] paddle_build.sh func  (megemini, May 29, 2024)
cf37661  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…  (megemini, May 30, 2024)
5cc0c4b  [Update] filter api  (megemini, May 30, 2024)
f9c381e  [Update] pyproject.toml & process pool for run  (megemini, May 30, 2024)
06dee11  [Update] restore math.py  (megemini, May 31, 2024)
ae07a13  [Update] restore math.py  (megemini, May 31, 2024)
62 changes: 59 additions & 3 deletions paddle/scripts/paddle_build.sh
@@ -3525,6 +3525,26 @@ function exec_samplecode_test() {
fi
}

function exec_type_hints() {
Reviewer comment (Member): Hmm, let's name the check here after "type check", e.g. exec_type_checking.

if [ -d "${PADDLE_ROOT}/build/pr_whl" ];then
pip install ${PADDLE_ROOT}/build/pr_whl/*.whl
else
echo "WARNING: PR wheel is not found. Use develop wheel !!!"
pip install ${PADDLE_ROOT}/build/python/dist/*.whl
fi

python -c "import paddle;print(paddle.__version__);paddle.version.show()"

cd ${PADDLE_ROOT}/tools

python type_hints.py --debug; type_hints_error=$?

if [ "$type_hints_error" != "0" ];then
echo "Example code type checking failed" >&2
exit 5
fi
}
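tools/type_hints.py itself is not part of this diff, so the following is only a rough sketch of the kind of driver exec_type_hints invokes, assuming the script gathers example sources (collect_sample_codes is a hypothetical placeholder) and runs mypy through its Python API with the config added in tools/mypy.ini:

```python
# Hypothetical sketch only; tools/type_hints.py is not shown in this PR.
from __future__ import annotations

import sys

from mypy import api  # mypy is pinned in python/unittest_py/requirements.txt


def collect_sample_codes() -> dict[str, str]:
    """Placeholder: map API name -> example source extracted from its docstring."""
    return {
        "paddle.scale": "import paddle\nx = paddle.to_tensor(0.2)\ny = paddle.scale(x, scale=2.0)\n"
    }


def check_one(name: str, source: str) -> bool:
    stdout, stderr, exit_code = api.run(
        ["--config-file", "mypy.ini", "--command", source]
    )
    if exit_code != 0:
        print(f"type checking failed for {name}:\n{stdout}{stderr}", file=sys.stderr)
    return exit_code == 0


if __name__ == "__main__":
    results = [check_one(n, s) for n, s in collect_sample_codes().items()]
    sys.exit(0 if all(results) else 1)
```

A non-zero exit status from the script is what the type_hints_error check above reacts to.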


function collect_ccache_hits() {
ccache -s
@@ -3569,6 +3589,9 @@ function summary_check_problems() {
set +x
local example_code=$1
local example_info=$2
local type_hints_code=$3
local type_hints_info=$4

if [ $example_code -ne 0 ];then
echo "==============================================================================="
echo "*****Example code error***** Please fix the error listed in the information:"
@@ -3587,6 +3610,25 @@ function summary_check_problems() {
echo "*****Example code PASS*****"
echo "==============================================================================="
fi

if [ $type_hints_code -ne 0 ];then
echo "==============================================================================="
echo "*****Example code type checking error***** Please fix the error listed in the information:"
echo "==============================================================================="
echo "$type_hints_info"
echo "==============================================================================="
echo "*****Example code type checking FAIL*****"
echo "==============================================================================="
exit $type_hints_code
else
echo "==============================================================================="
echo "*****Example code type checking info*****"
echo "==============================================================================="
echo "$type_hints_info"
echo "==============================================================================="
echo "*****Example code type checking PASS*****"
echo "==============================================================================="
fi
set -x
}

@@ -4274,7 +4316,12 @@ function main() {
fi
{ example_info=$(exec_samplecode_test cpu 2>&1 1>&3 3>/dev/null); } 3>&1
example_code=$?
summary_check_problems $[${example_code_gpu} + ${example_code}] "${example_info_gpu}\n${example_info}"

{ type_hints_info=$(exec_type_hints 2>&1 1>&3 3>/dev/null); } 3>&1
type_hints_code=$?

summary_check_problems $[${example_code_gpu} + ${example_code}] "${example_info_gpu}\n${example_info}" $type_hints_code "$type_hints_info"

assert_api_spec_approvals
;;
build_and_check_cpu)
@@ -4294,7 +4341,12 @@ function main() {
fi
{ example_info=$(exec_samplecode_test cpu 2>&1 1>&3 3>/dev/null); } 3>&1
example_code=$?
summary_check_problems $[${example_code_gpu} + ${example_code}] "${example_info_gpu}\n${example_info}"

{ type_hints_info=$(exec_type_hints 2>&1 1>&3 3>/dev/null); } 3>&1
type_hints_code=$?

summary_check_problems $[${example_code_gpu} + ${example_code}] "${example_info_gpu}\n${example_info}" $type_hints_code "$type_hints_info"

assert_api_spec_approvals
;;
check_whl_size)
@@ -4538,7 +4590,11 @@ function main() {
api_example)
Reviewer comment (Member): This api_example entry is from a long-retired CI pipeline. Example-code checking now runs in the Static-Check pipeline via build_and_check_cpu / build_and_check_gpu above, so this entry is unused and can simply be deleted.

{ example_info=$(exec_samplecode_test cpu 2>&1 1>&3 3>/dev/null); } 3>&1
example_code=$?
summary_check_problems $example_code "$example_info"

{ type_hints_info=$(exec_type_hints 2>&1 1>&3 3>/dev/null); } 3>&1
type_hints_code=$?

summary_check_problems $example_code "$example_info" $type_hints_code "$type_hints_info"
;;
test_op_benchmark)
test_op_benchmark
Empty file added python/paddle/py.typed
Empty file.
8 changes: 6 additions & 2 deletions python/paddle/tensor/math.py
@@ -15,6 +15,8 @@
math functions
"""

from __future__ import annotations

import math
import warnings

@@ -212,7 +214,9 @@ def log_(x, name=None):
return _C_ops.log_(x)


def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
def scale(
x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None
) -> int:
"""
Scale operator.

@@ -322,7 +326,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
return helper.append_activation(out)


def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
Reviewer comment (Member): [screenshot] The error is reported here as expected, no problem. I removed "typing" from the PR title and will trigger CI again to see whether the check is skipped as expected.

Reviewer comment (Member): The type check was skipped, as expected. This change can be restored now.

Reviewer comment (Member): [screenshot] By the way, for PRs that only change type hints, could I handle the review? Otherwise it will be hard to keep follow-up reviews moving.

Reviewer comment (Member): Confirmed, we can do it that way in the next PR.

def stanh(x, scale_a: int = 1, scale_b=1.7159, name=None):
r"""

stanh activation.
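Note that the `-> int` on scale and `scale_a: int = 1` on stanh are deliberately wrong annotations, kept only long enough to prove the new CI flags them (see the "tmp math.py, test=type_checking" and "[Update] restore math.py" commits). For reference, a plausible real annotation would look more like the sketch below; the concrete types are assumptions, not part of this PR:

```python
# Sketch under assumptions: Tensor is importable from paddle and these are the
# intended parameter/return types; only the annotation style is being shown.
from __future__ import annotations

from paddle import Tensor


def scale(
    x: Tensor,
    scale: float | Tensor = 1.0,
    bias: float = 0.0,
    bias_after_scale: bool = True,
    act: str | None = None,
    name: str | None = None,
) -> Tensor: ...


def stanh(
    x: Tensor,
    scale_a: float = 0.67,
    scale_b: float = 1.7159,
    name: str | None = None,
) -> Tensor: ...
```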
3 changes: 3 additions & 0 deletions python/setup.py.in
@@ -1073,6 +1073,9 @@ if '${WITH_CPP_DIST}' == 'ON':
paddle_lib_test_dir = '${PADDLE_LIB_TEST_DIR}'
install_cpp_dist_and_build_test(paddle_install_dir, paddle_lib_test_dir)

# type hints
package_data['paddle'] = package_data.get('paddle', []) + ['py.typed']
package_data['paddle.tensor'] = package_data.get('paddle.tensor', []) + ['*.pyi']

with redirect_stdout():
setup(name='${PACKAGE_NAME}',
1 change: 1 addition & 0 deletions python/unittest_py/requirements.txt
@@ -19,3 +19,4 @@ wandb>=0.13 ; python_version<"3.12"
xlsxwriter==3.0.9
xdoctest==1.1.1
ubelt==1.3.3 # just for xdoctest
mypy==1.10.0
7 changes: 7 additions & 0 deletions setup.py
@@ -1302,6 +1302,13 @@ def get_package_data_and_package_dir():
ext_modules = []
elif sys.platform == 'darwin':
ext_modules = []

# type hints
package_data['paddle'] = package_data.get('paddle', []) + ['py.typed']
package_data['paddle.tensor'] = package_data.get('paddle.tensor', []) + [
'*.pyi'
]

return package_data, package_dir, ext_modules


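Both setup.py and setup.py.in now package the PEP 561 marker (py.typed) and any *.pyi stubs, so type checkers pick up annotations from the installed wheel. A small sanity check one could run after pip install, as a sketch only; the concrete stub file names are not defined by this PR:

```python
# Sketch: confirm the PEP 561 marker and stubs ship with the installed package.
from importlib import resources

assert resources.files("paddle").joinpath("py.typed").is_file()

stubs = [
    entry.name
    for entry in resources.files("paddle.tensor").iterdir()
    if entry.name.endswith(".pyi")
]
print(stubs)  # whatever *.pyi files the build generated, if any
```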
26 changes: 26 additions & 0 deletions tools/mypy.ini
Reviewer comment (Member): Also, could this be placed in the root pyproject.toml instead? Most modern Python tooling keeps its configuration in pyproject.toml.
@@ -0,0 +1,26 @@
[mypy]
python_version = 3.8

cache_dir = .mypy_cache

# Miscellaneous strictness flags
allow_redefinition = True
local_partial_types = True
strict = False

# Untyped definitions and calls
check_untyped_defs = True

# Import discovery
follow_imports = normal

# Miscellaneous
warn_unused_configs = True

# Configuring warnings
warn_redundant_casts = True
warn_unused_ignores = True
warn_no_return = True

# Configuring error messages
show_column_numbers = True
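Two of these flags change mypy's defaults in ways that matter for docstring examples: check_untyped_defs makes mypy look inside functions that have no annotations, and allow_redefinition lets a name be re-bound to a different type, which is common in sample code. A tiny illustration (not taken from the PR):

```python
# With check_untyped_defs = True, this unannotated body is still analyzed and
# the bad concatenation is reported; with the default it would be skipped.
def broken():
    return "a" + 1


# With allow_redefinition = True, re-binding `result` to a new type is allowed.
result = "123"
result = int(result)
```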
12 changes: 9 additions & 3 deletions tools/sampcd_processor_utils.py
@@ -334,16 +334,22 @@ def get_api_md5(path):
if not os.path.isfile(API_spec):
return api_md5
pat = re.compile(r'\((paddle[^,]+)\W*document\W*([0-9a-z]{32})')

# include the ArgSpec in the value so that changing an API's type annotation can trigger the CI
patArgSpec = re.compile(
r'^(paddle[^,]+)\s+\(ArgSpec.*document\W*([0-9a-z]{32})'
r'^(paddle[^,]+)\s+\((ArgSpec.*),.*document\W*([0-9a-z]{32})'
)
Reviewer comment (Member): pat_arg_spec, or define PATTERN_ARG_SPEC as a constant at the top of the file.


with open(API_spec) as f:
for line in f.readlines():
mo = pat.search(line)
if not mo:
mo = patArgSpec.search(line)

if mo:
api_md5[mo.group(1)] = mo.group(2)
else:
mo = patArgSpec.search(line)
api_md5[mo.group(1)] = f'{mo.group(2)}, {mo.group(3)}'

return api_md5
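To see what the extended pattern captures, here is a small standalone check; the spec line is a hypothetical example in the ArgSpec format, reconstructed from the regex and from the expected values in tools/test_sampcd_processor.py below:

```python
import re

# Same pattern as patArgSpec above.
pat_arg_spec = re.compile(
    r'^(paddle[^,]+)\s+\((ArgSpec.*),.*document\W*([0-9a-z]{32})'
)

# Hypothetical spec line; the real API_PR.spec content is not shown in this diff.
line = (
    "paddle.one_plus_one (ArgSpec(args=[], varargs=None, keywords=None, "
    "defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6c55one'))"
)

mo = pat_arg_spec.search(line)
assert mo is not None
print(mo.group(1))                      # paddle.one_plus_one
print(f"{mo.group(2)}, {mo.group(3)}")  # ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ff0f188c95030158cc6398d2a6c55one
```

Because the ArgSpec text becomes part of the stored value, editing only an API's signature changes the value even when the docstring md5 is unchanged, which is what lets a type-annotation change trigger the CI.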


72 changes: 38 additions & 34 deletions tools/test_sampcd_processor.py
@@ -103,19 +103,23 @@ def tearDown(self):
def test_get_api_md5(self):
res = get_api_md5('paddle/fluid/API_PR.spec')
self.assertEqual(
"ff0f188c95030158cc6398d2a6c55one", res['paddle.one_plus_one']
"ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ff0f188c95030158cc6398d2a6c55one",
res['paddle.one_plus_one'],
)
self.assertEqual(
"ff0f188c95030158cc6398d2a6c55two", res['paddle.two_plus_two']
"ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ff0f188c95030158cc6398d2a6c55two",
res['paddle.two_plus_two'],
)
self.assertEqual(
"ff0f188c95030158cc6398d2a6cthree", res['paddle.three_plus_three']
"ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ff0f188c95030158cc6398d2a6cthree",
res['paddle.three_plus_three'],
)
self.assertEqual(
"ff0f188c95030158cc6398d2a6c5four", res['paddle.four_plus_four']
)
self.assertEqual(
"ff0f188c95030158cc6398d2a6c5five", res['paddle.five_plus_five']
"ArgSpec(), ff0f188c95030158cc6398d2a6c5five",
res['paddle.five_plus_five'],
)


@@ -302,8 +306,8 @@ def test_global_exec(self):
>>> import paddle
>>> a = paddle.to_tensor(.2)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.20000000])
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.20000000)
""",
'set_default': """
placeholder
@@ -319,8 +323,8 @@ def test_global_exec(self):
>>> paddle.set_default_dtype('float64')
>>> a = paddle.to_tensor(.2)
>>> print(a)
Tensor(shape=[1], dtype=float64, place=Place(cpu), stop_gradient=True,
[0.20000000])
Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=True,
0.20000000)
""",
'after_set_default': """
placeholder
@@ -335,8 +339,8 @@ def test_global_exec(self):
>>> import paddle
>>> a = paddle.to_tensor(.2)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.20000000])
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.20000000)
""",
}
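The expected outputs in these fixtures change from Tensor(shape=[1], ...) to Tensor(shape=[], ...) because converting a bare Python scalar now yields a 0-D tensor. A quick illustration, assuming current Paddle behavior:

```python
import paddle

a = paddle.to_tensor(0.2)    # scalar input -> 0-D tensor, printed with shape=[]
b = paddle.to_tensor([0.2])  # one-element list -> 1-D tensor, printed with shape=[1]
print(a.shape)  # []
print(b.shape)  # [1]
```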

@@ -509,10 +513,10 @@ def test_patch_xdoctest(self):
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(.2)
>>> # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True, [0.20000000])
>>> # Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, 0.20000000)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[0.20000000])
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
0.20000000)

""",
'cpu_to_cpu': """
@@ -528,10 +532,10 @@ def test_patch_xdoctest(self):
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor(.2)
>>> # Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True, [0.20000000])
>>> # Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, 0.20000000)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.20000000])
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.20000000)

""",
'gpu_to_cpu': """
@@ -547,10 +551,10 @@ def test_patch_xdoctest(self):
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(.2)
>>> # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True, [0.20000000])
>>> # Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, 0.20000000)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.20000000])
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.20000000)

""",
'cpu_to_gpu': """
@@ -566,10 +570,10 @@ def test_patch_xdoctest(self):
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor(.2)
>>> # Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True, [0.20000000])
>>> # Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, 0.20000000)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[0.20000000])
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
0.20000000)
""",
'gpu_to_cpu_array': """
placeholder
@@ -701,8 +705,8 @@ def test_patch_xdoctest(self):
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(.123456789)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[0.123456780])
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
0.123456780)

""",
'cpu_to_cpu': """
@@ -719,8 +723,8 @@ def test_patch_xdoctest(self):
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor(.123456789)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.123456780])
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.123456780)

""",
'gpu_to_cpu': """
@@ -737,8 +741,8 @@ def test_patch_xdoctest(self):
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(.123456789)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.123456780])
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.123456780)

""",
'cpu_to_gpu': """
@@ -755,8 +759,8 @@ def test_patch_xdoctest(self):
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor(.123456789)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[0.123456780])
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
0.123456780)
""",
'gpu_to_cpu_array': """
placeholder
@@ -2046,7 +2050,7 @@ def test_timeout(self):

def test_bad_statements(self):
docstrings_to_test = {
'bad_fluid': """
'good_fluid': """
Reviewer comment (Member): Can the fluid check and its unit tests be retired? If fluid is used, the unit test will now fail at execution time. We can retire it in a separate PR later.

this is docstring...

Examples:
@@ -2191,9 +2195,9 @@ def test_bad_statements(self):
tr_10,
) = test_results

self.assertIn('bad_fluid', tr_0.name)
self.assertTrue(tr_0.badstatement)
self.assertFalse(tr_0.passed)
self.assertIn('good_fluid', tr_0.name)
self.assertFalse(tr_0.badstatement)
self.assertTrue(tr_0.passed)

self.assertIn('bad_fluid_from', tr_1.name)
self.assertTrue(tr_1.badstatement)