Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ repos:
# v8.2 has breaking changes. We work around them at runtime, but we need the newer stubs.
- packaging >= 22.0
- platformdirs >= 2.1.0
- pytokens >= 0.1.10
- pytokens >= 0.3.0
- pytest
- hypothesis
- aiohttp >= 3.7.4
Expand Down
1 change: 1 addition & 0 deletions CHANGES.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
<!-- Include any especially major or disruptive changes here -->

- Enable base 3.14 support (#4804)
- Add support for the new Python 3.14 t-string syntax introduced by PEP 750 (#4805)

### Stable style

Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ dependencies = [
"packaging>=22.0",
"pathspec>=0.9.0",
"platformdirs>=2",
"pytokens>=0.1.10",
"pytokens>=0.3.0",
"tomli>=1.1.0; python_version < '3.11'",
"typing_extensions>=4.0.1; python_version < '3.11'",
]
Expand Down
3 changes: 3 additions & 0 deletions src/black/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -1248,6 +1248,7 @@ def _format_str_once(
for feature in {
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.UNPARENTHESIZED_EXCEPT_TYPES,
Feature.T_STRINGS,
}
if supports_feature(versions, feature)
}
Expand Down Expand Up @@ -1364,6 +1365,8 @@ def get_features_used( # noqa: C901
for n in node.pre_order():
if n.type == token.FSTRING_START:
features.add(Feature.F_STRINGS)
elif n.type == token.TSTRING_START:
features.add(Feature.T_STRINGS)
elif (
n.type == token.RBRACE
and n.parent is not None
Expand Down
19 changes: 17 additions & 2 deletions src/black/linegen.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@
WHITESPACE,
Visitor,
ensure_visible,
fstring_to_string,
fstring_tstring_to_string,
get_annotation_type,
has_sibling_with_type,
is_arith_like,
Expand Down Expand Up @@ -560,7 +560,22 @@ def visit_atom(self, node: Node) -> Iterator[Line]:

def visit_fstring(self, node: Node) -> Iterator[Line]:
# currently we don't want to format and split f-strings at all.
string_leaf = fstring_to_string(node)
string_leaf = fstring_tstring_to_string(node)
node.replace(string_leaf)
if "\\" in string_leaf.value and any(
"\\" in str(child)
for child in node.children
if child.type == syms.fstring_replacement_field
):
# string normalization doesn't account for nested quotes,
# causing breakages. skip normalization when nested quotes exist
yield from self.visit_default(string_leaf)
return
yield from self.visit_STRING(string_leaf)

def visit_tstring(self, node: Node) -> Iterator[Line]:
# currently we don't want to format and split t-strings at all.
string_leaf = fstring_tstring_to_string(node)
node.replace(string_leaf)
if "\\" in string_leaf.value and any(
"\\" in str(child)
Expand Down
4 changes: 2 additions & 2 deletions src/black/lines.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,8 +64,8 @@ def append(
"""
has_value = (
leaf.type in BRACKETS
# empty fstring-middles must not be truncated
or leaf.type == token.FSTRING_MIDDLE
# empty fstring and tstring middles must not be truncated
or leaf.type in (token.FSTRING_MIDDLE, token.TSTRING_MIDDLE)
or bool(leaf.value.strip())
)
if not has_value:
Expand Down
7 changes: 3 additions & 4 deletions src/black/mode.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,9 +52,10 @@ class Feature(Enum):
DEBUG_F_STRINGS = 16
PARENTHESIZED_CONTEXT_MANAGERS = 17
TYPE_PARAMS = 18
FSTRING_PARSING = 19
# FSTRING_PARSING = 19 # unused
TYPE_PARAM_DEFAULTS = 20
UNPARENTHESIZED_EXCEPT_TYPES = 21
T_STRINGS = 22
FORCE_OPTIONAL_PARENTHESES = 50

# __future__ flags
Expand Down Expand Up @@ -165,7 +166,6 @@ class Feature(Enum):
Feature.EXCEPT_STAR,
Feature.VARIADIC_GENERICS,
Feature.TYPE_PARAMS,
Feature.FSTRING_PARSING,
},
TargetVersion.PY313: {
Feature.F_STRINGS,
Expand All @@ -185,7 +185,6 @@ class Feature(Enum):
Feature.EXCEPT_STAR,
Feature.VARIADIC_GENERICS,
Feature.TYPE_PARAMS,
Feature.FSTRING_PARSING,
Feature.TYPE_PARAM_DEFAULTS,
},
TargetVersion.PY314: {
Expand All @@ -206,9 +205,9 @@ class Feature(Enum):
Feature.EXCEPT_STAR,
Feature.VARIADIC_GENERICS,
Feature.TYPE_PARAMS,
Feature.FSTRING_PARSING,
Feature.TYPE_PARAM_DEFAULTS,
Feature.UNPARENTHESIZED_EXCEPT_TYPES,
Feature.T_STRINGS,
},
}

Expand Down
14 changes: 9 additions & 5 deletions src/black/nodes.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,8 @@
STANDALONE_COMMENT,
token.FSTRING_MIDDLE,
token.FSTRING_END,
token.TSTRING_MIDDLE,
token.TSTRING_END,
token.BANG,
}

Expand Down Expand Up @@ -207,7 +209,10 @@ def whitespace(leaf: Leaf, *, complex_subscript: bool, mode: Mode) -> str: # no
}:
return NO

if t == token.LBRACE and p.type == syms.fstring_replacement_field:
if t == token.LBRACE and p.type in (
syms.fstring_replacement_field,
syms.tstring_replacement_field,
):
return NO

prev = leaf.prev_sibling
Expand Down Expand Up @@ -395,7 +400,6 @@ def whitespace(leaf: Leaf, *, complex_subscript: bool, mode: Mode) -> str: # no
elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
return NO

# TODO: add fstring here?
elif t in {token.NAME, token.NUMBER, token.STRING}:
return NO

Expand Down Expand Up @@ -789,8 +793,8 @@ def is_fstring(node: Node) -> bool:
return node.type == syms.fstring


def fstring_to_string(node: Node) -> Leaf:
"""Converts an fstring node back to a string node."""
def fstring_tstring_to_string(node: Node) -> Leaf:
"""Converts an fstring or tstring node back to a string node."""
string_without_prefix = str(node)[len(node.prefix) :]
string_leaf = Leaf(token.STRING, string_without_prefix, prefix=node.prefix)
string_leaf.lineno = node.get_lineno() or 0
Expand All @@ -800,7 +804,7 @@ def fstring_to_string(node: Node) -> Leaf:
def is_multiline_string(node: LN) -> bool:
"""Return True if `leaf` is a multiline string that actually spans many lines."""
if isinstance(node, Node) and is_fstring(node):
leaf = fstring_to_string(node)
leaf = fstring_tstring_to_string(node)
elif isinstance(node, Leaf):
leaf = node
else:
Expand Down
2 changes: 1 addition & 1 deletion src/black/strings.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
from black._width_table import WIDTH_TABLE
from blib2to3.pytree import Leaf

STRING_PREFIX_CHARS: Final = "furbFURB" # All possible string prefix characters.
STRING_PREFIX_CHARS: Final = "fturbFTURB" # All possible string prefix characters.
STRING_PREFIX_RE: Final = re.compile(
r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", re.DOTALL
)
Expand Down
7 changes: 6 additions & 1 deletion src/blib2to3/Grammar.txt
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ atom: ('(' [yield_expr|testlist_gexp] ')' |
'[' [listmaker] ']' |
'{' [dictsetmaker] '}' |
'`' testlist1 '`' |
NAME | NUMBER | (STRING | fstring)+ | '.' '.' '.')
NAME | NUMBER | (STRING | fstring | tstring)+ | '.' '.' '.')
listmaker: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star_expr))* [','] )
testlist_gexp: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star_expr))* [','] )
lambdef: 'lambda' [varargslist] ':' test
Expand Down Expand Up @@ -259,3 +259,8 @@ fstring: FSTRING_START fstring_middle* FSTRING_END
fstring_middle: fstring_replacement_field | FSTRING_MIDDLE
fstring_replacement_field: '{' (yield_expr | testlist_star_expr) ['='] [ "!" NAME ] [ ':' fstring_format_spec* ] '}'
fstring_format_spec: FSTRING_MIDDLE | fstring_replacement_field

tstring: TSTRING_START tstring_middle* TSTRING_END
tstring_middle: tstring_replacement_field | TSTRING_MIDDLE
tstring_replacement_field: '{' (yield_expr | testlist_star_expr) ['='] [ "!" NAME ] [ ':' tstring_format_spec* ] '}'
tstring_format_spec: TSTRING_MIDDLE | tstring_replacement_field
10 changes: 7 additions & 3 deletions src/blib2to3/pgen2/driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,9 +168,13 @@ def parse_tokens(self, tokens: Iterable[TokenInfo], debug: bool = False) -> NL:
if type in {token.INDENT, token.DEDENT}:
prefix = _prefix
lineno, column = end
# FSTRING_MIDDLE is the only token that can end with a newline, and
# `end` will point to the next line. For that case, don't increment lineno.
if value.endswith("\n") and type != token.FSTRING_MIDDLE:
# FSTRING_MIDDLE and TSTRING_MIDDLE are the only tokens that can end with a
# newline, and `end` will point to the next line. For that case, don't
# increment lineno.
if value.endswith("\n") and type not in (
token.FSTRING_MIDDLE,
token.TSTRING_MIDDLE,
):
lineno += 1
column = 0
else:
Expand Down
5 changes: 4 additions & 1 deletion src/blib2to3/pgen2/token.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,10 @@
FSTRING_MIDDLE: Final = 61
FSTRING_END: Final = 62
BANG: Final = 63
N_TOKENS: Final = 64
TSTRING_START: Final = 64
TSTRING_MIDDLE: Final = 65
TSTRING_END: Final = 66
N_TOKENS: Final = 67
NT_OFFSET: Final = 256
# --end constants--

Expand Down
10 changes: 9 additions & 1 deletion src/blib2to3/pgen2/tokenize.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,6 @@
COMMENT,
DEDENT,
ENDMARKER,
ERRORTOKEN,
FSTRING_END,
FSTRING_MIDDLE,
FSTRING_START,
Expand All @@ -49,6 +48,9 @@
NUMBER,
OP,
STRING,
TSTRING_END,
TSTRING_MIDDLE,
TSTRING_START,
tok_name,
)

Expand Down Expand Up @@ -91,6 +93,9 @@
TokenType.fstring_start: FSTRING_START,
TokenType.fstring_middle: FSTRING_MIDDLE,
TokenType.fstring_end: FSTRING_END,
TokenType.tstring_start: TSTRING_START,
TokenType.tstring_middle: TSTRING_MIDDLE,
TokenType.tstring_end: TSTRING_END,
TokenType.endmarker: ENDMARKER,
}

Expand Down Expand Up @@ -186,6 +191,9 @@ def tokenize(source: str, grammar: Optional[Grammar] = None) -> Iterator[TokenIn
source_line,
)
else:
token_type = TOKEN_TYPE_MAP.get(token.type)
if token_type is None:
raise ValueError(f"Unknown token type: {token.type!r}")
yield (
TOKEN_TYPE_MAP[token.type],
token_str,
Expand Down
4 changes: 4 additions & 0 deletions src/blib2to3/pygram.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,10 @@ class _python_symbols(Symbols):
tname_star: int
trailer: int
try_stmt: int
tstring: int
tstring_format_spec: int
tstring_middle: int
tstring_replacement_field: int
type_stmt: int
typedargslist: int
typeparam: int
Expand Down
83 changes: 83 additions & 0 deletions tests/data/cases/pep_750.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
# flags: --minimum-version=3.14
x = t"foo"
x = t'foo {{ {2 + 2}bar {{ baz'

x = t"foo {f'abc'} bar"

x = t"""foo {{ a
foo {2 + 2}bar {{ baz

x = f"foo {{ {
2 + 2 # comment
}bar"

{{ baz

}} buzz

{print("abc" + "def"
)}
abc"""

t'{(abc:=10)}'

t'''This is a really long string, but just make sure that you reflow tstrings {
2+2:d
}'''
t'This is a really long string, but just make sure that you reflow tstrings correctly {2+2:d}'

t"{ 2 + 2 = }"

t'{
X
!r
}'

tr'\{{\}}'

t'''
WITH {f'''
{1}_cte AS ()'''}
'''

# output
x = t"foo"
x = t"foo {{ {2 + 2}bar {{ baz"

x = t"foo {f'abc'} bar"

x = t"""foo {{ a
foo {2 + 2}bar {{ baz

x = f"foo {{ {
2 + 2 # comment
}bar"

{{ baz

}} buzz

{print("abc" + "def"
)}
abc"""

t"{(abc:=10)}"

t"""This is a really long string, but just make sure that you reflow tstrings {
2+2:d
}"""
t"This is a really long string, but just make sure that you reflow tstrings correctly {2+2:d}"

t"{ 2 + 2 = }"

t"{
X
!r
}"

rt"\{{\}}"

t"""
WITH {f'''
{1}_cte AS ()'''}
"""
3 changes: 3 additions & 0 deletions tests/test_black.py
Original file line number Diff line number Diff line change
Expand Up @@ -898,6 +898,9 @@ def test_get_features_used(self) -> None:
self.check_features_used(
"with ((a, ((b as c)))): pass", {Feature.PARENTHESIZED_CONTEXT_MANAGERS}
)
self.check_features_used(
"x = t'foo {f'bar'}'", {Feature.T_STRINGS, Feature.F_STRINGS}
)

def check_features_used(self, source: str, expected: set[Feature]) -> None:
node = black.lib2to3_parse(source)
Expand Down