Merged
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -53,7 +53,7 @@ repos:
          # v8.2 has breaking changes. We work around them at runtime, but we need the newer stubs.
          - packaging >= 22.0
          - platformdirs >= 2.1.0
          - pytokens >= 0.1.10
          - pytokens @ git+https://github.com/tusharsadhwani/pytokens@py314
          - pytest
          - hypothesis
          - aiohttp >= 3.7.4
1 change: 1 addition & 0 deletions CHANGES.md
@@ -10,6 +10,7 @@
<!-- Include any especially major or disruptive changes here -->

- Enable base 3.14 support (#4804)
- Add support for the new Python 3.14 t-string syntax introduced by PEP 750 (#4805)

### Stable style

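For context on the changelog entry: a t-string (PEP 750) evaluates to a `string.templatelib.Template` rather than a `str`. A minimal sketch of the new syntax, assuming the PEP 750 API as accepted and a Python 3.14 interpreter:

```python
# Python 3.14+ only; assumes the PEP 750 API as accepted.
from string.templatelib import Template

name = "world"
greeting = t"hello {name}"
assert isinstance(greeting, Template)
# Static text and interpolations are kept separate rather than eagerly joined:
assert greeting.strings == ("hello ", "")
assert greeting.interpolations[0].value == "world"
```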
5 changes: 4 additions & 1 deletion pyproject.toml
@@ -70,7 +70,7 @@ dependencies = [
"packaging>=22.0",
"pathspec>=0.9.0",
"platformdirs>=2",
"pytokens>=0.1.10",
"pytokens @ git+https://github.com/tusharsadhwani/pytokens@py314",
"tomli>=1.1.0; python_version < '3.11'",
"typing_extensions>=4.0.1; python_version < '3.11'",
]
@@ -98,6 +98,9 @@ Changelog = "https://github.com/psf/black/blob/main/CHANGES.md"
Repository = "https://github.com/psf/black"
Issues = "https://github.com/psf/black/issues"

[tool.hatch.metadata]
allow-direct-references = true

[tool.hatch.metadata.hooks.fancy-pypi-readme]
content-type = "text/markdown"
fragments = [
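The `[tool.hatch.metadata]` table is needed because hatchling rejects PEP 508 direct references (`name @ url`) in dependencies unless `allow-direct-references` is enabled. A quick sketch of how such a requirement parses, using the `packaging` library already listed above:

```python
from packaging.requirements import Requirement

# The new pytokens pin is a PEP 508 "direct reference".
req = Requirement("pytokens @ git+https://github.com/tusharsadhwani/pytokens@py314")
print(req.name)  # pytokens
print(req.url)   # git+https://github.com/tusharsadhwani/pytokens@py314
```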
19 changes: 17 additions & 2 deletions src/black/linegen.py
@@ -38,7 +38,7 @@
    WHITESPACE,
    Visitor,
    ensure_visible,
    fstring_to_string,
    fstring_tstring_to_string,
    get_annotation_type,
    has_sibling_with_type,
    is_arith_like,
@@ -533,7 +533,22 @@ def visit_atom(self, node: Node) -> Iterator[Line]:

    def visit_fstring(self, node: Node) -> Iterator[Line]:
        # currently we don't want to format and split f-strings at all.
        string_leaf = fstring_to_string(node)
        string_leaf = fstring_tstring_to_string(node)
        node.replace(string_leaf)
        if "\\" in string_leaf.value and any(
            "\\" in str(child)
            for child in node.children
            if child.type == syms.fstring_replacement_field
        ):
            # string normalization doesn't account for nested quotes,
            # causing breakages. skip normalization when nested quotes exist
            yield from self.visit_default(string_leaf)
            return
        yield from self.visit_STRING(string_leaf)

    def visit_tstring(self, node: Node) -> Iterator[Line]:
        # currently we don't want to format and split t-strings at all.
        string_leaf = fstring_tstring_to_string(node)
        node.replace(string_leaf)
        if "\\" in string_leaf.value and any(
            "\\" in str(child)
            for child in node.children
            if child.type == syms.tstring_replacement_field
        ):
            # string normalization doesn't account for nested quotes,
            # causing breakages. skip normalization when nested quotes exist
            yield from self.visit_default(string_leaf)
            return
        yield from self.visit_STRING(string_leaf)
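A minimal illustration of the normalization guard above (Python 3.14 syntax, hypothetical input): the collapsed string and one of its replacement fields both contain a backslash, so quote normalization is skipped rather than risk corrupting the nested literal:

```python
# Hypothetical input, Python 3.14 syntax.
items = ["a", "b"]
x = t'joined: {"\n".join(items)}'  # left untouched: the field holds both a
                                   # backslash and nested double quotes, so
                                   # normalizing ' to " could break it
```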
4 changes: 2 additions & 2 deletions src/black/lines.py
@@ -64,8 +64,8 @@ def append(
"""
has_value = (
leaf.type in BRACKETS
# empty fstring-middles must not be truncated
or leaf.type == token.FSTRING_MIDDLE
# empty fstring and tstring middles must not be truncated
or leaf.type in (token.FSTRING_MIDDLE, token.TSTRING_MIDDLE)
or bool(leaf.value.strip())
)
if not has_value:
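The exemption matters because `append()` folds leaves it considers valueless into the next leaf's prefix; an empty middle token would otherwise vanish even though the template string must later be reassembled verbatim. A small sketch against the shown logic:

```python
# Sketch of the has_value test above, exercised with blib2to3 directly.
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf

middle = Leaf(token.TSTRING_MIDDLE, "")     # an empty t-string middle
assert not bool(middle.value.strip())       # fails the generic value test...
assert middle.type == token.TSTRING_MIDDLE  # ...but the type exemption keeps it
```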
14 changes: 9 additions & 5 deletions src/black/nodes.py
@@ -140,6 +140,8 @@
    STANDALONE_COMMENT,
    token.FSTRING_MIDDLE,
    token.FSTRING_END,
    token.TSTRING_MIDDLE,
    token.TSTRING_END,
    token.BANG,
}

@@ -207,7 +209,7 @@ def whitespace(leaf: Leaf, *, complex_subscript: bool, mode: Mode) -> str:
    }:
        return NO

    if t == token.LBRACE and p.type == syms.fstring_replacement_field:
    if t == token.LBRACE and p.type in (
        syms.fstring_replacement_field,
        syms.tstring_replacement_field,
    ):
        return NO

    prev = leaf.prev_sibling
@@ -395,7 +400,6 @@
        elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
            return NO

        # TODO: add fstring here?
        elif t in {token.NAME, token.NUMBER, token.STRING}:
            return NO

@@ -789,8 +793,8 @@ def is_fstring(node: Node) -> bool:
    return node.type == syms.fstring


def fstring_to_string(node: Node) -> Leaf:
    """Converts an fstring node back to a string node."""
def fstring_tstring_to_string(node: Node) -> Leaf:
    """Converts an fstring or tstring node back to a string node."""
    string_without_prefix = str(node)[len(node.prefix) :]
    string_leaf = Leaf(token.STRING, string_without_prefix, prefix=node.prefix)
    string_leaf.lineno = node.get_lineno() or 0
@@ -800,7 +804,7 @@ def fstring_to_string(node: Node) -> Leaf:
def is_multiline_string(node: LN) -> bool:
    """Return True if `node` is a multiline string that actually spans many lines."""
    if isinstance(node, Node) and is_fstring(node):
        leaf = fstring_to_string(node)
        leaf = fstring_tstring_to_string(node)
    elif isinstance(node, Leaf):
        leaf = node
    else:
2 changes: 1 addition & 1 deletion src/black/strings.py
@@ -11,7 +11,7 @@
from black._width_table import WIDTH_TABLE
from blib2to3.pytree import Leaf

STRING_PREFIX_CHARS: Final = "furbFURB" # All possible string prefix characters.
STRING_PREFIX_CHARS: Final = "fturbFTURB" # All possible string prefix characters.
STRING_PREFIX_RE: Final = re.compile(
    r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", re.DOTALL
)
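With `t`/`T` added to the prefix alphabet, the regex now strips template-string prefixes the same way as the existing ones. A quick check against the definitions above:

```python
import re

STRING_PREFIX_CHARS = "fturbFTURB"
STRING_PREFIX_RE = re.compile(r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", re.DOTALL)

match = STRING_PREFIX_RE.match('tr"raw template"')
assert match is not None
assert match.group(1) == "tr"            # prefix, including the new "t"
assert match.group(2) == '"raw template"'
```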
7 changes: 6 additions & 1 deletion src/blib2to3/Grammar.txt
@@ -163,7 +163,7 @@ atom: ('(' [yield_expr|testlist_gexp] ')' |
       '[' [listmaker] ']' |
       '{' [dictsetmaker] '}' |
       '`' testlist1 '`' |
       NAME | NUMBER | (STRING | fstring)+ | '.' '.' '.')
       NAME | NUMBER | (STRING | fstring | tstring)+ | '.' '.' '.')
listmaker: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star_expr))* [','] )
testlist_gexp: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star_expr))* [','] )
lambdef: 'lambda' [varargslist] ':' test
@@ -259,3 +259,8 @@ fstring: FSTRING_START fstring_middle* FSTRING_END
fstring_middle: fstring_replacement_field | FSTRING_MIDDLE
fstring_replacement_field: '{' (yield_expr | testlist_star_expr) ['='] [ "!" NAME ] [ ':' fstring_format_spec* ] '}'
fstring_format_spec: FSTRING_MIDDLE | fstring_replacement_field

tstring: TSTRING_START tstring_middle* TSTRING_END
tstring_middle: tstring_replacement_field | TSTRING_MIDDLE
tstring_replacement_field: '{' (yield_expr | testlist_star_expr) ['='] [ "!" NAME ] [ ':' tstring_format_spec* ] '}'
tstring_format_spec: TSTRING_MIDDLE | tstring_replacement_field
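Roughly how the new productions decompose a template string (a sketch; the token spellings are assumptions based on the f-string analogue):

```python
# t"pre {val!r:>8} post" parses, per the grammar above, as approximately:
#   TSTRING_START              t"
#   TSTRING_MIDDLE             "pre "
#   tstring_replacement_field  { val !r :>8 }   (the spec after ":" is a
#                                                tstring_format_spec)
#   TSTRING_MIDDLE             " post"
#   TSTRING_END                "
val = 42  # Python 3.14 syntax below
x = t"pre {val!r:>8} post"
```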
5 changes: 3 additions & 2 deletions src/blib2to3/pgen2/driver.py
@@ -168,8 +168,9 @@ def parse_tokens(self, tokens: Iterable[TokenInfo], debug: bool = False) -> NL:
            if type in {token.INDENT, token.DEDENT}:
                prefix = _prefix
            lineno, column = end
            # FSTRING_MIDDLE is the only token that can end with a newline, and
            # `end` will point to the next line. For that case, don't increment lineno.
            # FSTRING_MIDDLE and TSTRING_MIDDLE are the only tokens that can end
            # with a newline, and `end` will point to the next line. For that case,
            # don't increment lineno.
            if value.endswith("\n") and type not in {
                token.FSTRING_MIDDLE,
                token.TSTRING_MIDDLE,
            }:
                lineno += 1
                column = 0
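The special case exists because a multiline template or f-string yields middle tokens whose text ends in a newline, with `end` already pointing at the following line. A sketch:

```python
# Hypothetical multiline t-string (Python 3.14 syntax):
y = "!"
x = t"""line one
{y} line two"""
# The TSTRING_MIDDLE token 'line one\n' ends with a newline, so its `end`
# coordinate already sits on the next line; bumping lineno again would
# double-count it.
```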
5 changes: 4 additions & 1 deletion src/blib2to3/pgen2/token.py
@@ -70,7 +70,10 @@
FSTRING_MIDDLE: Final = 61
FSTRING_END: Final = 62
BANG: Final = 63
N_TOKENS: Final = 64
TSTRING_START: Final = 64
TSTRING_MIDDLE: Final = 65
TSTRING_END: Final = 66
N_TOKENS: Final = 67
NT_OFFSET: Final = 256
# --end constants--

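The new constants slot in before `N_TOKENS` and, assuming `tok_name` is populated from the module's integer constants as elsewhere in pgen2, round-trip by name:

```python
from blib2to3.pgen2 import token

assert token.TSTRING_START == 64
assert token.tok_name[token.TSTRING_MIDDLE] == "TSTRING_MIDDLE"
```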
10 changes: 9 additions & 1 deletion src/blib2to3/pgen2/tokenize.py
@@ -38,7 +38,6 @@
    COMMENT,
    DEDENT,
    ENDMARKER,
    ERRORTOKEN,
    FSTRING_END,
    FSTRING_MIDDLE,
    FSTRING_START,
@@ -49,6 +48,9 @@
    NUMBER,
    OP,
    STRING,
    TSTRING_END,
    TSTRING_MIDDLE,
    TSTRING_START,
    tok_name,
)
@@ -91,6 +93,9 @@
    TokenType.fstring_start: FSTRING_START,
    TokenType.fstring_middle: FSTRING_MIDDLE,
    TokenType.fstring_end: FSTRING_END,
    TokenType.tstring_start: TSTRING_START,
    TokenType.tstring_middle: TSTRING_MIDDLE,
    TokenType.tstring_end: TSTRING_END,
    TokenType.endmarker: ENDMARKER,
}

@@ -186,6 +191,9 @@ def tokenize(source: str, grammar: Optional[Grammar] = None) -> Iterator[TokenInfo]:
                source_line,
            )
        else:
            token_type = TOKEN_TYPE_MAP.get(token.type)
            if token_type is None:
                raise ValueError(f"Unknown token type: {token.type!r}")
            yield (
                token_type,
                token_str,
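A hedged usage sketch of the wrapper, assuming the usual blib2to3 5-tuple layout `(type, string, start, end, line)` for `TokenInfo` and a pytokens build with t-string support:

```python
from blib2to3.pgen2 import token
from blib2to3.pgen2.tokenize import tokenize

for tok_type, tok_str, _start, _end, _line in tokenize('x = t"hi {name}"\n'):
    print(token.tok_name[tok_type], repr(tok_str))
# Expected to include TSTRING_START / TSTRING_MIDDLE / TSTRING_END rows
# alongside the OP/NAME tokens of the replacement field.
```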
4 changes: 4 additions & 0 deletions src/blib2to3/pygram.py
@@ -126,6 +126,10 @@ class _python_symbols(Symbols):
    tname_star: int
    trailer: int
    try_stmt: int
    tstring: int
    tstring_format_spec: int
    tstring_middle: int
    tstring_replacement_field: int
    type_stmt: int
    typedargslist: int
    typeparam: int
7 changes: 7 additions & 0 deletions tests/data/cases/pep_750.py
@@ -0,0 +1,7 @@
# flags: --minimum-version=3.14
x = t"foo"
x = t'foo {{ {2 + 2}bar {{ baz'

# output
x = t"foo"
x = t"foo {{ {2 + 2}bar {{ baz"