diff --git a/.cspell.json b/.cspell.json
index 6112c35d59..9f88a74f96 100644
--- a/.cspell.json
+++ b/.cspell.json
@@ -60,6 +60,7 @@
"dedentations",
"dedents",
"deduped",
+ "downcastable",
"downcasted",
"dumpable",
"emscripten",
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index c5e9344956..b0ff575f1e 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -113,7 +113,7 @@ jobs:
RUST_BACKTRACE: full
name: Run rust tests
runs-on: ${{ matrix.os }}
- timeout-minutes: ${{ contains(matrix.os, 'windows') && 40 || 30 }}
+ timeout-minutes: ${{ contains(matrix.os, 'windows') && 45 || 30 }}
strategy:
matrix:
os: [macos-latest, ubuntu-latest, windows-latest]
@@ -239,7 +239,7 @@ jobs:
RUST_BACKTRACE: full
name: Run snippets and cpython tests
runs-on: ${{ matrix.os }}
- timeout-minutes: ${{ contains(matrix.os, 'windows') && 40 || 30 }}
+ timeout-minutes: ${{ contains(matrix.os, 'windows') && 45 || 30 }}
strategy:
matrix:
os: [macos-latest, ubuntu-latest, windows-latest]
@@ -348,18 +348,24 @@ jobs:
name: Run tests under miri
runs-on: ubuntu-latest
timeout-minutes: 30
+ env:
+ NIGHTLY_CHANNEL: nightly
steps:
- uses: actions/checkout@v4
+
- uses: dtolnay/rust-toolchain@master
with:
- toolchain: nightly
+ toolchain: ${{ env.NIGHTLY_CHANNEL }}
components: miri
- uses: Swatinem/rust-cache@v2
+
- name: Run tests under miri
+ run: cargo +${{ env.NIGHTLY_CHANNEL }} miri test -p rustpython-vm -- miri_test
+ env:
# miri-ignore-leaks because the type-object circular reference means that there will always be
# a memory leak, at least until we have proper cyclic gc
- run: MIRIFLAGS='-Zmiri-ignore-leaks' cargo +nightly miri test -p rustpython-vm -- miri_test
+ MIRIFLAGS: '-Zmiri-ignore-leaks'
wasm:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip:ci') }}
diff --git a/Cargo.lock b/Cargo.lock
index c6dee806c7..5f240f4261 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1958,8 +1958,9 @@ checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
[[package]]
name = "radium"
-version = "1.1.0"
-source = "git+https://github.com/youknowone/ferrilab?branch=fix-nightly#4a301c3a223e096626a2773d1a1eed1fc4e21140"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1775bc532a9bfde46e26eba441ca1171b91608d14a3bae71fea371f18a00cffe"
dependencies = [
"cfg-if",
]
@@ -2274,10 +2275,8 @@ dependencies = [
"num-traits",
"ruff_python_ast",
"ruff_python_parser",
- "ruff_source_file",
"ruff_text_size",
"rustpython-compiler-core",
- "rustpython-compiler-source",
"rustpython-literal",
"rustpython-wtf8",
"thiserror 2.0.12",
@@ -2324,7 +2323,6 @@ dependencies = [
"ruff_text_size",
"rustpython-codegen",
"rustpython-compiler-core",
- "rustpython-compiler-source",
"thiserror 2.0.12",
]
@@ -2342,14 +2340,6 @@ dependencies = [
"serde",
]
-[[package]]
-name = "rustpython-compiler-source"
-version = "0.4.0"
-dependencies = [
- "ruff_source_file",
- "ruff_text_size",
-]
-
[[package]]
name = "rustpython-derive"
version = "0.4.0"
@@ -2553,14 +2543,12 @@ dependencies = [
"result-like",
"ruff_python_ast",
"ruff_python_parser",
- "ruff_source_file",
"ruff_text_size",
"rustix",
"rustpython-codegen",
"rustpython-common",
"rustpython-compiler",
"rustpython-compiler-core",
- "rustpython-compiler-source",
"rustpython-derive",
"rustpython-jit",
"rustpython-literal",
diff --git a/Cargo.toml b/Cargo.toml
index 1fdc77d261..77ecb2a769 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -82,7 +82,6 @@ opt-level = 3
lto = "thin"
[patch.crates-io]
-radium = { version = "1.1.0", git = "https://github.com/youknowone/ferrilab", branch = "fix-nightly" }
# REDOX START, Uncomment when you want to compile/check with redoxer
# REDOX END
@@ -118,8 +117,20 @@ template = "installer-config/installer.wxs"
[workspace]
resolver = "2"
members = [
- "compiler", "compiler/core", "compiler/codegen", "compiler/literal", "compiler/source",
- ".", "common", "derive", "jit", "vm", "vm/sre_engine", "pylib", "stdlib", "derive-impl", "wtf8",
+ "compiler",
+ "compiler/core",
+ "compiler/codegen",
+ "compiler/literal",
+ ".",
+ "common",
+ "derive",
+ "jit",
+ "vm",
+ "vm/sre_engine",
+ "pylib",
+ "stdlib",
+ "derive-impl",
+ "wtf8",
"wasm/lib",
]
@@ -132,7 +143,6 @@ repository = "https://github.com/RustPython/RustPython"
license = "MIT"
[workspace.dependencies]
-rustpython-compiler-source = { path = "compiler/source" }
rustpython-compiler-core = { path = "compiler/core", version = "0.4.0" }
rustpython-compiler = { path = "compiler", version = "0.4.0" }
rustpython-codegen = { path = "compiler/codegen", version = "0.4.0" }
@@ -190,7 +200,7 @@ paste = "1.0.15"
proc-macro2 = "1.0.93"
pymath = "0.0.2"
quote = "1.0.38"
-radium = "1.1"
+radium = "1.1.1"
rand = "0.9"
rand_core = { version = "0.9", features = ["os_rng"] }
rustix = { version = "1.0", features = ["event"] }
diff --git a/Lib/_colorize.py b/Lib/_colorize.py
index 70acfd4ad0..9eb6f0933b 100644
--- a/Lib/_colorize.py
+++ b/Lib/_colorize.py
@@ -1,21 +1,64 @@
+from __future__ import annotations
import io
import os
import sys
COLORIZE = True
+# types
+if False:
+ from typing import IO
+
class ANSIColors:
- BOLD_GREEN = "\x1b[1;32m"
- BOLD_MAGENTA = "\x1b[1;35m"
- BOLD_RED = "\x1b[1;31m"
+ RESET = "\x1b[0m"
+
+ BLACK = "\x1b[30m"
+ BLUE = "\x1b[34m"
+ CYAN = "\x1b[36m"
GREEN = "\x1b[32m"
- GREY = "\x1b[90m"
MAGENTA = "\x1b[35m"
RED = "\x1b[31m"
- RESET = "\x1b[0m"
+ WHITE = "\x1b[37m" # more like LIGHT GRAY
YELLOW = "\x1b[33m"
+ BOLD_BLACK = "\x1b[1;30m" # DARK GRAY
+ BOLD_BLUE = "\x1b[1;34m"
+ BOLD_CYAN = "\x1b[1;36m"
+ BOLD_GREEN = "\x1b[1;32m"
+ BOLD_MAGENTA = "\x1b[1;35m"
+ BOLD_RED = "\x1b[1;31m"
+ BOLD_WHITE = "\x1b[1;37m" # actual WHITE
+ BOLD_YELLOW = "\x1b[1;33m"
+
+ # intense = like bold but without being bold
+ INTENSE_BLACK = "\x1b[90m"
+ INTENSE_BLUE = "\x1b[94m"
+ INTENSE_CYAN = "\x1b[96m"
+ INTENSE_GREEN = "\x1b[92m"
+ INTENSE_MAGENTA = "\x1b[95m"
+ INTENSE_RED = "\x1b[91m"
+ INTENSE_WHITE = "\x1b[97m"
+ INTENSE_YELLOW = "\x1b[93m"
+
+ BACKGROUND_BLACK = "\x1b[40m"
+ BACKGROUND_BLUE = "\x1b[44m"
+ BACKGROUND_CYAN = "\x1b[46m"
+ BACKGROUND_GREEN = "\x1b[42m"
+ BACKGROUND_MAGENTA = "\x1b[45m"
+ BACKGROUND_RED = "\x1b[41m"
+ BACKGROUND_WHITE = "\x1b[47m"
+ BACKGROUND_YELLOW = "\x1b[43m"
+
+ INTENSE_BACKGROUND_BLACK = "\x1b[100m"
+ INTENSE_BACKGROUND_BLUE = "\x1b[104m"
+ INTENSE_BACKGROUND_CYAN = "\x1b[106m"
+ INTENSE_BACKGROUND_GREEN = "\x1b[102m"
+ INTENSE_BACKGROUND_MAGENTA = "\x1b[105m"
+ INTENSE_BACKGROUND_RED = "\x1b[101m"
+ INTENSE_BACKGROUND_WHITE = "\x1b[107m"
+ INTENSE_BACKGROUND_YELLOW = "\x1b[103m"
+
NoColors = ANSIColors()
@@ -24,14 +67,16 @@ class ANSIColors:
setattr(NoColors, attr, "")
-def get_colors(colorize: bool = False, *, file=None) -> ANSIColors:
+def get_colors(
+ colorize: bool = False, *, file: IO[str] | IO[bytes] | None = None
+) -> ANSIColors:
if colorize or can_colorize(file=file):
return ANSIColors()
else:
return NoColors
-def can_colorize(*, file=None) -> bool:
+def can_colorize(*, file: IO[str] | IO[bytes] | None = None) -> bool:
if file is None:
file = sys.stdout
@@ -64,4 +109,4 @@ def can_colorize(*, file=None) -> bool:
try:
return os.isatty(file.fileno())
except io.UnsupportedOperation:
- return file.isatty()
+ return hasattr(file, "isatty") and file.isatty()
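A quick usage sketch for the expanded helper above (not part of the patch; it assumes the patched Lib/_colorize.py is importable, as on CPython 3.13+):

    # _colorize is a private stdlib helper; the attribute set is the one added above.
    from _colorize import get_colors, can_colorize

    colors = get_colors()  # ANSIColors() when the stream looks like a color-capable tty, else NoColors
    print(f"{colors.BOLD_RED}error:{colors.RESET} something went wrong")
    print(can_colorize(file=open("build.log", "w")))  # typically False: a plain file is not a tty
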
diff --git a/Lib/_pydecimal.py b/Lib/_pydecimal.py
index a0b608380a..ff80180a79 100644
--- a/Lib/_pydecimal.py
+++ b/Lib/_pydecimal.py
@@ -13,104 +13,7 @@
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
-"""
-This is an implementation of decimal floating point arithmetic based on
-the General Decimal Arithmetic Specification:
-
- http://speleotrove.com/decimal/decarith.html
-
-and IEEE standard 854-1987:
-
- http://en.wikipedia.org/wiki/IEEE_854-1987
-
-Decimal floating point has finite precision with arbitrarily large bounds.
-
-The purpose of this module is to support arithmetic using familiar
-"schoolhouse" rules and to avoid some of the tricky representation
-issues associated with binary floating point. The package is especially
-useful for financial applications or for contexts where users have
-expectations that are at odds with binary floating point (for instance,
-in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
-of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
-Decimal('0.00')).
-
-Here are some examples of using the decimal module:
-
->>> from decimal import *
->>> setcontext(ExtendedContext)
->>> Decimal(0)
-Decimal('0')
->>> Decimal('1')
-Decimal('1')
->>> Decimal('-.0123')
-Decimal('-0.0123')
->>> Decimal(123456)
-Decimal('123456')
->>> Decimal('123.45e12345678')
-Decimal('1.2345E+12345680')
->>> Decimal('1.33') + Decimal('1.27')
-Decimal('2.60')
->>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
-Decimal('-2.20')
->>> dig = Decimal(1)
->>> print(dig / Decimal(3))
-0.333333333
->>> getcontext().prec = 18
->>> print(dig / Decimal(3))
-0.333333333333333333
->>> print(dig.sqrt())
-1
->>> print(Decimal(3).sqrt())
-1.73205080756887729
->>> print(Decimal(3) ** 123)
-4.85192780976896427E+58
->>> inf = Decimal(1) / Decimal(0)
->>> print(inf)
-Infinity
->>> neginf = Decimal(-1) / Decimal(0)
->>> print(neginf)
--Infinity
->>> print(neginf + inf)
-NaN
->>> print(neginf * inf)
--Infinity
->>> print(dig / 0)
-Infinity
->>> getcontext().traps[DivisionByZero] = 1
->>> print(dig / 0)
-Traceback (most recent call last):
- ...
- ...
- ...
-decimal.DivisionByZero: x / 0
->>> c = Context()
->>> c.traps[InvalidOperation] = 0
->>> print(c.flags[InvalidOperation])
-0
->>> c.divide(Decimal(0), Decimal(0))
-Decimal('NaN')
->>> c.traps[InvalidOperation] = 1
->>> print(c.flags[InvalidOperation])
-1
->>> c.flags[InvalidOperation] = 0
->>> print(c.flags[InvalidOperation])
-0
->>> print(c.divide(Decimal(0), Decimal(0)))
-Traceback (most recent call last):
- ...
- ...
- ...
-decimal.InvalidOperation: 0 / 0
->>> print(c.flags[InvalidOperation])
-1
->>> c.flags[InvalidOperation] = 0
->>> c.traps[InvalidOperation] = 0
->>> print(c.divide(Decimal(0), Decimal(0)))
-NaN
->>> print(c.flags[InvalidOperation])
-1
->>>
-"""
+"""Python decimal arithmetic module"""
__all__ = [
# Two major classes
@@ -140,8 +43,11 @@
# Limits for the C version for compatibility
'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',
- # C version: compile time choice that enables the thread local context
- 'HAVE_THREADS'
+ # C version: compile time choice that enables the thread local context (deprecated, now always true)
+ 'HAVE_THREADS',
+
+ # C version: compile time choice that enables the coroutine local context
+ 'HAVE_CONTEXTVAR'
]
__xname__ = __name__ # sys.modules lookup (--without-threads)
@@ -156,7 +62,7 @@
try:
from collections import namedtuple as _namedtuple
- DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
+ DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent', module='decimal')
except ImportError:
DecimalTuple = lambda *args: args
@@ -172,6 +78,7 @@
# Compatibility with the C version
HAVE_THREADS = True
+HAVE_CONTEXTVAR = True
if sys.maxsize == 2**63-1:
MAX_PREC = 999999999999999999
MAX_EMAX = 999999999999999999
@@ -190,7 +97,7 @@ class DecimalException(ArithmeticError):
Used exceptions derive from this.
If an exception derives from another exception besides this (such as
- Underflow (Inexact, Rounded, Subnormal) that indicates that it is only
+ Underflow (Inexact, Rounded, Subnormal)) that indicates that it is only
called if the others are present. This isn't actually used for
anything, though.
@@ -238,7 +145,7 @@ class InvalidOperation(DecimalException):
x ** (+-)INF
An operand is invalid
- The result of the operation after these is a quiet positive NaN,
+ The result of the operation after this is a quiet positive NaN,
except when the cause is a signaling NaN, in which case the result is
also a quiet NaN, but with the original sign, and an optional
diagnostic information.
@@ -431,82 +338,40 @@ class FloatOperation(DecimalException, TypeError):
##### Context Functions ##################################################
# The getcontext() and setcontext() function manage access to a thread-local
-# current context. Py2.4 offers direct support for thread locals. If that
-# is not available, use threading.current_thread() which is slower but will
-# work for older Pythons. If threads are not part of the build, create a
-# mock threading object with threading.local() returning the module namespace.
-
-try:
- import threading
-except ImportError:
- # Python was compiled without threads; create a mock object instead
- class MockThreading(object):
- def local(self, sys=sys):
- return sys.modules[__xname__]
- threading = MockThreading()
- del MockThreading
-
-try:
- threading.local
-
-except AttributeError:
-
- # To fix reloading, force it to create a new context
- # Old contexts have different exceptions in their dicts, making problems.
- if hasattr(threading.current_thread(), '__decimal_context__'):
- del threading.current_thread().__decimal_context__
+# current context.
- def setcontext(context):
- """Set this thread's context to context."""
- if context in (DefaultContext, BasicContext, ExtendedContext):
- context = context.copy()
- context.clear_flags()
- threading.current_thread().__decimal_context__ = context
+import contextvars
- def getcontext():
- """Returns this thread's context.
+_current_context_var = contextvars.ContextVar('decimal_context')
- If this thread does not yet have a context, returns
- a new context and sets this thread's context.
- New contexts are copies of DefaultContext.
- """
- try:
- return threading.current_thread().__decimal_context__
- except AttributeError:
- context = Context()
- threading.current_thread().__decimal_context__ = context
- return context
+_context_attributes = frozenset(
+ ['prec', 'Emin', 'Emax', 'capitals', 'clamp', 'rounding', 'flags', 'traps']
+)
-else:
+def getcontext():
+ """Returns this thread's context.
- local = threading.local()
- if hasattr(local, '__decimal_context__'):
- del local.__decimal_context__
+ If this thread does not yet have a context, returns
+ a new context and sets this thread's context.
+ New contexts are copies of DefaultContext.
+ """
+ try:
+ return _current_context_var.get()
+ except LookupError:
+ context = Context()
+ _current_context_var.set(context)
+ return context
+
+def setcontext(context):
+ """Set this thread's context to context."""
+ if context in (DefaultContext, BasicContext, ExtendedContext):
+ context = context.copy()
+ context.clear_flags()
+ _current_context_var.set(context)
- def getcontext(_local=local):
- """Returns this thread's context.
+del contextvars # Don't contaminate the namespace
- If this thread does not yet have a context, returns
- a new context and sets this thread's context.
- New contexts are copies of DefaultContext.
- """
- try:
- return _local.__decimal_context__
- except AttributeError:
- context = Context()
- _local.__decimal_context__ = context
- return context
-
- def setcontext(context, _local=local):
- """Set this thread's context to context."""
- if context in (DefaultContext, BasicContext, ExtendedContext):
- context = context.copy()
- context.clear_flags()
- _local.__decimal_context__ = context
-
- del threading, local # Don't contaminate the namespace
-
-def localcontext(ctx=None):
+def localcontext(ctx=None, **kwargs):
"""Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
@@ -542,8 +407,14 @@ def sin(x):
>>> print(getcontext().prec)
28
"""
- if ctx is None: ctx = getcontext()
- return _ContextManager(ctx)
+ if ctx is None:
+ ctx = getcontext()
+ ctx_manager = _ContextManager(ctx)
+ for key, value in kwargs.items():
+ if key not in _context_attributes:
+ raise TypeError(f"'{key}' is an invalid keyword argument for this function")
+ setattr(ctx_manager.new_context, key, value)
+ return ctx_manager
##### Decimal class #######################################################
@@ -553,7 +424,7 @@ def sin(x):
# numbers.py for more detail.
class Decimal(object):
- """Floating point class for decimal arithmetic."""
+ """Floating-point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
@@ -993,7 +864,7 @@ def __hash__(self):
if self.is_snan():
raise TypeError('Cannot hash a signaling NaN value.')
elif self.is_nan():
- return _PyHASH_NAN
+ return object.__hash__(self)
else:
if self._sign:
return -_PyHASH_INF
@@ -1674,13 +1545,13 @@ def __int__(self):
__trunc__ = __int__
+ @property
def real(self):
return self
- real = property(real)
+ @property
def imag(self):
return Decimal(0)
- imag = property(imag)
def conjugate(self):
return self
@@ -2260,10 +2131,16 @@ def _power_exact(self, other, p):
else:
return None
- if xc >= 10**p:
+ # An exact power of 10 is representable, but can convert to a
+ # string of any length. But an exact power of 10 shouldn't be
+ # possible at this point.
+ assert xc > 1, self
+ assert xc % 10 != 0, self
+ strxc = str(xc)
+ if len(strxc) > p:
return None
xe = -e-xe
- return _dec_from_triple(0, str(xc), xe)
+ return _dec_from_triple(0, strxc, xe)
# now y is positive; find m and n such that y = m/n
if ye >= 0:
@@ -2272,7 +2149,7 @@ def _power_exact(self, other, p):
if xe != 0 and len(str(abs(yc*xe))) <= -ye:
return None
xc_bits = _nbits(xc)
- if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
+ if len(str(abs(yc)*xc_bits)) <= -ye:
return None
m, n = yc, 10**(-ye)
while m % 2 == n % 2 == 0:
@@ -2285,7 +2162,7 @@ def _power_exact(self, other, p):
# compute nth root of xc*10**xe
if n > 1:
# if 1 < xc < 2**n then xc isn't an nth power
- if xc != 1 and xc_bits <= n:
+ if xc_bits <= n:
return None
xe, rem = divmod(xe, n)
@@ -2313,13 +2190,18 @@ def _power_exact(self, other, p):
return None
xc = xc**m
xe *= m
- if xc > 10**p:
+ # An exact power of 10 is representable, but can convert to a string
+ # of any length. But an exact power of 10 shouldn't be possible at
+ # this point.
+ assert xc > 1, self
+ assert xc % 10 != 0, self
+ str_xc = str(xc)
+ if len(str_xc) > p:
return None
# by this point the result *is* exactly representable
# adjust the exponent to get as close as possible to the ideal
# exponent, if necessary
- str_xc = str(xc)
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(xe-ideal_exponent, p-len(str_xc))
@@ -3837,6 +3719,10 @@ def __format__(self, specifier, context=None, _localeconv=None):
# represented in fixed point; rescale them to 0e0.
if not self and self._exp > 0 and spec['type'] in 'fF%':
self = self._rescale(0, rounding)
+ if not self and spec['no_neg_0'] and self._sign:
+ adjusted_sign = 0
+ else:
+ adjusted_sign = self._sign
# figure out placement of the decimal point
leftdigits = self._exp + len(self._int)
@@ -3867,7 +3753,7 @@ def __format__(self, specifier, context=None, _localeconv=None):
# done with the decimal-specific stuff; hand over the rest
# of the formatting to the _format_number function
- return _format_number(self._sign, intpart, fracpart, exp, spec)
+ return _format_number(adjusted_sign, intpart, fracpart, exp, spec)
def _dec_from_triple(sign, coefficient, exponent, special=False):
"""Create a decimal instance directly, without any validation,
@@ -5677,8 +5563,6 @@ def __init__(self, value=None):
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
- __str__ = __repr__
-
def _normalize(op1, op2, prec = 0):
@@ -6187,7 +6071,7 @@ def _convert_for_comparison(self, other, equality_op=False):
#
# A format specifier for Decimal looks like:
#
-# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]
+# [[fill]align][sign][z][#][0][minimumwidth][,][.precision][type]
_parse_format_specifier_regex = re.compile(r"""\A
(?:
@@ -6195,6 +6079,7 @@ def _convert_for_comparison(self, other, equality_op=False):
    (?P<align>[<>=^])
 )?
 (?P<sign>[-+ ])?
+(?P<no_neg_0>z)?
 (?P<alt>\#)?
 (?P<zeropad>0)?
 (?P<minimumwidth>(?!0)\d+)?
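The localcontext(**kwargs) change above is easiest to see with a small example (a sketch, not part of the patch; it goes through the public decimal module and assumes the keyword form is available, as in CPython 3.11+):

    from decimal import Decimal, localcontext, getcontext

    with localcontext(prec=5) as ctx:      # 'prec' must be one of _context_attributes
        print(Decimal(1) / Decimal(7))     # 0.14286 -- five significant digits
    print(getcontext().prec)               # outer precision is restored when the block exits
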
diff --git a/Lib/_weakrefset.py b/Lib/_weakrefset.py
index 2a27684324..489eec714e 100644
--- a/Lib/_weakrefset.py
+++ b/Lib/_weakrefset.py
@@ -80,8 +80,7 @@ def __contains__(self, item):
return wr in self.data
def __reduce__(self):
- return (self.__class__, (list(self),),
- getattr(self, '__dict__', None))
+ return self.__class__, (list(self),), self.__getstate__()
def add(self, item):
if self._pending_removals:
diff --git a/Lib/ast.py b/Lib/ast.py
index 07044706dc..37b20206b8 100644
--- a/Lib/ast.py
+++ b/Lib/ast.py
@@ -1,28 +1,24 @@
"""
- ast
- ~~~
-
- The `ast` module helps Python applications to process trees of the Python
- abstract syntax grammar. The abstract syntax itself might change with
- each Python release; this module helps to find out programmatically what
- the current grammar looks like and allows modifications of it.
-
- An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
- a flag to the `compile()` builtin function or by using the `parse()`
- function from this module. The result will be a tree of objects whose
- classes all inherit from `ast.AST`.
-
- A modified abstract syntax tree can be compiled into a Python code object
- using the built-in `compile()` function.
-
- Additionally various helper functions are provided that make working with
- the trees simpler. The main intention of the helper functions and this
- module in general is to provide an easy to use interface for libraries
- that work tightly with the python syntax (template engines for example).
-
-
- :copyright: Copyright 2008 by Armin Ronacher.
- :license: Python License.
+The `ast` module helps Python applications to process trees of the Python
+abstract syntax grammar. The abstract syntax itself might change with
+each Python release; this module helps to find out programmatically what
+the current grammar looks like and allows modifications of it.
+
+An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
+a flag to the `compile()` builtin function or by using the `parse()`
+function from this module. The result will be a tree of objects whose
+classes all inherit from `ast.AST`.
+
+A modified abstract syntax tree can be compiled into a Python code object
+using the built-in `compile()` function.
+
+Additionally various helper functions are provided that make working with
+the trees simpler. The main intention of the helper functions and this
+module in general is to provide an easy to use interface for libraries
+that work tightly with the python syntax (template engines for example).
+
+:copyright: Copyright 2008 by Armin Ronacher.
+:license: Python License.
"""
import sys
import re
@@ -32,13 +28,15 @@
 def parse(source, filename='<unknown>', mode='exec', *,
- type_comments=False, feature_version=None):
+ type_comments=False, feature_version=None, optimize=-1):
"""
Parse the source into an AST node.
Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
Pass type_comments=True to get back type comments where the syntax allows.
"""
flags = PyCF_ONLY_AST
+ if optimize > 0:
+ flags |= PyCF_OPTIMIZED_AST
if type_comments:
flags |= PyCF_TYPE_COMMENTS
if feature_version is None:
@@ -50,7 +48,7 @@ def parse(source, filename='', mode='exec', *,
feature_version = minor
# Else it should be an int giving the minor version for 3.x.
return compile(source, filename, mode, flags,
- _feature_version=feature_version)
+ _feature_version=feature_version, optimize=optimize)
def literal_eval(node_or_string):
@@ -112,7 +110,11 @@ def _convert(node):
return _convert(node_or_string)
-def dump(node, annotate_fields=True, include_attributes=False, *, indent=None):
+def dump(
+ node, annotate_fields=True, include_attributes=False,
+ *,
+ indent=None, show_empty=False,
+):
"""
Return a formatted dump of the tree in node. This is mainly useful for
debugging purposes. If annotate_fields is true (by default),
@@ -123,6 +125,8 @@ def dump(node, annotate_fields=True, include_attributes=False, *, indent=None):
include_attributes can be set to true. If indent is a non-negative
integer or string, then the tree will be pretty-printed with that indent
level. None (the default) selects the single line representation.
+ If show_empty is False, then empty lists and fields that are None
+ will be omitted from the output for better readability.
"""
def _format(node, level=0):
if indent is not None:
@@ -135,6 +139,7 @@ def _format(node, level=0):
if isinstance(node, AST):
cls = type(node)
args = []
+ args_buffer = []
allsimple = True
keywords = annotate_fields
for name in node._fields:
@@ -146,6 +151,16 @@ def _format(node, level=0):
if value is None and getattr(cls, name, ...) is None:
keywords = True
continue
+ if not show_empty:
+ if value == []:
+ field_type = cls._field_types.get(name, object)
+ if getattr(field_type, '__origin__', ...) is list:
+ if not keywords:
+ args_buffer.append(repr(value))
+ continue
+ if not keywords:
+ args.extend(args_buffer)
+ args_buffer = []
value, simple = _format(value, level)
allsimple = allsimple and simple
if keywords:
@@ -726,12 +741,11 @@ class _Unparser(NodeVisitor):
output source code for the abstract syntax; original formatting
is disregarded."""
- def __init__(self, *, _avoid_backslashes=False):
+ def __init__(self):
self._source = []
self._precedences = {}
self._type_ignores = {}
self._indent = 0
- self._avoid_backslashes = _avoid_backslashes
self._in_try_star = False
def interleave(self, inter, f, seq):
@@ -1104,12 +1118,21 @@ def visit_TypeVar(self, node):
if node.bound:
self.write(": ")
self.traverse(node.bound)
+ if node.default_value:
+ self.write(" = ")
+ self.traverse(node.default_value)
def visit_TypeVarTuple(self, node):
self.write("*" + node.name)
+ if node.default_value:
+ self.write(" = ")
+ self.traverse(node.default_value)
def visit_ParamSpec(self, node):
self.write("**" + node.name)
+ if node.default_value:
+ self.write(" = ")
+ self.traverse(node.default_value)
def visit_TypeAlias(self, node):
self.fill("type ")
@@ -1246,9 +1269,14 @@ def visit_JoinedStr(self, node):
fallback_to_repr = True
break
quote_types = new_quote_types
- elif "\n" in value:
- quote_types = [q for q in quote_types if q in _MULTI_QUOTES]
- assert quote_types
+ else:
+ if "\n" in value:
+ quote_types = [q for q in quote_types if q in _MULTI_QUOTES]
+ assert quote_types
+
+ new_quote_types = [q for q in quote_types if q not in value]
+ if new_quote_types:
+ quote_types = new_quote_types
new_fstring_parts.append(value)
if fallback_to_repr:
@@ -1268,13 +1296,19 @@ def visit_JoinedStr(self, node):
quote_type = quote_types[0]
self.write(f"{quote_type}{value}{quote_type}")
- def _write_fstring_inner(self, node):
+ def _write_fstring_inner(self, node, is_format_spec=False):
if isinstance(node, JoinedStr):
# for both the f-string itself, and format_spec
for value in node.values:
- self._write_fstring_inner(value)
+ self._write_fstring_inner(value, is_format_spec=is_format_spec)
elif isinstance(node, Constant) and isinstance(node.value, str):
value = node.value.replace("{", "{{").replace("}", "}}")
+
+ if is_format_spec:
+ value = value.replace("\\", "\\\\")
+ value = value.replace("'", "\\'")
+ value = value.replace('"', '\\"')
+ value = value.replace("\n", "\\n")
self.write(value)
elif isinstance(node, FormattedValue):
self.visit_FormattedValue(node)
@@ -1297,7 +1331,7 @@ def unparse_inner(inner):
self.write(f"!{chr(node.conversion)}")
if node.format_spec:
self.write(":")
- self._write_fstring_inner(node.format_spec)
+ self._write_fstring_inner(node.format_spec, is_format_spec=True)
def visit_Name(self, node):
self.write(node.id)
@@ -1317,8 +1351,6 @@ def _write_constant(self, value):
.replace("inf", _INFSTR)
.replace("nan", f"({_INFSTR}-{_INFSTR})")
)
- elif self._avoid_backslashes and isinstance(value, str):
- self._write_str_avoiding_backslashes(value)
else:
self.write(repr(value))
@@ -1805,8 +1837,7 @@ def main():
import argparse
parser = argparse.ArgumentParser(prog='python -m ast')
- parser.add_argument('infile', type=argparse.FileType(mode='rb'), nargs='?',
- default='-',
+ parser.add_argument('infile', nargs='?', default='-',
help='the file to parse; defaults to stdin')
parser.add_argument('-m', '--mode', default='exec',
choices=('exec', 'single', 'eval', 'func_type'),
@@ -1820,9 +1851,14 @@ def main():
help='indentation of nodes (number of spaces)')
args = parser.parse_args()
- with args.infile as infile:
- source = infile.read()
- tree = parse(source, args.infile.name, args.mode, type_comments=args.no_type_comments)
+ if args.infile == '-':
+        name = '<stdin>'
+ source = sys.stdin.buffer.read()
+ else:
+ name = args.infile
+ with open(args.infile, 'rb') as infile:
+ source = infile.read()
+ tree = parse(source, name, args.mode, type_comments=args.no_type_comments)
print(dump(tree, include_attributes=args.include_attributes, indent=args.indent))
if __name__ == '__main__':
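A short illustration of the new parse(optimize=...) and dump(show_empty=...) parameters (a sketch, not part of the patch; it assumes an interpreter whose compile() accepts PyCF_OPTIMIZED_AST, i.e. CPython 3.13+):

    import ast

    tree = ast.parse("x = 1 + 2", optimize=1)            # optimizer folds constants in the returned AST
    print(ast.dump(tree.body[0].value))                  # expected: Constant(value=3)
    print(ast.dump(ast.parse("f()"), show_empty=False))  # empty arg lists / None fields omitted
    print(ast.dump(ast.parse("f()"), show_empty=True))   # ... and spelled out when requested
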
diff --git a/Lib/codeop.py b/Lib/codeop.py
index eea6cbc701..adf000ba29 100644
--- a/Lib/codeop.py
+++ b/Lib/codeop.py
@@ -44,6 +44,7 @@
# Caveat emptor: These flags are undocumented on purpose and depending
# on their effect outside the standard library is **unsupported**.
PyCF_DONT_IMPLY_DEDENT = 0x200
+PyCF_ONLY_AST = 0x400
PyCF_ALLOW_INCOMPLETE_INPUT = 0x4000
def _maybe_compile(compiler, source, filename, symbol):
@@ -73,15 +74,6 @@ def _maybe_compile(compiler, source, filename, symbol):
return compiler(source, filename, symbol, incomplete_input=False)
-def _is_syntax_error(err1, err2):
- rep1 = repr(err1)
- rep2 = repr(err2)
- if "was never closed" in rep1 and "was never closed" in rep2:
- return False
- if rep1 == rep2:
- return True
- return False
-
def _compile(source, filename, symbol, incomplete_input=True):
flags = 0
if incomplete_input:
@@ -89,7 +81,6 @@ def _compile(source, filename, symbol, incomplete_input=True):
flags |= PyCF_DONT_IMPLY_DEDENT
return compile(source, filename, symbol, flags)
-
 def compile_command(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
@@ -119,12 +110,14 @@ class Compile:
def __init__(self):
self.flags = PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT
- def __call__(self, source, filename, symbol, **kwargs):
- flags = self.flags
+ def __call__(self, source, filename, symbol, flags=0, **kwargs):
+ flags |= self.flags
if kwargs.get('incomplete_input', True) is False:
flags &= ~PyCF_DONT_IMPLY_DEDENT
flags &= ~PyCF_ALLOW_INCOMPLETE_INPUT
codeob = compile(source, filename, symbol, flags, True)
+ if flags & PyCF_ONLY_AST:
+ return codeob # this is an ast.Module in this case
for feature in _features:
if codeob.co_flags & feature.compiler_flag:
self.flags |= feature.compiler_flag
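A hypothetical use of the new flags pass-through in codeop.Compile (a sketch, not part of the patch; PyCF_ONLY_AST here is the module-level constant added above):

    import ast, codeop

    compiler = codeop.Compile()
    node = compiler("x = 1\n", "<input>", "exec", flags=codeop.PyCF_ONLY_AST)
    print(isinstance(node, ast.Module))   # True -- an AST is returned instead of a code object
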
diff --git a/Lib/csv.py b/Lib/csv.py
index 77f30c8d2b..cd20265987 100644
--- a/Lib/csv.py
+++ b/Lib/csv.py
@@ -1,28 +1,90 @@
-"""
-csv.py - read/write/investigate CSV files
+r"""
+CSV parsing and writing.
+
+This module provides classes that assist in the reading and writing
+of Comma Separated Value (CSV) files, and implements the interface
+described by PEP 305. Although many CSV files are simple to parse,
+the format is not formally defined by a stable specification and
+is subtle enough that parsing lines of a CSV file with something
+like line.split(",") is bound to fail. The module supports three
+basic APIs: reading, writing, and registration of dialects.
+
+
+DIALECT REGISTRATION:
+
+Readers and writers support a dialect argument, which is a convenient
+handle on a group of settings. When the dialect argument is a string,
+it identifies one of the dialects previously registered with the module.
+If it is a class or instance, the attributes of the argument are used as
+the settings for the reader or writer:
+
+ class excel:
+ delimiter = ','
+ quotechar = '"'
+ escapechar = None
+ doublequote = True
+ skipinitialspace = False
+ lineterminator = '\r\n'
+ quoting = QUOTE_MINIMAL
+
+SETTINGS:
+
+ * quotechar - specifies a one-character string to use as the
+ quoting character. It defaults to '"'.
+ * delimiter - specifies a one-character string to use as the
+ field separator. It defaults to ','.
+ * skipinitialspace - specifies how to interpret spaces which
+ immediately follow a delimiter. It defaults to False, which
+ means that spaces immediately following a delimiter is part
+ of the following field.
+ * lineterminator - specifies the character sequence which should
+ terminate rows.
+ * quoting - controls when quotes should be generated by the writer.
+ It can take on any of the following module constants:
+
+ csv.QUOTE_MINIMAL means only when required, for example, when a
+ field contains either the quotechar or the delimiter
+ csv.QUOTE_ALL means that quotes are always placed around fields.
+ csv.QUOTE_NONNUMERIC means that quotes are always placed around
+ fields which do not parse as integers or floating-point
+ numbers.
+ csv.QUOTE_STRINGS means that quotes are always placed around
+ fields which are strings. Note that the Python value None
+ is not a string.
+ csv.QUOTE_NOTNULL means that quotes are only placed around fields
+ that are not the Python value None.
+ csv.QUOTE_NONE means that quotes are never placed around fields.
+ * escapechar - specifies a one-character string used to escape
+ the delimiter when quoting is set to QUOTE_NONE.
+ * doublequote - controls the handling of quotes inside fields. When
+ True, two consecutive quotes are interpreted as one during read,
+ and when writing, each quote character embedded in the data is
+ written as two quotes
"""
import re
import types
-from _csv import Error, __version__, writer, reader, register_dialect, \
+from _csv import Error, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
- QUOTE_STRINGS, QUOTE_NOTNULL, \
- __doc__
+ QUOTE_STRINGS, QUOTE_NOTNULL
from _csv import Dialect as _Dialect
from io import StringIO
__all__ = ["QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"QUOTE_STRINGS", "QUOTE_NOTNULL",
- "Error", "Dialect", "__doc__", "excel", "excel_tab",
+ "Error", "Dialect", "excel", "excel_tab",
"field_size_limit", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
- "unregister_dialect", "__version__", "DictReader", "DictWriter",
+ "unregister_dialect", "DictReader", "DictWriter",
"unix_dialect"]
+__version__ = "1.0"
+
+
class Dialect:
"""Describe a CSV dialect.
@@ -51,8 +113,8 @@ def _validate(self):
try:
_Dialect(self)
except TypeError as e:
- # We do this for compatibility with py2.3
- raise Error(str(e))
+ # Re-raise to get a traceback showing more user code.
+ raise Error(str(e)) from None
class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
diff --git a/Lib/decimal.py b/Lib/decimal.py
index 7746ea2601..ee3147f5dd 100644
--- a/Lib/decimal.py
+++ b/Lib/decimal.py
@@ -1,11 +1,109 @@
+"""Decimal fixed-point and floating-point arithmetic.
+
+This is an implementation of decimal floating-point arithmetic based on
+the General Decimal Arithmetic Specification:
+
+ http://speleotrove.com/decimal/decarith.html
+
+and IEEE standard 854-1987:
+
+ http://en.wikipedia.org/wiki/IEEE_854-1987
+
+Decimal floating point has finite precision with arbitrarily large bounds.
+
+The purpose of this module is to support arithmetic using familiar
+"schoolhouse" rules and to avoid some of the tricky representation
+issues associated with binary floating point. The package is especially
+useful for financial applications or for contexts where users have
+expectations that are at odds with binary floating point (for instance,
+in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
+of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
+Decimal('0.00')).
+
+Here are some examples of using the decimal module:
+
+>>> from decimal import *
+>>> setcontext(ExtendedContext)
+>>> Decimal(0)
+Decimal('0')
+>>> Decimal('1')
+Decimal('1')
+>>> Decimal('-.0123')
+Decimal('-0.0123')
+>>> Decimal(123456)
+Decimal('123456')
+>>> Decimal('123.45e12345678')
+Decimal('1.2345E+12345680')
+>>> Decimal('1.33') + Decimal('1.27')
+Decimal('2.60')
+>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
+Decimal('-2.20')
+>>> dig = Decimal(1)
+>>> print(dig / Decimal(3))
+0.333333333
+>>> getcontext().prec = 18
+>>> print(dig / Decimal(3))
+0.333333333333333333
+>>> print(dig.sqrt())
+1
+>>> print(Decimal(3).sqrt())
+1.73205080756887729
+>>> print(Decimal(3) ** 123)
+4.85192780976896427E+58
+>>> inf = Decimal(1) / Decimal(0)
+>>> print(inf)
+Infinity
+>>> neginf = Decimal(-1) / Decimal(0)
+>>> print(neginf)
+-Infinity
+>>> print(neginf + inf)
+NaN
+>>> print(neginf * inf)
+-Infinity
+>>> print(dig / 0)
+Infinity
+>>> getcontext().traps[DivisionByZero] = 1
+>>> print(dig / 0)
+Traceback (most recent call last):
+ ...
+ ...
+ ...
+decimal.DivisionByZero: x / 0
+>>> c = Context()
+>>> c.traps[InvalidOperation] = 0
+>>> print(c.flags[InvalidOperation])
+0
+>>> c.divide(Decimal(0), Decimal(0))
+Decimal('NaN')
+>>> c.traps[InvalidOperation] = 1
+>>> print(c.flags[InvalidOperation])
+1
+>>> c.flags[InvalidOperation] = 0
+>>> print(c.flags[InvalidOperation])
+0
+>>> print(c.divide(Decimal(0), Decimal(0)))
+Traceback (most recent call last):
+ ...
+ ...
+ ...
+decimal.InvalidOperation: 0 / 0
+>>> print(c.flags[InvalidOperation])
+1
+>>> c.flags[InvalidOperation] = 0
+>>> c.traps[InvalidOperation] = 0
+>>> print(c.divide(Decimal(0), Decimal(0)))
+NaN
+>>> print(c.flags[InvalidOperation])
+1
+>>>
+"""
try:
from _decimal import *
- from _decimal import __doc__
from _decimal import __version__
from _decimal import __libmpdec_version__
except ImportError:
- from _pydecimal import *
- from _pydecimal import __doc__
- from _pydecimal import __version__
- from _pydecimal import __libmpdec_version__
+ import _pydecimal
+ import sys
+ _pydecimal.__doc__ = __doc__
+ sys.modules[__name__] = _pydecimal
diff --git a/Lib/html/__init__.py b/Lib/html/__init__.py
index da0a0a3ce7..1543460ca3 100644
--- a/Lib/html/__init__.py
+++ b/Lib/html/__init__.py
@@ -25,7 +25,7 @@ def escape(s, quote=True):
return s
-# see http://www.w3.org/TR/html5/syntax.html#tokenizing-character-references
+# see https://html.spec.whatwg.org/multipage/parsing.html#numeric-character-reference-end-state
_invalid_charrefs = {
0x00: '\ufffd', # REPLACEMENT CHARACTER
diff --git a/Lib/html/entities.py b/Lib/html/entities.py
index dc508631ac..eb6dc12190 100644
--- a/Lib/html/entities.py
+++ b/Lib/html/entities.py
@@ -3,8 +3,7 @@
__all__ = ['html5', 'name2codepoint', 'codepoint2name', 'entitydefs']
-# maps the HTML entity name to the Unicode code point
-# from https://html.spec.whatwg.org/multipage/named-characters.html
+# maps HTML4 entity name to the Unicode code point
name2codepoint = {
'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1
@@ -261,7 +260,11 @@
}
-# maps the HTML5 named character references to the equivalent Unicode character(s)
+# HTML5 named character references
+# Generated by Tools/build/parse_html5_entities.py
+# from https://html.spec.whatwg.org/entities.json and
+# https://html.spec.whatwg.org/multipage/named-characters.html.
+# Map HTML5 named character references to the equivalent Unicode character(s).
html5 = {
'Aacute': '\xc1',
'aacute': '\xe1',
diff --git a/Lib/html/parser.py b/Lib/html/parser.py
index bef0f4fe4b..1e30956fe2 100644
--- a/Lib/html/parser.py
+++ b/Lib/html/parser.py
@@ -12,6 +12,7 @@
import _markupbase
from html import unescape
+from html.entities import html5 as html5_entities
__all__ = ['HTMLParser']
@@ -23,6 +24,7 @@
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
 charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
+attr_charref = re.compile(r'&(#[0-9]+|#[xX][0-9a-fA-F]+|[a-zA-Z][a-zA-Z0-9]*)[;=]?')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
@@ -57,6 +59,22 @@
 # </ and the tag name, so maybe this should be fixed
 endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
+# Character reference processing logic specific to attribute values
+# See: https://html.spec.whatwg.org/multipage/parsing.html#named-character-reference-state
+def _replace_attr_charref(match):
+ ref = match.group(0)
+ # Numeric / hex char refs must always be unescaped
+    if ref.startswith('&#'):
+ return unescape(ref)
+ # Named character / entity references must only be unescaped
+ # if they are an exact match, and they are not followed by an equals sign
+ if not ref.endswith('=') and ref[1:] in html5_entities:
+ return unescape(ref)
+ # Otherwise do not unescape
+ return ref
+
+def _unescape_attrvalue(s):
+ return attr_charref.sub(_replace_attr_charref, s)
class HTMLParser(_markupbase.ParserBase):
@@ -89,6 +107,7 @@ def __init__(self, *, convert_charrefs=True):
If convert_charrefs is True (the default), all character references
are automatically converted to the corresponding Unicode characters.
"""
+ super().__init__()
self.convert_charrefs = convert_charrefs
self.reset()
@@ -98,7 +117,7 @@ def reset(self):
self.lasttag = '???'
self.interesting = interesting_normal
self.cdata_elem = None
- _markupbase.ParserBase.reset(self)
+ super().reset()
def feed(self, data):
r"""Feed data to the parser.
@@ -241,7 +260,7 @@ def goahead(self, end):
else:
assert 0, "interesting.search() lied"
# end while
- if end and i < n and not self.cdata_elem:
+ if end and i < n:
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(unescape(rawdata[i:n]))
else:
@@ -259,7 +278,7 @@ def parse_html_declaration(self, i):
         if rawdata[i:i+4] == '<!--':
diff --git a/Lib/logging/__init__.py b/Lib/logging/__init__.py
--- a/Lib/logging/__init__.py
+++ b/Lib/logging/__init__.py
+        # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns --> 999 ms
+ # Convert to float by adding 0.0 for historical reasons. See gh-89047
+ self.msecs = (ct % 1_000_000_000) // 1_000_000 + 0.0
+ if self.msecs == 999.0 and int(self.created) != ct // 1_000_000_000:
+ # ns -> sec conversion can round up, e.g:
+ # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec
+ self.msecs = 0.0
+
+ self.relativeCreated = (ct - _startTime) / 1e6
if logThreads:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
@@ -378,7 +386,7 @@ def __init__(self, name, level, pathname, lineno,
def __repr__(self):
         return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
- self.pathname, self.lineno, self.msg)
+ self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
@@ -572,7 +580,7 @@ class Formatter(object):
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
- %(created)f Time when the LogRecord was created (time.time()
+ %(created)f Time when the LogRecord was created (time.time_ns() / 1e9
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
@@ -583,6 +591,7 @@ class Formatter(object):
%(threadName)s Thread name (if available)
%(taskName)s Task name (if available)
%(process)d Process ID (if available)
+ %(processName)s Process name (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
@@ -608,7 +617,7 @@ def __init__(self, fmt=None, datefmt=None, style='%', validate=True, *,
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
- _STYLES.keys()))
+ _STYLES.keys()))
self._style = _STYLES[style][0](fmt, defaults=defaults)
if validate:
self._style.validate()
@@ -658,7 +667,7 @@ def formatException(self, ei):
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
- traceback.print_exception(ei[0], ei[1], tb, None, sio)
+ traceback.print_exception(ei[0], ei[1], tb, limit=None, file=sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
@@ -879,25 +888,20 @@ def _removeHandlerRef(wr):
# set to None. It can also be called from another thread. So we need to
# pre-emptively grab the necessary globals and check if they're None,
# to prevent race conditions and failures during interpreter shutdown.
- acquire, release, handlers = _acquireLock, _releaseLock, _handlerList
- if acquire and release and handlers:
- acquire()
- try:
- handlers.remove(wr)
- except ValueError:
- pass
- finally:
- release()
+ handlers, lock = _handlerList, _lock
+ if lock and handlers:
+ with lock:
+ try:
+ handlers.remove(wr)
+ except ValueError:
+ pass
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
- _acquireLock()
- try:
+ with _lock:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
- finally:
- _releaseLock()
def getHandlerByName(name):
@@ -912,8 +916,7 @@ def getHandlerNames():
"""
Return all known handler names as an immutable set.
"""
- result = set(_handlers.keys())
- return frozenset(result)
+ return frozenset(_handlers)
class Handler(Filterer):
@@ -943,15 +946,12 @@ def get_name(self):
return self._name
def set_name(self, name):
- _acquireLock()
- try:
+ with _lock:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
- finally:
- _releaseLock()
name = property(get_name, set_name)
@@ -1023,11 +1023,8 @@ def handle(self, record):
if isinstance(rv, LogRecord):
record = rv
if rv:
- self.acquire()
- try:
+ with self.lock:
self.emit(record)
- finally:
- self.release()
return rv
def setFormatter(self, fmt):
@@ -1055,13 +1052,10 @@ def close(self):
methods.
"""
#get the module data lock, as we're updating a shared structure.
- _acquireLock()
- try: #unlikely to raise an exception, but you never know...
+ with _lock:
self._closed = True
if self._name and self._name in _handlers:
del _handlers[self._name]
- finally:
- _releaseLock()
def handleError(self, record):
"""
@@ -1076,14 +1070,14 @@ def handleError(self, record):
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
- t, v, tb = sys.exc_info()
+ exc = sys.exception()
try:
sys.stderr.write('--- Logging error ---\n')
- traceback.print_exception(t, v, tb, None, sys.stderr)
+ traceback.print_exception(exc, limit=None, file=sys.stderr)
sys.stderr.write('Call stack:\n')
# Walk the stack frame up until we're out of logging,
# so as to print the calling context.
- frame = tb.tb_frame
+ frame = exc.__traceback__.tb_frame
while (frame and os.path.dirname(frame.f_code.co_filename) ==
__path__[0]):
frame = frame.f_back
@@ -1092,7 +1086,7 @@ def handleError(self, record):
else:
# couldn't find the right stack frame, for some reason
sys.stderr.write('Logged from file %s, line %s\n' % (
- record.filename, record.lineno))
+ record.filename, record.lineno))
# Issue 18671: output logging message and arguments
try:
sys.stderr.write('Message: %r\n'
@@ -1104,11 +1098,11 @@ def handleError(self, record):
sys.stderr.write('Unable to print the message and arguments'
' - possible formatting error.\nUse the'
' traceback above to help find the error.\n'
- )
+ )
except OSError: #pragma: no cover
pass # see issue 5971
finally:
- del t, v, tb
+ del exc
def __repr__(self):
level = getLevelName(self.level)
@@ -1138,12 +1132,9 @@ def flush(self):
"""
Flushes the stream.
"""
- self.acquire()
- try:
+ with self.lock:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
- finally:
- self.release()
def emit(self, record):
"""
@@ -1179,12 +1170,9 @@ def setStream(self, stream):
result = None
else:
result = self.stream
- self.acquire()
- try:
+ with self.lock:
self.flush()
self.stream = stream
- finally:
- self.release()
return result
def __repr__(self):
@@ -1234,8 +1222,7 @@ def close(self):
"""
Closes the stream.
"""
- self.acquire()
- try:
+ with self.lock:
try:
if self.stream:
try:
@@ -1251,8 +1238,6 @@ def close(self):
# Also see Issue #42378: we also rely on
# self._closed being set to True there
StreamHandler.close(self)
- finally:
- self.release()
def _open(self):
"""
@@ -1388,8 +1373,7 @@ def getLogger(self, name):
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
- _acquireLock()
- try:
+ with _lock:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
@@ -1404,8 +1388,6 @@ def getLogger(self, name):
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
- finally:
- _releaseLock()
return rv
def setLoggerClass(self, klass):
@@ -1468,12 +1450,11 @@ def _clear_cache(self):
Called when level changes are made
"""
- _acquireLock()
- for logger in self.loggerDict.values():
- if isinstance(logger, Logger):
- logger._cache.clear()
- self.root._cache.clear()
- _releaseLock()
+ with _lock:
+ for logger in self.loggerDict.values():
+ if isinstance(logger, Logger):
+ logger._cache.clear()
+ self.root._cache.clear()
#---------------------------------------------------------------------------
# Logger classes and functions
@@ -1494,6 +1475,8 @@ class Logger(Filterer):
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
+ _tls = threading.local()
+
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
@@ -1552,7 +1535,7 @@ def warning(self, msg, *args, **kwargs):
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
- "use 'warning' instead", DeprecationWarning, 2)
+ "use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
@@ -1649,7 +1632,7 @@ def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
- sinfo)
+ sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
@@ -1690,36 +1673,35 @@ def handle(self, record):
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
- if self.disabled:
- return
- maybe_record = self.filter(record)
- if not maybe_record:
+ if self._is_disabled():
return
- if isinstance(maybe_record, LogRecord):
- record = maybe_record
- self.callHandlers(record)
+
+ self._tls.in_progress = True
+ try:
+ maybe_record = self.filter(record)
+ if not maybe_record:
+ return
+ if isinstance(maybe_record, LogRecord):
+ record = maybe_record
+ self.callHandlers(record)
+ finally:
+ self._tls.in_progress = False
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
- _acquireLock()
- try:
+ with _lock:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
- finally:
- _releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
- _acquireLock()
- try:
+ with _lock:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
- finally:
- _releaseLock()
def hasHandlers(self):
"""
@@ -1791,22 +1773,19 @@ def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
- if self.disabled:
+ if self._is_disabled():
return False
try:
return self._cache[level]
except KeyError:
- _acquireLock()
- try:
+ with _lock:
if self.manager.disable >= level:
is_enabled = self._cache[level] = False
else:
is_enabled = self._cache[level] = (
- level >= self.getEffectiveLevel()
+ level >= self.getEffectiveLevel()
)
- finally:
- _releaseLock()
return is_enabled
def getChild(self, suffix):
@@ -1836,16 +1815,18 @@ def _hierlevel(logger):
return 1 + logger.name.count('.')
d = self.manager.loggerDict
- _acquireLock()
- try:
+ with _lock:
# exclude PlaceHolders - the last check is to ensure that lower-level
# descendants aren't returned - if there are placeholders, a logger's
# parent field might point to a grandparent or ancestor thereof.
return set(item for item in d.values()
if isinstance(item, Logger) and item.parent is self and
_hierlevel(item) == 1 + _hierlevel(item.parent))
- finally:
- _releaseLock()
+
+ def _is_disabled(self):
+ # We need to use getattr as it will only be set the first time a log
+ # message is recorded on any given thread
+ return self.disabled or getattr(self._tls, 'in_progress', False)
def __repr__(self):
level = getLevelName(self.getEffectiveLevel())
@@ -1881,7 +1862,7 @@ class LoggerAdapter(object):
information in logging output.
"""
- def __init__(self, logger, extra=None):
+ def __init__(self, logger, extra=None, merge_extra=False):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
@@ -1891,9 +1872,20 @@ def __init__(self, logger, extra=None):
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
+
+ By default, LoggerAdapter objects will drop the "extra" argument
+ passed on the individual log calls to use its own instead.
+
+ Initializing it with merge_extra=True will instead merge both
+ maps when logging, the individual call extra taking precedence
+ over the LoggerAdapter instance extra
+
+ .. versionchanged:: 3.13
+ The *merge_extra* argument was added.
"""
self.logger = logger
self.extra = extra
+ self.merge_extra = merge_extra
def process(self, msg, kwargs):
"""
@@ -1905,7 +1897,10 @@ def process(self, msg, kwargs):
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
- kwargs["extra"] = self.extra
+ if self.merge_extra and "extra" in kwargs:
+ kwargs["extra"] = {**self.extra, **kwargs["extra"]}
+ else:
+ kwargs["extra"] = self.extra
return msg, kwargs
#
@@ -1931,7 +1926,7 @@ def warning(self, msg, *args, **kwargs):
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
- "use 'warning' instead", DeprecationWarning, 2)
+ "use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
@@ -2088,8 +2083,7 @@ def basicConfig(**kwargs):
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
- _acquireLock()
- try:
+ with _lock:
force = kwargs.pop('force', False)
encoding = kwargs.pop('encoding', None)
errors = kwargs.pop('errors', 'backslashreplace')
@@ -2125,7 +2119,7 @@ def basicConfig(**kwargs):
style = kwargs.pop("style", '%')
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
- _STYLES.keys()))
+ _STYLES.keys()))
fs = kwargs.pop("format", _STYLES[style][1])
fmt = Formatter(fs, dfs, style)
for h in handlers:
@@ -2138,8 +2132,6 @@ def basicConfig(**kwargs):
if kwargs:
keys = ', '.join(kwargs.keys())
raise ValueError('Unrecognised argument(s): %s' % keys)
- finally:
- _releaseLock()
#---------------------------------------------------------------------------
# Utility functions at module level.
@@ -2202,7 +2194,7 @@ def warning(msg, *args, **kwargs):
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
- "use 'warning' instead", DeprecationWarning, 2)
+ "use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
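The merge_extra addition above changes how per-call "extra" interacts with the adapter's own mapping; a sketch (not part of the patch; the field names are made up, and it needs a logging module that already has the 3.13 behaviour):

    import logging

    logging.basicConfig(format="%(levelname)s %(request_id)s %(user)s %(message)s")
    adapter = logging.LoggerAdapter(logging.getLogger("app"),
                                    {"request_id": "r-1", "user": "-"},
                                    merge_extra=True)
    adapter.warning("login failed", extra={"user": "alice"})
    # expected output: WARNING r-1 alice login failed   (call-site extra wins on conflicts)
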
diff --git a/Lib/logging/config.py b/Lib/logging/config.py
index ef04a35168..190b4f9225 100644
--- a/Lib/logging/config.py
+++ b/Lib/logging/config.py
@@ -83,15 +83,12 @@ def fileConfig(fname, defaults=None, disable_existing_loggers=True, encoding=Non
formatters = _create_formatters(cp)
# critical section
- logging._acquireLock()
- try:
+ with logging._lock:
_clearExistingHandlers()
# Handlers add themselves to logging._handlers
handlers = _install_handlers(cp, formatters)
_install_loggers(cp, handlers, disable_existing_loggers)
- finally:
- logging._releaseLock()
def _resolve(name):
@@ -314,7 +311,7 @@ def convert_with_key(self, key, value, replace=True):
if replace:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
+ ConvertingTuple):
result.parent = self
result.key = key
return result
@@ -323,7 +320,7 @@ def convert(self, value):
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
+ ConvertingTuple):
result.parent = self
return result
@@ -378,7 +375,7 @@ class BaseConfigurator(object):
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
- INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
+ INDEX_PATTERN = re.compile(r'^\[([^\[\]]*)\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
@@ -464,8 +461,8 @@ def convert(self, value):
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
- elif not isinstance(value, ConvertingTuple) and \
- isinstance(value, tuple) and not hasattr(value, '_fields'):
+ elif not isinstance(value, ConvertingTuple) and\
+ isinstance(value, tuple) and not hasattr(value, '_fields'):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, str): # str for py3k
@@ -543,8 +540,7 @@ def configure(self):
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
- logging._acquireLock()
- try:
+ with logging._lock:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
for name in handlers:
@@ -585,7 +581,7 @@ def configure(self):
for name in formatters:
try:
formatters[name] = self.configure_formatter(
- formatters[name])
+ formatters[name])
except Exception as e:
raise ValueError('Unable to configure '
'formatter %r' % name) from e
@@ -688,8 +684,6 @@ def configure(self):
except Exception as e:
raise ValueError('Unable to configure root '
'logger') from e
- finally:
- logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
@@ -700,10 +694,9 @@ def configure_formatter(self, config):
except TypeError as te:
if "'format'" not in str(te):
raise
- #Name of parameter changed from fmt to format.
- #Retry with old name.
- #This is so that code can be used with older Python versions
- #(e.g. by Django)
+ # logging.Formatter and its subclasses expect the `fmt`
+ # parameter instead of `format`. Retry passing configuration
+ # with `fmt`.
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
@@ -812,7 +805,7 @@ def configure_handler(self, config):
elif issubclass(klass, logging.handlers.QueueHandler):
# Another special case for handler which refers to other handlers
# if 'handlers' not in config:
- # raise ValueError('No handlers specified for a QueueHandler')
+ # raise ValueError('No handlers specified for a QueueHandler')
if 'queue' in config:
qspec = config['queue']
@@ -836,8 +829,8 @@ def configure_handler(self, config):
else:
if isinstance(lspec, str):
listener = self.resolve(lspec)
- if isinstance(listener, type) and \
- not issubclass(listener, logging.handlers.QueueListener):
+ if isinstance(listener, type) and\
+ not issubclass(listener, logging.handlers.QueueListener):
raise TypeError('Invalid listener specifier %r' % lspec)
elif isinstance(lspec, dict):
if '()' not in lspec:
@@ -861,11 +854,11 @@ def configure_handler(self, config):
except Exception as e:
raise ValueError('Unable to set required handler %r' % hn) from e
config['handlers'] = hlist
- elif issubclass(klass, logging.handlers.SMTPHandler) and \
- 'mailhost' in config:
+ elif issubclass(klass, logging.handlers.SMTPHandler) and\
+ 'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
- elif issubclass(klass, logging.handlers.SysLogHandler) and \
- 'address' in config:
+ elif issubclass(klass, logging.handlers.SysLogHandler) and\
+ 'address' in config:
config['address'] = self.as_tuple(config['address'])
if issubclass(klass, logging.handlers.QueueHandler):
factory = functools.partial(self._configure_queue_handler, klass)
@@ -1018,9 +1011,8 @@ class ConfigSocketReceiver(ThreadingTCPServer):
def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
handler=None, ready=None, verify=None):
ThreadingTCPServer.__init__(self, (host, port), handler)
- logging._acquireLock()
- self.abort = 0
- logging._releaseLock()
+ with logging._lock:
+ self.abort = 0
self.timeout = 1
self.ready = ready
self.verify = verify
@@ -1034,9 +1026,8 @@ def serve_until_stopped(self):
self.timeout)
if rd:
self.handle_request()
- logging._acquireLock()
- abort = self.abort
- logging._releaseLock()
+ with logging._lock:
+ abort = self.abort
self.server_close()
class Server(threading.Thread):
@@ -1057,9 +1048,8 @@ def run(self):
self.port = server.server_address[1]
self.ready.set()
global _listener
- logging._acquireLock()
- _listener = server
- logging._releaseLock()
+ with logging._lock:
+ _listener = server
server.serve_until_stopped()
return Server(ConfigSocketReceiver, ConfigStreamHandler, port, verify)
@@ -1069,10 +1059,7 @@ def stopListening():
Stop the listening server which was created with a call to listen().
"""
global _listener
- logging._acquireLock()
- try:
+ with logging._lock:
if _listener:
_listener.abort = 1
_listener = None
- finally:
- logging._releaseLock()
diff --git a/Lib/logging/handlers.py b/Lib/logging/handlers.py
index bf42ea1103..d3ea06c731 100644
--- a/Lib/logging/handlers.py
+++ b/Lib/logging/handlers.py
@@ -23,11 +23,17 @@
To use, simply 'import logging.handlers' and log away!
"""
-import io, logging, socket, os, pickle, struct, time, re
-from stat import ST_DEV, ST_INO, ST_MTIME
+import copy
+import io
+import logging
+import os
+import pickle
import queue
+import re
+import socket
+import struct
import threading
-import copy
+import time
#
# Some constants...
@@ -272,7 +278,7 @@ def __init__(self, filename, when='h', interval=1, backupCount=0,
# path object (see Issue #27493), but self.baseFilename will be a string
filename = self.baseFilename
if os.path.exists(filename):
- t = os.stat(filename)[ST_MTIME]
+ t = int(os.stat(filename).st_mtime)
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
@@ -304,10 +310,10 @@ def computeRollover(self, currentTime):
rotate_ts = _MIDNIGHT
else:
rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
- self.atTime.second)
+ self.atTime.second)
r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
- currentSecond)
+ currentSecond)
if r <= 0:
# Rotate time is before the current time (for example when
# self.rotateAt is 13:45 and it now 14:15), rotation is
@@ -465,8 +471,7 @@ class WatchedFileHandler(logging.FileHandler):
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
- for such a handler. Furthermore, ST_INO is not supported under
- Windows; stat always returns zero for this value.
+ for such a handler.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
@@ -482,9 +487,11 @@ def __init__(self, filename, mode='a', encoding=None, delay=False,
self._statstream()
def _statstream(self):
- if self.stream:
- sres = os.fstat(self.stream.fileno())
- self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
+ if self.stream is None:
+ return
+ sres = os.fstat(self.stream.fileno())
+ self.dev = sres.st_dev
+ self.ino = sres.st_ino
def reopenIfNeeded(self):
"""
@@ -494,6 +501,9 @@ def reopenIfNeeded(self):
has, close the old stream and reopen the file to get the
current stream.
"""
+ if self.stream is None:
+ return
+
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
@@ -501,18 +511,23 @@ def reopenIfNeeded(self):
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
+
+ # compare file system stat with that of our stream file handle
+ reopen = (sres.st_dev != self.dev or sres.st_ino != self.ino)
except FileNotFoundError:
- sres = None
- # compare file system stat with that of our stream file handle
- if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
- if self.stream is not None:
- # we have an open file handle, clean it up
- self.stream.flush()
- self.stream.close()
- self.stream = None # See Issue #21742: _open () might fail.
- # open a new file handle and get new stat info from that fd
- self.stream = self._open()
- self._statstream()
+ reopen = True
+
+ if not reopen:
+ return
+
+ # we have an open file handle, clean it up
+ self.stream.flush()
+ self.stream.close()
+ self.stream = None # See Issue #21742: _open () might fail.
+
+ # open a new file handle and get new stat info from that fd
+ self.stream = self._open()
+ self._statstream()
def emit(self, record):
"""
@@ -682,15 +697,12 @@ def close(self):
"""
Closes the socket.
"""
- self.acquire()
- try:
+ with self.lock:
sock = self.sock
if sock:
self.sock = None
sock.close()
logging.Handler.close(self)
- finally:
- self.release()
class DatagramHandler(SocketHandler):
"""
@@ -803,7 +815,7 @@ class SysLogHandler(logging.Handler):
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
- }
+ }
facility_names = {
"auth": LOG_AUTH,
@@ -830,7 +842,7 @@ class SysLogHandler(logging.Handler):
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
- }
+ }
# Originally added to work around GH-43683. Unnecessary since GH-50043 but kept
# for backwards compatibility.
@@ -950,15 +962,12 @@ def close(self):
"""
Closes the socket.
"""
- self.acquire()
- try:
+ with self.lock:
sock = self.socket
if sock:
self.socket = None
sock.close()
logging.Handler.close(self)
- finally:
- self.release()
def mapPriority(self, levelName):
"""
@@ -1031,7 +1040,8 @@ def __init__(self, mailhost, fromaddr, toaddrs, subject,
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
- certificate file. (This tuple is passed to the `starttls` method).
+ certificate file. (This tuple is passed to the
+ `ssl.SSLContext.load_cert_chain` method).
A timeout in seconds can be specified for the SMTP connection (the
default is one second).
"""
@@ -1084,8 +1094,23 @@ def emit(self, record):
msg.set_content(self.format(record))
if self.username:
if self.secure is not None:
+ import ssl
+
+ try:
+ keyfile = self.secure[0]
+ except IndexError:
+ keyfile = None
+
+ try:
+ certfile = self.secure[1]
+ except IndexError:
+ certfile = None
+
+ context = ssl._create_stdlib_context(
+ certfile=certfile, keyfile=keyfile
+ )
smtp.ehlo()
- smtp.starttls(*self.secure)
+ smtp.starttls(context=context)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.send_message(msg)
@@ -1132,10 +1157,10 @@ def __init__(self, appname, dllname=None, logtype="Application"):
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
- }
+ }
except ImportError:
- print("The Python Win32 extensions for NT (service, event " \
- "logging) appear not to be available.")
+ print("The Python Win32 extensions for NT (service, event "\
+ "logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
@@ -1330,11 +1355,8 @@ def flush(self):
This version just zaps the buffer to empty.
"""
- self.acquire()
- try:
+ with self.lock:
self.buffer.clear()
- finally:
- self.release()
def close(self):
"""
@@ -1378,17 +1400,14 @@ def shouldFlush(self, record):
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
- (record.levelno >= self.flushLevel)
+ (record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
- self.acquire()
- try:
+ with self.lock:
self.target = target
- finally:
- self.release()
def flush(self):
"""
@@ -1398,14 +1417,11 @@ def flush(self):
The record buffer is only cleared if a target has been set.
"""
- self.acquire()
- try:
+ with self.lock:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer.clear()
- finally:
- self.release()
def close(self):
"""
@@ -1416,12 +1432,9 @@ def close(self):
if self.flushOnClose:
self.flush()
finally:
- self.acquire()
- try:
+ with self.lock:
self.target = None
BufferingHandler.close(self)
- finally:
- self.release()
class QueueHandler(logging.Handler):
@@ -1532,6 +1545,9 @@ def start(self):
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
+ if self._thread is not None:
+ raise RuntimeError("Listener already started")
+
self._thread = t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
@@ -1603,6 +1619,7 @@ def stop(self):
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
- self.enqueue_sentinel()
- self._thread.join()
- self._thread = None
+ if self._thread: # see gh-114706 - allow calling this more than once
+ self.enqueue_sentinel()
+ self._thread.join()
+ self._thread = None
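+ # Illustrative usage sketch (not part of the patch): the usual pairing of
+ # QueueHandler and QueueListener.  With the change above, start() raises if
+ # the listener is already running and stop() may be called more than once.
+ #
+ #     import logging, logging.handlers, queue
+ #     q = queue.Queue()
+ #     listener = logging.handlers.QueueListener(q, logging.StreamHandler())
+ #     logging.getLogger().addHandler(logging.handlers.QueueHandler(q))
+ #     listener.start()
+ #     logging.getLogger().warning("hello")
+ #     listener.stop()
+ #     listener.stop()   # second call is now a no-op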
diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py
index d0582e3cd5..8caddd204d 100644
--- a/Lib/multiprocessing/connection.py
+++ b/Lib/multiprocessing/connection.py
@@ -19,7 +19,6 @@
import tempfile
import itertools
-import _multiprocessing
from . import util
@@ -28,6 +27,7 @@
_ForkingPickler = reduction.ForkingPickler
try:
+ import _multiprocessing
import _winapi
from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE
except ImportError:
@@ -846,7 +846,7 @@ def PipeClient(address):
_LEGACY_LENGTHS = (_MD5ONLY_MESSAGE_LENGTH, _MD5_DIGEST_LEN)
-def _get_digest_name_and_payload(message: bytes) -> (str, bytes):
+def _get_digest_name_and_payload(message): # type: (bytes) -> tuple[str, bytes]
"""Returns a digest name and the payload for a response hash.
If a legacy protocol is detected based on the message length
@@ -956,7 +956,7 @@ def answer_challenge(connection, authkey: bytes):
f'Protocol error, expected challenge: {message=}')
message = message[len(_CHALLENGE):]
if len(message) < _MD5ONLY_MESSAGE_LENGTH:
- raise AuthenticationError('challenge too short: {len(message)} bytes')
+ raise AuthenticationError(f'challenge too short: {len(message)} bytes')
digest = _create_response(authkey, message)
connection.send_bytes(digest)
response = connection.recv_bytes(256) # reject large message
@@ -1012,8 +1012,20 @@ def _exhaustive_wait(handles, timeout):
# returning the first signalled might create starvation issues.)
L = list(handles)
ready = []
+ # Windows limits WaitForMultipleObjects at 64 handles, and we use a
+ # few for synchronisation, so we switch to batched waits at 60.
+ if len(L) > 60:
+ try:
+ res = _winapi.BatchedWaitForMultipleObjects(L, False, timeout)
+ except TimeoutError:
+ return []
+ ready.extend(L[i] for i in res)
+ if res:
+ L = [h for i, h in enumerate(L) if i > res[0] & i not in res]
+ timeout = 0
while L:
- res = _winapi.WaitForMultipleObjects(L, False, timeout)
+ short_L = L[:60] if len(L) > 60 else L
+ res = _winapi.WaitForMultipleObjects(short_L, False, timeout)
if res == WAIT_TIMEOUT:
break
elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
diff --git a/Lib/multiprocessing/context.py b/Lib/multiprocessing/context.py
index de8a264829..f395e8b04d 100644
--- a/Lib/multiprocessing/context.py
+++ b/Lib/multiprocessing/context.py
@@ -145,7 +145,7 @@ def freeze_support(self):
'''Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
- if sys.platform == 'win32' and getattr(sys, 'frozen', False):
+ if self.get_start_method() == 'spawn' and getattr(sys, 'frozen', False):
from .spawn import freeze_support
freeze_support()
diff --git a/Lib/multiprocessing/forkserver.py b/Lib/multiprocessing/forkserver.py
index 4642707dae..bff7fb91d9 100644
--- a/Lib/multiprocessing/forkserver.py
+++ b/Lib/multiprocessing/forkserver.py
@@ -1,3 +1,4 @@
+import atexit
import errno
import os
import selectors
@@ -167,6 +168,8 @@ def ensure_running(self):
def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
'''Run forkserver.'''
if preload:
+ if sys_path is not None:
+ sys.path[:] = sys_path
if '__main__' in preload and main_path is not None:
process.current_process()._inheriting = True
try:
@@ -271,6 +274,8 @@ def sigchld_handler(*_unused):
selector.close()
unused_fds = [alive_r, child_w, sig_r, sig_w]
unused_fds.extend(pid_to_fd.values())
+ atexit._clear()
+ atexit.register(util._exit_function)
code = _serve_one(child_r, fds,
unused_fds,
old_handlers)
@@ -278,6 +283,7 @@ def sigchld_handler(*_unused):
sys.excepthook(*sys.exc_info())
sys.stderr.flush()
finally:
+ atexit._run_exitfuncs()
os._exit(code)
else:
# Send pid to client process
diff --git a/Lib/multiprocessing/managers.py b/Lib/multiprocessing/managers.py
index 75d9c18c20..ef791c2751 100644
--- a/Lib/multiprocessing/managers.py
+++ b/Lib/multiprocessing/managers.py
@@ -90,7 +90,10 @@ def dispatch(c, id, methodname, args=(), kwds={}):
kind, result = c.recv()
if kind == '#RETURN':
return result
- raise convert_to_error(kind, result)
+ try:
+ raise convert_to_error(kind, result)
+ finally:
+ del result # break reference cycle
def convert_to_error(kind, result):
if kind == '#ERROR':
@@ -755,22 +758,29 @@ class BaseProxy(object):
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
+ # Each instance gets a `_serial` number. Unlike `id(...)`, this number
+ # is never reused.
+ _next_serial = 1
+
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True, manager_owned=False):
with BaseProxy._mutex:
- tls_idset = BaseProxy._address_to_local.get(token.address, None)
- if tls_idset is None:
- tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
- BaseProxy._address_to_local[token.address] = tls_idset
+ tls_serials = BaseProxy._address_to_local.get(token.address, None)
+ if tls_serials is None:
+ tls_serials = util.ForkAwareLocal(), ProcessLocalSet()
+ BaseProxy._address_to_local[token.address] = tls_serials
+
+ self._serial = BaseProxy._next_serial
+ BaseProxy._next_serial += 1
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
- self._tls = tls_idset[0]
+ self._tls = tls_serials[0]
- # self._idset is used to record the identities of all shared
- # objects for which the current process owns references and
+ # self._all_serials is a set used to record the identities of all
+ # shared objects for which the current process owns references and
# which are in the manager at token.address
- self._idset = tls_idset[1]
+ self._all_serials = tls_serials[1]
self._token = token
self._id = self._token.id
@@ -833,7 +843,10 @@ def _callmethod(self, methodname, args=(), kwds={}):
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
- raise convert_to_error(kind, result)
+ try:
+ raise convert_to_error(kind, result)
+ finally:
+ del result # break reference cycle
def _getvalue(self):
'''
@@ -850,20 +863,20 @@ def _incref(self):
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
- self._idset.add(self._id)
+ self._all_serials.add(self._serial)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
- args=(self._token, self._authkey, state,
- self._tls, self._idset, self._Client),
+ args=(self._token, self._serial, self._authkey, state,
+ self._tls, self._all_serials, self._Client),
exitpriority=10
)
@staticmethod
- def _decref(token, authkey, state, tls, idset, _Client):
- idset.discard(token.id)
+ def _decref(token, serial, authkey, state, tls, idset, _Client):
+ idset.discard(serial)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
@@ -1159,15 +1172,19 @@ def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
+ __class_getitem__ = classmethod(types.GenericAlias)
-DictProxy = MakeProxyType('DictProxy', (
+
+_BaseDictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
-DictProxy._method_to_typeid_ = {
+_BaseDictProxy._method_to_typeid_ = {
'__iter__': 'Iterator',
}
+class DictProxy(_BaseDictProxy):
+ __class_getitem__ = classmethod(types.GenericAlias)
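+ # Illustrative note (not part of the patch): with __class_getitem__ defined,
+ # DictProxy can be parameterized in type hints, e.g.
+ #
+ #     from multiprocessing.managers import DictProxy
+ #     def publish(results: DictProxy[str, int]) -> None:   # hypothetical helper
+ #         results["answer"] = 42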
ArrayProxy = MakeProxyType('ArrayProxy', (
diff --git a/Lib/multiprocessing/pool.py b/Lib/multiprocessing/pool.py
index 4f5d88cb97..f979890170 100644
--- a/Lib/multiprocessing/pool.py
+++ b/Lib/multiprocessing/pool.py
@@ -200,7 +200,7 @@ def __init__(self, processes=None, initializer=None, initargs=(),
self._initargs = initargs
if processes is None:
- processes = os.cpu_count() or 1
+ processes = os.process_cpu_count() or 1
if processes < 1:
raise ValueError("Number of processes must be at least 1")
if maxtasksperchild is not None:
diff --git a/Lib/multiprocessing/popen_fork.py b/Lib/multiprocessing/popen_fork.py
index 625981cf47..a57ef6bdad 100644
--- a/Lib/multiprocessing/popen_fork.py
+++ b/Lib/multiprocessing/popen_fork.py
@@ -1,3 +1,4 @@
+import atexit
import os
import signal
@@ -66,10 +67,13 @@ def _launch(self, process_obj):
self.pid = os.fork()
if self.pid == 0:
try:
+ atexit._clear()
+ atexit.register(util._exit_function)
os.close(parent_r)
os.close(parent_w)
code = process_obj._bootstrap(parent_sentinel=child_r)
finally:
+ atexit._run_exitfuncs()
os._exit(code)
else:
os.close(child_w)
diff --git a/Lib/multiprocessing/popen_spawn_win32.py b/Lib/multiprocessing/popen_spawn_win32.py
index 49d4c7eea2..62fb0ddbf9 100644
--- a/Lib/multiprocessing/popen_spawn_win32.py
+++ b/Lib/multiprocessing/popen_spawn_win32.py
@@ -3,6 +3,7 @@
import signal
import sys
import _winapi
+from subprocess import STARTUPINFO, STARTF_FORCEOFFFEEDBACK
from .context import reduction, get_spawning_popen, set_spawning_popen
from . import spawn
@@ -74,7 +75,8 @@ def __init__(self, process_obj):
try:
hp, ht, pid, tid = _winapi.CreateProcess(
python_exe, cmd,
- None, None, False, 0, env, None, None)
+ None, None, False, 0, env, None,
+ STARTUPINFO(dwFlags=STARTF_FORCEOFFFEEDBACK))
_winapi.CloseHandle(ht)
except:
_winapi.CloseHandle(rhandle)
diff --git a/Lib/multiprocessing/process.py b/Lib/multiprocessing/process.py
index 271ba3fd32..b45f7df476 100644
--- a/Lib/multiprocessing/process.py
+++ b/Lib/multiprocessing/process.py
@@ -310,11 +310,8 @@ def _bootstrap(self, parent_sentinel=None):
# _run_after_forkers() is executed
del old_process
util.info('child process calling self.run()')
- try:
- self.run()
- exitcode = 0
- finally:
- util._exit_function()
+ self.run()
+ exitcode = 0
except SystemExit as e:
if e.code is None:
exitcode = 0
diff --git a/Lib/multiprocessing/queues.py b/Lib/multiprocessing/queues.py
index 852ae87b27..925f043900 100644
--- a/Lib/multiprocessing/queues.py
+++ b/Lib/multiprocessing/queues.py
@@ -20,8 +20,6 @@
from queue import Empty, Full
-import _multiprocessing
-
from . import connection
from . import context
_ForkingPickler = context.reduction.ForkingPickler
diff --git a/Lib/multiprocessing/resource_tracker.py b/Lib/multiprocessing/resource_tracker.py
index 79e96ecf32..05633ac21a 100644
--- a/Lib/multiprocessing/resource_tracker.py
+++ b/Lib/multiprocessing/resource_tracker.py
@@ -29,8 +29,12 @@
_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask')
_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
+def cleanup_noop(name):
+ raise RuntimeError('noop should never be registered or cleaned up')
+
_CLEANUP_FUNCS = {
- 'noop': lambda: None,
+ 'noop': cleanup_noop,
+ 'dummy': lambda name: None, # Dummy resource used in tests
}
if os.name == 'posix':
@@ -61,6 +65,7 @@ def __init__(self):
self._lock = threading.RLock()
self._fd = None
self._pid = None
+ self._exitcode = None
def _reentrant_call_error(self):
# gh-109629: this happens if an explicit call to the ResourceTracker
@@ -70,22 +75,53 @@ def _reentrant_call_error(self):
raise ReentrantCallError(
"Reentrant call into the multiprocessing resource tracker")
- def _stop(self):
- with self._lock:
- # This should not happen (_stop() isn't called by a finalizer)
- # but we check for it anyway.
- if self._lock._recursion_count() > 1:
- return self._reentrant_call_error()
- if self._fd is None:
- # not running
- return
+ def __del__(self):
+ # making sure child processes are cleaned up before the ResourceTracker
+ # gets destroyed.
+ # see https://github.com/python/cpython/issues/88887
+ self._stop(use_blocking_lock=False)
- # closing the "alive" file descriptor stops main()
- os.close(self._fd)
- self._fd = None
+ def _stop(self, use_blocking_lock=True):
+ if use_blocking_lock:
+ with self._lock:
+ self._stop_locked()
+ else:
+ acquired = self._lock.acquire(blocking=False)
+ try:
+ self._stop_locked()
+ finally:
+ if acquired:
+ self._lock.release()
+
+ def _stop_locked(
+ self,
+ close=os.close,
+ waitpid=os.waitpid,
+ waitstatus_to_exitcode=os.waitstatus_to_exitcode,
+ ):
+ # This shouldn't happen (though it might when called by a finalizer),
+ # so we check for it anyway.
+ if self._lock._recursion_count() > 1:
+ return self._reentrant_call_error()
+ if self._fd is None:
+ # not running
+ return
+ if self._pid is None:
+ return
+
+ # closing the "alive" file descriptor stops main()
+ close(self._fd)
+ self._fd = None
- os.waitpid(self._pid, 0)
- self._pid = None
+ _, status = waitpid(self._pid, 0)
+
+ self._pid = None
+
+ try:
+ self._exitcode = waitstatus_to_exitcode(status)
+ except ValueError:
+ # os.waitstatus_to_exitcode may raise an exception for invalid values
+ self._exitcode = None
def getfd(self):
self.ensure_running()
@@ -119,6 +155,7 @@ def ensure_running(self):
pass
self._fd = None
self._pid = None
+ self._exitcode = None
warnings.warn('resource_tracker: process died unexpectedly, '
'relaunching. Some resources might leak.')
@@ -142,13 +179,14 @@ def ensure_running(self):
# that can make the child die before it registers signal handlers
# for SIGINT and SIGTERM. The mask is unregistered after spawning
# the child.
+ prev_sigmask = None
try:
if _HAVE_SIGMASK:
- signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS)
+ prev_sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS)
pid = util.spawnv_passfds(exe, args, fds_to_pass)
finally:
- if _HAVE_SIGMASK:
- signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
+ if prev_sigmask is not None:
+ signal.pthread_sigmask(signal.SIG_SETMASK, prev_sigmask)
except:
os.close(w)
raise
@@ -221,6 +259,8 @@ def main(fd):
pass
cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()}
+ exit_code = 0
+
try:
# keep track of registered/unregistered resources
with open(fd, 'rb') as f:
@@ -242,6 +282,7 @@ def main(fd):
else:
raise RuntimeError('unrecognized command %r' % cmd)
except Exception:
+ exit_code = 3
try:
sys.excepthook(*sys.exc_info())
except:
@@ -251,9 +292,17 @@ def main(fd):
for rtype, rtype_cache in cache.items():
if rtype_cache:
try:
- warnings.warn('resource_tracker: There appear to be %d '
- 'leaked %s objects to clean up at shutdown' %
- (len(rtype_cache), rtype))
+ exit_code = 1
+ if rtype == 'dummy':
+ # The test 'dummy' resource is expected to leak.
+ # We skip the warning (and *only* the warning) for it.
+ pass
+ else:
+ warnings.warn(
+ f'resource_tracker: There appear to be '
+ f'{len(rtype_cache)} leaked {rtype} objects to '
+ f'clean up at shutdown: {rtype_cache}'
+ )
except Exception:
pass
for name in rtype_cache:
@@ -264,6 +313,9 @@ def main(fd):
try:
_CLEANUP_FUNCS[rtype](name)
except Exception as e:
+ exit_code = 2
warnings.warn('resource_tracker: %r: %s' % (name, e))
finally:
pass
+
+ sys.exit(exit_code)
diff --git a/Lib/multiprocessing/shared_memory.py b/Lib/multiprocessing/shared_memory.py
index 9a1e5aa17b..67e70fdc27 100644
--- a/Lib/multiprocessing/shared_memory.py
+++ b/Lib/multiprocessing/shared_memory.py
@@ -71,8 +71,9 @@ class SharedMemory:
_flags = os.O_RDWR
_mode = 0o600
_prepend_leading_slash = True if _USE_POSIX else False
+ _track = True
- def __init__(self, name=None, create=False, size=0):
+ def __init__(self, name=None, create=False, size=0, *, track=True):
if not size >= 0:
raise ValueError("'size' must be a positive integer")
if create:
@@ -82,6 +83,7 @@ def __init__(self, name=None, create=False, size=0):
if name is None and not self._flags & os.O_EXCL:
raise ValueError("'name' can only be None if create=True")
+ self._track = track
if _USE_POSIX:
# POSIX Shared Memory
@@ -116,8 +118,8 @@ def __init__(self, name=None, create=False, size=0):
except OSError:
self.unlink()
raise
-
- resource_tracker.register(self._name, "shared_memory")
+ if self._track:
+ resource_tracker.register(self._name, "shared_memory")
else:
@@ -236,12 +238,20 @@ def close(self):
def unlink(self):
"""Requests that the underlying shared memory block be destroyed.
- In order to ensure proper cleanup of resources, unlink should be
- called once (and only once) across all processes which have access
- to the shared memory block."""
+ Unlink should be called once (and only once) across all handles
+ which have access to the shared memory block, even if these
+ handles belong to different processes. Closing and unlinking may
+ happen in any order, but trying to access data inside a shared
+ memory block after unlinking may result in memory errors,
+ depending on platform.
+
+ This method has no effect on Windows, where the only way to
+ delete a shared memory block is to close all handles."""
+
if _USE_POSIX and self._name:
_posixshmem.shm_unlink(self._name)
- resource_tracker.unregister(self._name, "shared_memory")
+ if self._track:
+ resource_tracker.unregister(self._name, "shared_memory")
_encoding = "utf8"
diff --git a/Lib/multiprocessing/synchronize.py b/Lib/multiprocessing/synchronize.py
index 3ccbfe311c..870c91349b 100644
--- a/Lib/multiprocessing/synchronize.py
+++ b/Lib/multiprocessing/synchronize.py
@@ -174,7 +174,7 @@ def __repr__(self):
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
- elif self._semlock._get_value() == 1:
+ elif not self._semlock._is_zero():
name = 'None'
elif self._semlock._count() > 0:
name = 'SomeOtherThread'
@@ -200,7 +200,7 @@ def __repr__(self):
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
count = self._semlock._count()
- elif self._semlock._get_value() == 1:
+ elif not self._semlock._is_zero():
name, count = 'None', 0
elif self._semlock._count() > 0:
name, count = 'SomeOtherThread', 'nonzero'
@@ -360,7 +360,7 @@ def wait(self, timeout=None):
return True
return False
- def __repr__(self) -> str:
+ def __repr__(self):
set_status = 'set' if self.is_set() else 'unset'
return f"<{type(self).__qualname__} at {id(self):#x} {set_status}>"
#
diff --git a/Lib/multiprocessing/util.py b/Lib/multiprocessing/util.py
index 79559823fb..75dde02d88 100644
--- a/Lib/multiprocessing/util.py
+++ b/Lib/multiprocessing/util.py
@@ -64,8 +64,7 @@ def get_logger():
global _logger
import logging
- logging._acquireLock()
- try:
+ with logging._lock:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
@@ -79,9 +78,6 @@ def get_logger():
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
- finally:
- logging._releaseLock()
-
return _logger
def log_to_stderr(level=None):
@@ -106,11 +102,7 @@ def log_to_stderr(level=None):
# Abstract socket support
def _platform_supports_abstract_sockets():
- if sys.platform == "linux":
- return True
- if hasattr(sys, 'getandroidapilevel'):
- return True
- return False
+ return sys.platform in ("linux", "android")
def is_abstract_socket_namespace(address):
@@ -130,10 +122,7 @@ def is_abstract_socket_namespace(address):
#
def _remove_temp_dir(rmtree, tempdir):
- def onerror(func, path, err_info):
- if not issubclass(err_info[0], FileNotFoundError):
- raise
- rmtree(tempdir, onerror=onerror)
+ rmtree(tempdir)
current_process = process.current_process()
# current_process() can be None if the finalizer is called
diff --git a/Lib/pyclbr.py b/Lib/pyclbr.py
new file mode 100644
index 0000000000..37f86995d6
--- /dev/null
+++ b/Lib/pyclbr.py
@@ -0,0 +1,314 @@
+"""Parse a Python module and describe its classes and functions.
+
+Parse enough of a Python file to recognize imports and class and
+function definitions, and to find out the superclasses of a class.
+
+The interface consists of a single function:
+ readmodule_ex(module, path=None)
+where module is the name of a Python module, and path is an optional
+list of directories where the module is to be searched. If present,
+path is prepended to the system search path sys.path. The return value
+is a dictionary. The keys of the dictionary are the names of the
+classes and functions defined in the module (including classes that are
+defined via the from XXX import YYY construct). The values are
+instances of classes Class and Function. One special key/value pair is
+present for packages: the key '__path__' has a list as its value which
+contains the package search path.
+
+Classes and Functions have a common superclass: _Object. Every instance
+has the following attributes:
+ module -- name of the module;
+ name -- name of the object;
+ file -- file in which the object is defined;
+ lineno -- line in the file where the object's definition starts;
+ end_lineno -- line in the file where the object's definition ends;
+ parent -- parent of this object, if any;
+ children -- nested objects contained in this object.
+The 'children' attribute is a dictionary mapping names to objects.
+
+Instances of Function describe functions with the attributes from _Object,
+plus the following:
+ is_async -- if a function is defined with an 'async' prefix
+
+Instances of Class describe classes with the attributes from _Object,
+plus the following:
+ super -- list of super classes (Class instances if possible);
+ methods -- mapping of method names to beginning line numbers.
+If the name of a super class is not recognized, the corresponding
+entry in the list of super classes is not a class instance but a
+string giving the name of the super class. Since import statements
+are recognized and imported modules are scanned as well, this
+shouldn't happen often.
+"""
+
+import ast
+import sys
+import importlib.util
+
+__all__ = ["readmodule", "readmodule_ex", "Class", "Function"]
+
+_modules = {} # Initialize cache of modules we've seen.
+
+
+class _Object:
+ "Information about Python class or function."
+ def __init__(self, module, name, file, lineno, end_lineno, parent):
+ self.module = module
+ self.name = name
+ self.file = file
+ self.lineno = lineno
+ self.end_lineno = end_lineno
+ self.parent = parent
+ self.children = {}
+ if parent is not None:
+ parent.children[name] = self
+
+
+# Odd Function and Class signatures are for back-compatibility.
+class Function(_Object):
+ "Information about a Python function, including methods."
+ def __init__(self, module, name, file, lineno,
+ parent=None, is_async=False, *, end_lineno=None):
+ super().__init__(module, name, file, lineno, end_lineno, parent)
+ self.is_async = is_async
+ if isinstance(parent, Class):
+ parent.methods[name] = lineno
+
+
+class Class(_Object):
+ "Information about a Python class."
+ def __init__(self, module, name, super_, file, lineno,
+ parent=None, *, end_lineno=None):
+ super().__init__(module, name, file, lineno, end_lineno, parent)
+ self.super = super_ or []
+ self.methods = {}
+
+
+# These 2 functions are used in these tests
+# Lib/test/test_pyclbr, Lib/idlelib/idle_test/test_browser.py
+def _nest_function(ob, func_name, lineno, end_lineno, is_async=False):
+ "Return a Function after nesting within ob."
+ return Function(ob.module, func_name, ob.file, lineno,
+ parent=ob, is_async=is_async, end_lineno=end_lineno)
+
+def _nest_class(ob, class_name, lineno, end_lineno, super=None):
+ "Return a Class after nesting within ob."
+ return Class(ob.module, class_name, super, ob.file, lineno,
+ parent=ob, end_lineno=end_lineno)
+
+
+def readmodule(module, path=None):
+ """Return Class objects for the top-level classes in module.
+
+ This is the original interface, before Functions were added.
+ """
+
+ res = {}
+ for key, value in _readmodule(module, path or []).items():
+ if isinstance(value, Class):
+ res[key] = value
+ return res
+
+def readmodule_ex(module, path=None):
+ """Return a dictionary with all functions and classes in module.
+
+ Search for module in PATH + sys.path.
+ If possible, include imported superclasses.
+ Do this by reading source, without importing (and executing) it.
+ """
+ return _readmodule(module, path or [])
+
+
+def _readmodule(module, path, inpackage=None):
+ """Do the hard work for readmodule[_ex].
+
+ If inpackage is given, it must be the dotted name of the package in
+ which we are searching for a submodule, and then PATH must be the
+ package search path; otherwise, we are searching for a top-level
+ module, and path is combined with sys.path.
+ """
+ # Compute the full module name (prepending inpackage if set).
+ if inpackage is not None:
+ fullmodule = "%s.%s" % (inpackage, module)
+ else:
+ fullmodule = module
+
+ # Check in the cache.
+ if fullmodule in _modules:
+ return _modules[fullmodule]
+
+ # Initialize the dict for this module's contents.
+ tree = {}
+
+ # Check if it is a built-in module; we don't do much for these.
+ if module in sys.builtin_module_names and inpackage is None:
+ _modules[module] = tree
+ return tree
+
+ # Check for a dotted module name.
+ i = module.rfind('.')
+ if i >= 0:
+ package = module[:i]
+ submodule = module[i+1:]
+ parent = _readmodule(package, path, inpackage)
+ if inpackage is not None:
+ package = "%s.%s" % (inpackage, package)
+ if not '__path__' in parent:
+ raise ImportError('No package named {}'.format(package))
+ return _readmodule(submodule, parent['__path__'], package)
+
+ # Search the path for the module.
+ f = None
+ if inpackage is not None:
+ search_path = path
+ else:
+ search_path = path + sys.path
+ spec = importlib.util._find_spec_from_path(fullmodule, search_path)
+ if spec is None:
+ raise ModuleNotFoundError(f"no module named {fullmodule!r}", name=fullmodule)
+ _modules[fullmodule] = tree
+ # Is module a package?
+ if spec.submodule_search_locations is not None:
+ tree['__path__'] = spec.submodule_search_locations
+ try:
+ source = spec.loader.get_source(fullmodule)
+ except (AttributeError, ImportError):
+ # If module is not Python source, we cannot do anything.
+ return tree
+ else:
+ if source is None:
+ return tree
+
+ fname = spec.loader.get_filename(fullmodule)
+ return _create_tree(fullmodule, path, fname, source, tree, inpackage)
+
+
+class _ModuleBrowser(ast.NodeVisitor):
+ def __init__(self, module, path, file, tree, inpackage):
+ self.path = path
+ self.tree = tree
+ self.file = file
+ self.module = module
+ self.inpackage = inpackage
+ self.stack = []
+
+ def visit_ClassDef(self, node):
+ bases = []
+ for base in node.bases:
+ name = ast.unparse(base)
+ if name in self.tree:
+ # We know this super class.
+ bases.append(self.tree[name])
+ elif len(names := name.split(".")) > 1:
+ # Super class form is module.class:
+ # look in module for class.
+ *_, module, class_ = names
+ if module in _modules:
+ bases.append(_modules[module].get(class_, name))
+ else:
+ bases.append(name)
+
+ parent = self.stack[-1] if self.stack else None
+ class_ = Class(self.module, node.name, bases, self.file, node.lineno,
+ parent=parent, end_lineno=node.end_lineno)
+ if parent is None:
+ self.tree[node.name] = class_
+ self.stack.append(class_)
+ self.generic_visit(node)
+ self.stack.pop()
+
+ def visit_FunctionDef(self, node, *, is_async=False):
+ parent = self.stack[-1] if self.stack else None
+ function = Function(self.module, node.name, self.file, node.lineno,
+ parent, is_async, end_lineno=node.end_lineno)
+ if parent is None:
+ self.tree[node.name] = function
+ self.stack.append(function)
+ self.generic_visit(node)
+ self.stack.pop()
+
+ def visit_AsyncFunctionDef(self, node):
+ self.visit_FunctionDef(node, is_async=True)
+
+ def visit_Import(self, node):
+ if node.col_offset != 0:
+ return
+
+ for module in node.names:
+ try:
+ try:
+ _readmodule(module.name, self.path, self.inpackage)
+ except ImportError:
+ _readmodule(module.name, [])
+ except (ImportError, SyntaxError):
+ # If we can't find or parse the imported module,
+ # too bad -- don't die here.
+ continue
+
+ def visit_ImportFrom(self, node):
+ if node.col_offset != 0:
+ return
+ try:
+ module = "." * node.level
+ if node.module:
+ module += node.module
+ module = _readmodule(module, self.path, self.inpackage)
+ except (ImportError, SyntaxError):
+ return
+
+ for name in node.names:
+ if name.name in module:
+ self.tree[name.asname or name.name] = module[name.name]
+ elif name.name == "*":
+ for import_name, import_value in module.items():
+ if import_name.startswith("_"):
+ continue
+ self.tree[import_name] = import_value
+
+
+def _create_tree(fullmodule, path, fname, source, tree, inpackage):
+ mbrowser = _ModuleBrowser(fullmodule, path, fname, tree, inpackage)
+ mbrowser.visit(ast.parse(source))
+ return mbrowser.tree
+
+
+def _main():
+ "Print module output (default this file) for quick visual check."
+ import os
+ try:
+ mod = sys.argv[1]
+ except:
+ mod = __file__
+ if os.path.exists(mod):
+ path = [os.path.dirname(mod)]
+ mod = os.path.basename(mod)
+ if mod.lower().endswith(".py"):
+ mod = mod[:-3]
+ else:
+ path = []
+ tree = readmodule_ex(mod, path)
+ lineno_key = lambda a: getattr(a, 'lineno', 0)
+ objs = sorted(tree.values(), key=lineno_key, reverse=True)
+ indent_level = 2
+ while objs:
+ obj = objs.pop()
+ if isinstance(obj, list):
+ # Value is a __path__ key.
+ continue
+ if not hasattr(obj, 'indent'):
+ obj.indent = 0
+
+ if isinstance(obj, _Object):
+ new_objs = sorted(obj.children.values(),
+ key=lineno_key, reverse=True)
+ for ob in new_objs:
+ ob.indent = obj.indent + indent_level
+ objs.extend(new_objs)
+ if isinstance(obj, Class):
+ print("{}class {} {} {}"
+ .format(' ' * obj.indent, obj.name, obj.super, obj.lineno))
+ elif isinstance(obj, Function):
+ print("{}def {} {}".format(' ' * obj.indent, obj.name, obj.lineno))
+
+if __name__ == "__main__":
+ _main()
diff --git a/Lib/quopri.py b/Lib/quopri.py
index 08899c5cb7..f36cf7b395 100755
--- a/Lib/quopri.py
+++ b/Lib/quopri.py
@@ -67,10 +67,7 @@ def write(s, output=output, lineEnd=b'\n'):
output.write(s + lineEnd)
prevline = None
- while 1:
- line = input.readline()
- if not line:
- break
+ while line := input.readline():
outline = []
# Strip off any readline induced trailing newline
stripped = b''
@@ -126,9 +123,7 @@ def decode(input, output, header=False):
return
new = b''
- while 1:
- line = input.readline()
- if not line: break
+ while line := input.readline():
i, n = 0, len(line)
if n > 0 and line[n-1:n] == b'\n':
partial = 0; n = n-1
diff --git a/Lib/rlcompleter.py b/Lib/rlcompleter.py
index bca4a7bc52..23eb0020f4 100644
--- a/Lib/rlcompleter.py
+++ b/Lib/rlcompleter.py
@@ -31,7 +31,11 @@
import atexit
import builtins
+import inspect
+import keyword
+import re
import __main__
+import warnings
__all__ = ["Completer"]
@@ -85,10 +89,11 @@ def complete(self, text, state):
return None
if state == 0:
- if "." in text:
- self.matches = self.attr_matches(text)
- else:
- self.matches = self.global_matches(text)
+ with warnings.catch_warnings(action="ignore"):
+ if "." in text:
+ self.matches = self.attr_matches(text)
+ else:
+ self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
@@ -96,7 +101,13 @@ def complete(self, text, state):
def _callable_postfix(self, val, word):
if callable(val):
- word = word + "("
+ word += "("
+ try:
+ if not inspect.signature(val).parameters:
+ word += ")"
+ except ValueError:
+ pass
+
return word
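+ # Illustrative example (not part of the patch): with the change above, the
+ # completer closes the parentheses for callables that take no arguments.
+ # 'myfunc' is a placeholder name.
+ #
+ #     import rlcompleter
+ #     completer = rlcompleter.Completer({'myfunc': lambda: None})
+ #     completer.complete('myfunc', 0)      # -> 'myfunc()'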
def global_matches(self, text):
@@ -106,18 +117,17 @@ def global_matches(self, text):
defined in self.namespace that match.
"""
- import keyword
matches = []
seen = {"__builtins__"}
n = len(text)
- for word in keyword.kwlist:
+ for word in keyword.kwlist + keyword.softkwlist:
if word[:n] == text:
seen.add(word)
if word in {'finally', 'try'}:
word = word + ':'
elif word not in {'False', 'None', 'True',
'break', 'continue', 'pass',
- 'else'}:
+ 'else', '_'}:
word = word + ' '
matches.append(word)
for nspace in [self.namespace, builtins.__dict__]:
@@ -139,7 +149,6 @@ def attr_matches(self, text):
with a __getattr__ hook is evaluated.
"""
- import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return []
@@ -169,13 +178,20 @@ def attr_matches(self, text):
if (word[:n] == attr and
not (noprefix and word[:n+1] == noprefix)):
match = "%s.%s" % (expr, word)
- try:
- val = getattr(thisobject, word)
- except Exception:
- pass # Include even if attribute not set
+ if isinstance(getattr(type(thisobject), word, None),
+ property):
+ # bpo-44752: thisobject.word is a method decorated by
+ # `@property`. What follows applies a postfix if
+ # thisobject.word is callable, but now we know that
+ # this is not callable (because it is a property).
+ # Also, getattr(thisobject, word) will evaluate the
+ # property method, which is not desirable.
+ matches.append(match)
+ continue
+ if (value := getattr(thisobject, word, None)) is not None:
+ matches.append(self._callable_postfix(value, match))
else:
- match = self._callable_postfix(val, match)
- matches.append(match)
+ matches.append(match)
if matches or not noprefix:
break
if noprefix == '_':
diff --git a/Lib/secrets.py b/Lib/secrets.py
index a546efbdd4..566a09b731 100644
--- a/Lib/secrets.py
+++ b/Lib/secrets.py
@@ -2,7 +2,7 @@
managing secrets such as account authentication, tokens, and similar.
See PEP 506 for more information.
-https://www.python.org/dev/peps/pep-0506/
+https://peps.python.org/pep-0506/
"""
@@ -13,7 +13,6 @@
import base64
-import binascii
from hmac import compare_digest
from random import SystemRandom
@@ -56,7 +55,7 @@ def token_hex(nbytes=None):
'f9bf78b9a18ce6d46a0cd2b0b86df9da'
"""
- return binascii.hexlify(token_bytes(nbytes)).decode('ascii')
+ return token_bytes(nbytes).hex()
def token_urlsafe(nbytes=None):
"""Return a random URL-safe text string, in Base64 encoding.
diff --git a/Lib/selectors.py b/Lib/selectors.py
index c3b065b522..b8e5f6a4f7 100644
--- a/Lib/selectors.py
+++ b/Lib/selectors.py
@@ -66,12 +66,16 @@ def __init__(self, selector):
def __len__(self):
return len(self._selector._fd_to_key)
+ def get(self, fileobj, default=None):
+ fd = self._selector._fileobj_lookup(fileobj)
+ return self._selector._fd_to_key.get(fd, default)
+
def __getitem__(self, fileobj):
- try:
- fd = self._selector._fileobj_lookup(fileobj)
- return self._selector._fd_to_key[fd]
- except KeyError:
- raise KeyError("{!r} is not registered".format(fileobj)) from None
+ fd = self._selector._fileobj_lookup(fileobj)
+ key = self._selector._fd_to_key.get(fd)
+ if key is None:
+ raise KeyError("{!r} is not registered".format(fileobj))
+ return key
def __iter__(self):
return iter(self._selector._fd_to_key)
@@ -272,19 +276,6 @@ def close(self):
def get_map(self):
return self._map
- def _key_from_fd(self, fd):
- """Return the key associated to a given file descriptor.
-
- Parameters:
- fd -- file descriptor
-
- Returns:
- corresponding key, or None if not found
- """
- try:
- return self._fd_to_key[fd]
- except KeyError:
- return None
class SelectSelector(_BaseSelectorImpl):
@@ -323,17 +314,15 @@ def select(self, timeout=None):
r, w, _ = self._select(self._readers, self._writers, [], timeout)
except InterruptedError:
return ready
- r = set(r)
- w = set(w)
- for fd in r | w:
- events = 0
- if fd in r:
- events |= EVENT_READ
- if fd in w:
- events |= EVENT_WRITE
-
- key = self._key_from_fd(fd)
+ r = frozenset(r)
+ w = frozenset(w)
+ rw = r | w
+ fd_to_key_get = self._fd_to_key.get
+ for fd in rw:
+ key = fd_to_key_get(fd)
if key:
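+ # `fd in r and EVENT_READ` evaluates to EVENT_READ when the fd is
+ # readable and to False (0 for the bitwise OR) otherwise, so the
+ # event mask is built without branching.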
+ events = ((fd in r and EVENT_READ)
+ | (fd in w and EVENT_WRITE))
ready.append((key, events & key.events))
return ready
@@ -350,11 +339,8 @@ def __init__(self):
def register(self, fileobj, events, data=None):
key = super().register(fileobj, events, data)
- poller_events = 0
- if events & EVENT_READ:
- poller_events |= self._EVENT_READ
- if events & EVENT_WRITE:
- poller_events |= self._EVENT_WRITE
+ poller_events = ((events & EVENT_READ and self._EVENT_READ)
+ | (events & EVENT_WRITE and self._EVENT_WRITE) )
try:
self._selector.register(key.fd, poller_events)
except:
@@ -380,11 +366,8 @@ def modify(self, fileobj, events, data=None):
changed = False
if events != key.events:
- selector_events = 0
- if events & EVENT_READ:
- selector_events |= self._EVENT_READ
- if events & EVENT_WRITE:
- selector_events |= self._EVENT_WRITE
+ selector_events = ((events & EVENT_READ and self._EVENT_READ)
+ | (events & EVENT_WRITE and self._EVENT_WRITE))
try:
self._selector.modify(key.fd, selector_events)
except:
@@ -415,15 +398,13 @@ def select(self, timeout=None):
fd_event_list = self._selector.poll(timeout)
except InterruptedError:
return ready
- for fd, event in fd_event_list:
- events = 0
- if event & ~self._EVENT_READ:
- events |= EVENT_WRITE
- if event & ~self._EVENT_WRITE:
- events |= EVENT_READ
- key = self._key_from_fd(fd)
+ fd_to_key_get = self._fd_to_key.get
+ for fd, event in fd_event_list:
+ key = fd_to_key_get(fd)
if key:
+ events = ((event & ~self._EVENT_READ and EVENT_WRITE)
+ | (event & ~self._EVENT_WRITE and EVENT_READ))
ready.append((key, events & key.events))
return ready
@@ -439,6 +420,9 @@ class PollSelector(_PollLikeSelector):
if hasattr(select, 'epoll'):
+ _NOT_EPOLLIN = ~select.EPOLLIN
+ _NOT_EPOLLOUT = ~select.EPOLLOUT
+
class EpollSelector(_PollLikeSelector):
"""Epoll-based selector."""
_selector_cls = select.epoll
@@ -461,22 +445,20 @@ def select(self, timeout=None):
# epoll_wait() expects `maxevents` to be greater than zero;
# we want to make sure that `select()` can be called when no
# FD is registered.
- max_ev = max(len(self._fd_to_key), 1)
+ max_ev = len(self._fd_to_key) or 1
ready = []
try:
fd_event_list = self._selector.poll(timeout, max_ev)
except InterruptedError:
return ready
- for fd, event in fd_event_list:
- events = 0
- if event & ~select.EPOLLIN:
- events |= EVENT_WRITE
- if event & ~select.EPOLLOUT:
- events |= EVENT_READ
- key = self._key_from_fd(fd)
+ fd_to_key = self._fd_to_key
+ for fd, event in fd_event_list:
+ key = fd_to_key.get(fd)
if key:
+ events = ((event & _NOT_EPOLLIN and EVENT_WRITE)
+ | (event & _NOT_EPOLLOUT and EVENT_READ))
ready.append((key, events & key.events))
return ready
@@ -566,17 +548,15 @@ def select(self, timeout=None):
kev_list = self._selector.control(None, max_ev, timeout)
except InterruptedError:
return ready
+
+ fd_to_key_get = self._fd_to_key.get
for kev in kev_list:
fd = kev.ident
flag = kev.filter
- events = 0
- if flag == select.KQ_FILTER_READ:
- events |= EVENT_READ
- if flag == select.KQ_FILTER_WRITE:
- events |= EVENT_WRITE
-
- key = self._key_from_fd(fd)
+ key = fd_to_key_get(fd)
if key:
+ events = ((flag == select.KQ_FILTER_READ and EVENT_READ)
+ | (flag == select.KQ_FILTER_WRITE and EVENT_WRITE))
ready.append((key, events & key.events))
return ready
diff --git a/Lib/shlex.py b/Lib/shlex.py
index 4801a6c1d4..f4821616b6 100644
--- a/Lib/shlex.py
+++ b/Lib/shlex.py
@@ -305,9 +305,7 @@ def __next__(self):
def split(s, comments=False, posix=True):
"""Split the string *s* using shell-like syntax."""
if s is None:
- import warnings
- warnings.warn("Passing None for 's' to shlex.split() is deprecated.",
- DeprecationWarning, stacklevel=2)
+ raise ValueError("s argument must not be None")
lex = shlex(s, posix=posix)
lex.whitespace_split = True
if not comments:
@@ -335,10 +333,7 @@ def quote(s):
def _print_tokens(lexer):
- while 1:
- tt = lexer.get_token()
- if not tt:
- break
+ while tt := lexer.get_token():
print("Token: " + repr(tt))
if __name__ == '__main__':
diff --git a/Lib/stat.py b/Lib/stat.py
index fc024db3f4..1b4ed1ebc9 100644
--- a/Lib/stat.py
+++ b/Lib/stat.py
@@ -110,22 +110,30 @@ def S_ISWHT(mode):
S_IXOTH = 0o0001 # execute by others
# Names for file flags
-
+UF_SETTABLE = 0x0000ffff # owner settable flags
UF_NODUMP = 0x00000001 # do not dump file
UF_IMMUTABLE = 0x00000002 # file may not be changed
UF_APPEND = 0x00000004 # file may only be appended to
UF_OPAQUE = 0x00000008 # directory is opaque when viewed through a union stack
UF_NOUNLINK = 0x00000010 # file may not be renamed or deleted
-UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
-UF_HIDDEN = 0x00008000 # OS X: file should not be displayed
+UF_COMPRESSED = 0x00000020 # macOS: file is compressed
+UF_TRACKED = 0x00000040 # macOS: used for handling document IDs
+UF_DATAVAULT = 0x00000080 # macOS: entitlement needed for I/O
+UF_HIDDEN = 0x00008000 # macOS: file should not be displayed
+SF_SETTABLE = 0xffff0000 # superuser settable flags
SF_ARCHIVED = 0x00010000 # file may be archived
SF_IMMUTABLE = 0x00020000 # file may not be changed
SF_APPEND = 0x00040000 # file may only be appended to
+SF_RESTRICTED = 0x00080000 # macOS: entitlement needed for writing
SF_NOUNLINK = 0x00100000 # file may not be renamed or deleted
SF_SNAPSHOT = 0x00200000 # file is a snapshot file
+SF_FIRMLINK = 0x00800000 # macOS: file is a firmlink
+SF_DATALESS = 0x40000000 # macOS: file is a dataless object
_filemode_table = (
+ # File type chars according to:
+ # http://en.wikibooks.org/wiki/C_Programming/POSIX_Reference/sys/stat.h
((S_IFLNK, "l"),
(S_IFSOCK, "s"), # Must appear before IFREG and IFDIR as IFSOCK == IFREG | IFDIR
(S_IFREG, "-"),
@@ -156,13 +164,17 @@ def S_ISWHT(mode):
def filemode(mode):
"""Convert a file's mode to a string of the form '-rwxrwxrwx'."""
perm = []
- for table in _filemode_table:
+ for index, table in enumerate(_filemode_table):
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
- perm.append("-")
+ if index == 0:
+ # Unknown filetype
+ perm.append("?")
+ else:
+ perm.append("-")
return "".join(perm)
diff --git a/Lib/statistics.py b/Lib/statistics.py
index f66245380a..ad4a94219c 100644
--- a/Lib/statistics.py
+++ b/Lib/statistics.py
@@ -11,7 +11,7 @@
Function Description
================== ==================================================
mean Arithmetic mean (average) of data.
-fmean Fast, floating point arithmetic mean.
+fmean Fast, floating-point arithmetic mean.
geometric_mean Geometric mean of data.
harmonic_mean Harmonic mean of data.
median Median (middle value) of data.
@@ -112,6 +112,8 @@
'fmean',
'geometric_mean',
'harmonic_mean',
+ 'kde',
+ 'kde_random',
'linear_regression',
'mean',
'median',
@@ -130,14 +132,20 @@
import math
import numbers
import random
+import sys
from fractions import Fraction
from decimal import Decimal
-from itertools import groupby, repeat
+from itertools import count, groupby, repeat
from bisect import bisect_left, bisect_right
-from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum
+from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum, sumprod
+from math import isfinite, isinf, pi, cos, sin, tan, cosh, asin, atan, acos
+from functools import reduce
from operator import itemgetter
-from collections import Counter, namedtuple
+from collections import Counter, namedtuple, defaultdict
+
+_SQRT2 = sqrt(2.0)
+_random = random
# === Exceptions ===
@@ -180,11 +188,12 @@ def _sum(data):
allowed.
"""
count = 0
+ types = set()
+ types_add = types.add
partials = {}
partials_get = partials.get
- T = int
for typ, values in groupby(data, type):
- T = _coerce(T, typ) # or raise TypeError
+ types_add(typ)
for n, d in map(_exact_ratio, values):
count += 1
partials[d] = partials_get(d, 0) + n
@@ -196,9 +205,51 @@ def _sum(data):
else:
# Sum all the partial sums using builtin sum.
total = sum(Fraction(n, d) for d, n in partials.items())
+ T = reduce(_coerce, types, int) # or raise TypeError
return (T, total, count)
+def _ss(data, c=None):
+ """Return the exact mean and sum of square deviations of sequence data.
+
+ Calculations are done in a single pass, allowing the input to be an iterator.
+
+ If given, *c* is used as the mean; otherwise, the mean is calculated from the data.
+ Use the *c* argument with care, as it can lead to garbage results.
+
+ """
+ if c is not None:
+ T, ssd, count = _sum((d := x - c) * d for x in data)
+ return (T, ssd, c, count)
+ count = 0
+ types = set()
+ types_add = types.add
+ sx_partials = defaultdict(int)
+ sxx_partials = defaultdict(int)
+ for typ, values in groupby(data, type):
+ types_add(typ)
+ for n, d in map(_exact_ratio, values):
+ count += 1
+ sx_partials[d] += n
+ sxx_partials[d] += n * n
+ if not count:
+ ssd = c = Fraction(0)
+ elif None in sx_partials:
+ # The sum will be a NAN or INF. We can ignore all the finite
+ # partials, and just look at this special one.
+ ssd = c = sx_partials[None]
+ assert not _isfinite(ssd)
+ else:
+ sx = sum(Fraction(n, d) for d, n in sx_partials.items())
+ sxx = sum(Fraction(n, d*d) for d, n in sxx_partials.items())
+ # This formula has poor numeric properties for floats,
+ # but with fractions it is exact.
+ ssd = (count * sxx - sx * sx) / count
+ c = sx / count
+ T = reduce(_coerce, types, int) # or raise TypeError
+ return (T, ssd, c, count)
+
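+# Worked example for _ss() (illustrative, not part of the patch): for
+# data = [2, 4, 8] the single pass gives sx = 14, sxx = 84 and count = 3,
+# so ssd = (3*84 - 14*14) / 3 = 56/3 and c = 14/3.  The two-pass check
+# sum((x - 14/3)**2 for x in data) = 64/9 + 4/9 + 100/9 = 56/3 agrees.
+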
+
def _isfinite(x):
try:
return x.is_finite() # Likely a Decimal.
@@ -245,6 +296,28 @@ def _exact_ratio(x):
x is expected to be an int, Fraction, Decimal or float.
"""
+
+ # XXX We should revisit whether using fractions to accumulate exact
+ # ratios is the right way to go.
+
+ # The integer ratios for binary floats can have numerators or
+ # denominators with over 300 decimal digits. The problem is more
+ # acute with decimal floats where the default decimal context
+ # supports a huge range of exponents from Emin=-999999 to
+ # Emax=999999. When expanded with as_integer_ratio(), numbers like
+ # Decimal('3.14E+5000') and Decimal('3.14E-5000') have large
+ # numerators or denominators that will slow computation.
+
+ # When the integer ratios are accumulated as fractions, the size
+ # grows to cover the full range from the smallest magnitude to the
+ # largest. For example, Fraction(3.14E+300) + Fraction(3.14E-300),
+ # has a 616 digit numerator. Likewise,
+ # Fraction(Decimal('3.14E+5000')) + Fraction(Decimal('3.14E-5000'))
+ # has a 10,003 digit numerator.
+
+ # This doesn't seem to have been a problem in practice, but it is a
+ # potential pitfall.
+
try:
return x.as_integer_ratio()
except AttributeError:
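The growth described in the comment above is easy to observe directly; the digit counts noted below are the ones the comment reports (illustrative sketch, not part of the patch):

```python
from fractions import Fraction
from decimal import Decimal

f = Fraction(3.14e+300) + Fraction(3.14e-300)
print(len(str(f.numerator)))     # the comment above reports 616 digits

d = Fraction(Decimal('3.14E+5000')) + Fraction(Decimal('3.14E-5000'))
print(len(str(d.numerator)))     # the comment above reports 10,003 digits
```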
@@ -279,22 +352,6 @@ def _convert(value, T):
raise
-def _find_lteq(a, x):
- 'Locate the leftmost value exactly equal to x'
- i = bisect_left(a, x)
- if i != len(a) and a[i] == x:
- return i
- raise ValueError
-
-
-def _find_rteq(a, l, x):
- 'Locate the rightmost value exactly equal to x'
- i = bisect_right(a, x, lo=l)
- if i != (len(a) + 1) and a[i - 1] == x:
- return i - 1
- raise ValueError
-
-
def _fail_neg(values, errmsg='negative value'):
"""Iterate over values, failing if any are less than zero."""
for x in values:
@@ -303,6 +360,113 @@ def _fail_neg(values, errmsg='negative value'):
yield x
+def _rank(data, /, *, key=None, reverse=False, ties='average', start=1) -> list[float]:
+ """Rank order a dataset. The lowest value has rank 1.
+
+ Ties are averaged so that equal values receive the same rank:
+
+ >>> data = [31, 56, 31, 25, 75, 18]
+ >>> _rank(data)
+ [3.5, 5.0, 3.5, 2.0, 6.0, 1.0]
+
+ The operation is idempotent:
+
+ >>> _rank([3.5, 5.0, 3.5, 2.0, 6.0, 1.0])
+ [3.5, 5.0, 3.5, 2.0, 6.0, 1.0]
+
+ It is possible to rank the data in reverse order so that the
+ highest value has rank 1. Also, a key-function can extract
+ the field to be ranked:
+
+ >>> goals = [('eagles', 45), ('bears', 48), ('lions', 44)]
+ >>> _rank(goals, key=itemgetter(1), reverse=True)
+ [2.0, 1.0, 3.0]
+
+ Ranks are conventionally numbered starting from one; however,
+ setting *start* to zero allows the ranks to be used as array indices:
+
+ >>> prize = ['Gold', 'Silver', 'Bronze', 'Certificate']
+ >>> scores = [8.1, 7.3, 9.4, 8.3]
+ >>> [prize[int(i)] for i in _rank(scores, start=0, reverse=True)]
+ ['Bronze', 'Certificate', 'Gold', 'Silver']
+
+ """
+ # If this function becomes public at some point, more thought
+ # needs to be given to the signature. A list of ints is
+ # plausible when ties is "min" or "max". When ties is "average",
+ # either list[float] or list[Fraction] is plausible.
+
+ # Default handling of ties matches scipy.stats.mstats.spearmanr.
+ if ties != 'average':
+ raise ValueError(f'Unknown tie resolution method: {ties!r}')
+ if key is not None:
+ data = map(key, data)
+ val_pos = sorted(zip(data, count()), reverse=reverse)
+ i = start - 1
+ result = [0] * len(val_pos)
+ for _, g in groupby(val_pos, key=itemgetter(0)):
+ group = list(g)
+ size = len(group)
+ rank = i + (size + 1) / 2
+ for value, orig_pos in group:
+ result[orig_pos] = rank
+ i += size
+ return result
+
+
+def _integer_sqrt_of_frac_rto(n: int, m: int) -> int:
+ """Square root of n/m, rounded to the nearest integer using round-to-odd."""
+ # Reference: https://www.lri.fr/~melquion/doc/05-imacs17_1-expose.pdf
+ a = math.isqrt(n // m)
+ return a | (a*a*m != n)
+
+
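Round-to-odd sets the low bit of the integer square root whenever the true root is inexact, so the later rounding to float precision cannot silently double-round. A standalone restatement (illustrative only):

```python
import math

def isqrt_rto(n, m):
    # same idea as _integer_sqrt_of_frac_rto above: round-to-odd integer sqrt of n/m
    a = math.isqrt(n // m)
    return a | (a * a * m != n)

print(isqrt_rto(25, 1))   # 5: exact root, low bit left alone
print(isqrt_rto(26, 1))   # 5: inexact, but 5 is already odd
print(isqrt_rto(17, 1))   # 5: isqrt() gives 4, forced odd to record the inexactness
```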
+# For 53 bit precision floats, the bit width used in
+# _float_sqrt_of_frac() is 109.
+_sqrt_bit_width: int = 2 * sys.float_info.mant_dig + 3
+
+
+def _float_sqrt_of_frac(n: int, m: int) -> float:
+ """Square root of n/m as a float, correctly rounded."""
+ # See principle and proof sketch at: https://bugs.python.org/msg407078
+ q = (n.bit_length() - m.bit_length() - _sqrt_bit_width) // 2
+ if q >= 0:
+ numerator = _integer_sqrt_of_frac_rto(n, m << 2 * q) << q
+ denominator = 1
+ else:
+ numerator = _integer_sqrt_of_frac_rto(n << -2 * q, m)
+ denominator = 1 << -q
+ return numerator / denominator # Convert to float
+
+
+def _decimal_sqrt_of_frac(n: int, m: int) -> Decimal:
+ """Square root of n/m as a Decimal, correctly rounded."""
+ # Premise: For decimal, computing (n/m).sqrt() can be off
+ # by 1 ulp from the correctly rounded result.
+ # Method: Check the result, moving up or down a step if needed.
+ if n <= 0:
+ if not n:
+ return Decimal('0.0')
+ n, m = -n, -m
+
+ root = (Decimal(n) / Decimal(m)).sqrt()
+ nr, dr = root.as_integer_ratio()
+
+ plus = root.next_plus()
+ np, dp = plus.as_integer_ratio()
+ # test: n / m > ((root + plus) / 2) ** 2
+ if 4 * n * (dr*dp)**2 > m * (dr*np + dp*nr)**2:
+ return plus
+
+ minus = root.next_minus()
+ nm, dm = minus.as_integer_ratio()
+ # test: n / m < ((root + minus) / 2) ** 2
+ if 4 * n * (dr*dm)**2 < m * (dr*nm + dm*nr)**2:
+ return minus
+
+ return root
+
+
# === Measures of central tendency (averages) ===
def mean(data):
@@ -321,17 +485,13 @@ def mean(data):
If ``data`` is empty, StatisticsError will be raised.
"""
- if iter(data) is data:
- data = list(data)
- n = len(data)
+ T, total, n = _sum(data)
if n < 1:
raise StatisticsError('mean requires at least one data point')
- T, total, count = _sum(data)
- assert count == n
return _convert(total / n, T)
-def fmean(data):
+def fmean(data, weights=None):
"""Convert data to floats and compute the arithmetic mean.
This runs faster than the mean() function and it always returns a float.
@@ -340,29 +500,40 @@ def fmean(data):
>>> fmean([3.5, 4.0, 5.25])
4.25
"""
- try:
- n = len(data)
- except TypeError:
- # Handle iterators that do not define __len__().
- n = 0
- def count(iterable):
- nonlocal n
- for n, x in enumerate(iterable, start=1):
- yield x
- total = fsum(count(data))
- else:
+ if weights is None:
+ try:
+ n = len(data)
+ except TypeError:
+ # Handle iterators that do not define __len__().
+ n = 0
+ def count(iterable):
+ nonlocal n
+ for n, x in enumerate(iterable, start=1):
+ yield x
+ data = count(data)
total = fsum(data)
- try:
+ if not n:
+ raise StatisticsError('fmean requires at least one data point')
return total / n
- except ZeroDivisionError:
- raise StatisticsError('fmean requires at least one data point') from None
+ if not isinstance(weights, (list, tuple)):
+ weights = list(weights)
+ try:
+ num = sumprod(data, weights)
+ except ValueError:
+ raise StatisticsError('data and weights must be the same length')
+ den = fsum(weights)
+ if not den:
+ raise StatisticsError('sum of weights must be non-zero')
+ return num / den
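A short usage sketch of the new weights parameter (assumes the patched statistics module is importable; the weight values are illustrative):

```python
from statistics import fmean

grades  = [85, 92, 78]
weights = [1, 2, 5]                      # e.g. relative credit hours
print(fmean(grades, weights=weights))    # (85*1 + 92*2 + 78*5) / 8 == 82.375
```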
def geometric_mean(data):
"""Convert data to floats and compute the geometric mean.
- Raises a StatisticsError if the input dataset is empty,
- if it contains a zero, or if it contains a negative value.
+ Raises a StatisticsError if the input dataset is empty
+ or if it contains a negative value.
+
+ Returns zero if the product of inputs is zero.
No special efforts are made to achieve exact results.
(However, this may change in the future.)
@@ -370,11 +541,25 @@ def geometric_mean(data):
>>> round(geometric_mean([54, 24, 36]), 9)
36.0
"""
- try:
- return exp(fmean(map(log, data)))
- except ValueError:
- raise StatisticsError('geometric mean requires a non-empty dataset '
- 'containing positive numbers') from None
+ n = 0
+ found_zero = False
+ def count_positive(iterable):
+ nonlocal n, found_zero
+ for n, x in enumerate(iterable, start=1):
+ if x > 0.0 or math.isnan(x):
+ yield x
+ elif x == 0.0:
+ found_zero = True
+ else:
+ raise StatisticsError('No negative inputs allowed', x)
+ total = fsum(map(log, count_positive(data)))
+ if not n:
+ raise StatisticsError('Must have a non-empty dataset')
+ if math.isnan(total):
+ return math.nan
+ if found_zero:
+ return math.nan if total == math.inf else 0.0
+ return exp(total / n)
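Under the revised rules above, a zero input no longer raises; it pulls the geometric mean down to zero instead (sketch assuming the patched module):

```python
from statistics import geometric_mean

print(round(geometric_mean([54, 24, 36]), 9))   # 36.0, as in the doctest
print(geometric_mean([2.0, 0.0, 8.0]))          # 0.0 once any input is zero
```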
def harmonic_mean(data, weights=None):
@@ -498,58 +683,75 @@ def median_high(data):
return data[n // 2]
-def median_grouped(data, interval=1):
- """Return the 50th percentile (median) of grouped continuous data.
+def median_grouped(data, interval=1.0):
+ """Estimates the median for numeric data binned around the midpoints
+ of consecutive, fixed-width intervals.
- >>> median_grouped([1, 2, 2, 3, 4, 4, 4, 4, 4, 5])
- 3.7
- >>> median_grouped([52, 52, 53, 54])
- 52.5
+ The *data* can be any iterable of numeric data with each value being
+ exactly the midpoint of a bin. At least one value must be present.
- This calculates the median as the 50th percentile, and should be
- used when your data is continuous and grouped. In the above example,
- the values 1, 2, 3, etc. actually represent the midpoint of classes
- 0.5-1.5, 1.5-2.5, 2.5-3.5, etc. The middle value falls somewhere in
- class 3.5-4.5, and interpolation is used to estimate it.
+ The *interval* is the width of each bin.
- Optional argument ``interval`` represents the class interval, and
- defaults to 1. Changing the class interval naturally will change the
- interpolated 50th percentile value:
+ For example, demographic information may have been summarized into
+ consecutive ten-year age groups with each group being represented
+ by the 5-year midpoints of the intervals:
- >>> median_grouped([1, 3, 3, 5, 7], interval=1)
- 3.25
- >>> median_grouped([1, 3, 3, 5, 7], interval=2)
- 3.5
+ >>> demographics = Counter({
+ ... 25: 172, # 20 to 30 years old
+ ... 35: 484, # 30 to 40 years old
+ ... 45: 387, # 40 to 50 years old
+ ... 55: 22, # 50 to 60 years old
+ ... 65: 6, # 60 to 70 years old
+ ... })
+
+ The 50th percentile (median) is the 536th person out of the 1071
+ member cohort. That person is in the 30 to 40 year old age group.
+
+ The regular median() function would assume that everyone in the
+ tricenarian age group was exactly 35 years old. A more tenable
+ assumption is that the 484 members of that age group are evenly
+ distributed between 30 and 40. For that, we use median_grouped().
+
+ >>> data = list(demographics.elements())
+ >>> median(data)
+ 35
+ >>> round(median_grouped(data, interval=10), 1)
+ 37.5
+
+ The caller is responsible for making sure the data points are separated
+ by exact multiples of *interval*. This is essential for getting a
+ correct result. The function does not check this precondition.
+
+ Inputs may be any numeric type that can be coerced to a float during
+ the interpolation step.
- This function does not check whether the data points are at least
- ``interval`` apart.
"""
data = sorted(data)
n = len(data)
- if n == 0:
+ if not n:
raise StatisticsError("no median for empty data")
- elif n == 1:
- return data[0]
+
# Find the value at the midpoint. Remember this corresponds to the
- # centre of the class interval.
+ # midpoint of the class interval.
x = data[n // 2]
- for obj in (x, interval):
- if isinstance(obj, (str, bytes)):
- raise TypeError('expected number but got %r' % obj)
+
+ # Using O(log n) bisection, find where all the x values occur in the data.
+ # All x will lie within data[i:j].
+ i = bisect_left(data, x)
+ j = bisect_right(data, x, lo=i)
+
+ # Coerce to floats, raising a TypeError if not possible
try:
- L = x - interval / 2 # The lower limit of the median interval.
- except TypeError:
- # Mixed type. For now we just coerce to float.
- L = float(x) - float(interval) / 2
-
- # Uses bisection search to search for x in data with log(n) time complexity
- # Find the position of leftmost occurrence of x in data
- l1 = _find_lteq(data, x)
- # Find the position of rightmost occurrence of x in data[l1...len(data)]
- # Assuming always l1 <= l2
- l2 = _find_rteq(data, l1, x)
- cf = l1
- f = l2 - l1 + 1
+ interval = float(interval)
+ x = float(x)
+ except ValueError:
+ raise TypeError(f'Value cannot be converted to a float')
+
+ # Interpolate the median using the formula found at:
+ # https://www.cuemath.com/data/median-of-grouped-data/
+ L = x - interval / 2.0 # Lower limit of the median interval
+ cf = i # Cumulative frequency of the preceding interval
+ f = j - i # Number of elements in the median interval
return L + interval * (n / 2 - cf) / f
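Working the interpolation formula by hand for the demographics doctest above (illustrative arithmetic only):

```python
n, interval = 1071, 10
x = 35                        # bin midpoint that holds the middle value
L = x - interval / 2          # 30.0, lower limit of the median interval
cf = 172                      # cumulative frequency below the 30-40 bin
f = 484                       # frequency inside the 30-40 bin
print(round(L + interval * (n / 2 - cf) / f, 1))   # 37.5, matching the doctest
```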
@@ -596,9 +798,223 @@ def multimode(data):
>>> multimode('')
[]
"""
- counts = Counter(iter(data)).most_common()
- maxcount, mode_items = next(groupby(counts, key=itemgetter(1)), (0, []))
- return list(map(itemgetter(0), mode_items))
+ counts = Counter(iter(data))
+ if not counts:
+ return []
+ maxcount = max(counts.values())
+ return [value for value, count in counts.items() if count == maxcount]
+
+
+def kde(data, h, kernel='normal', *, cumulative=False):
+ """Kernel Density Estimation: Create a continuous probability density
+ function or cumulative distribution function from discrete samples.
+
+ The basic idea is to smooth the data using a kernel function
+ to help draw inferences about a population from a sample.
+
+ The degree of smoothing is controlled by the scaling parameter h
+ which is called the bandwidth. Smaller values emphasize local
+ features while larger values give smoother results.
+
+ The kernel determines the relative weights of the sample data
+ points. Generally, the choice of kernel shape does not matter
+ as much as the more influential bandwidth smoothing parameter.
+
+ Kernels that give some weight to every sample point:
+
+ normal (gauss)
+ logistic
+ sigmoid
+
+ Kernels that only give weight to sample points within
+ the bandwidth:
+
+ rectangular (uniform)
+ triangular
+ parabolic (epanechnikov)
+ quartic (biweight)
+ triweight
+ cosine
+
+ If *cumulative* is true, a cumulative distribution function will be returned.
+
+ A StatisticsError will be raised if the data sequence is empty.
+
+ Example
+ -------
+
+ Given a sample of six data points, construct a continuous
+ function that estimates the underlying probability density:
+
+ >>> sample = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2]
+ >>> f_hat = kde(sample, h=1.5)
+
+ Compute the area under the curve:
+
+ >>> area = sum(f_hat(x) for x in range(-20, 20))
+ >>> round(area, 4)
+ 1.0
+
+ Plot the estimated probability density function at
+ evenly spaced points from -6 to 10:
+
+ >>> for x in range(-6, 11):
+ ... density = f_hat(x)
+ ... plot = ' ' * int(density * 400) + 'x'
+ ... print(f'{x:2}: {density:.3f} {plot}')
+ ...
+ -6: 0.002 x
+ -5: 0.009 x
+ -4: 0.031 x
+ -3: 0.070 x
+ -2: 0.111 x
+ -1: 0.125 x
+ 0: 0.110 x
+ 1: 0.086 x
+ 2: 0.068 x
+ 3: 0.059 x
+ 4: 0.066 x
+ 5: 0.082 x
+ 6: 0.082 x
+ 7: 0.058 x
+ 8: 0.028 x
+ 9: 0.009 x
+ 10: 0.002 x
+
+ Estimate P(4.5 < X <= 7.5), the probability that a new sample value
+ will be between 4.5 and 7.5:
+
+ >>> cdf = kde(sample, h=1.5, cumulative=True)
+ >>> round(cdf(7.5) - cdf(4.5), 2)
+ 0.22
+
+ References
+ ----------
+
+ Kernel density estimation and its application:
+ https://www.itm-conferences.org/articles/itmconf/pdf/2018/08/itmconf_sam2018_00037.pdf
+
+ Kernel functions in common use:
+ https://en.wikipedia.org/wiki/Kernel_(statistics)#kernel_functions_in_common_use
+
+ Interactive graphical demonstration and exploration:
+ https://demonstrations.wolfram.com/KernelDensityEstimation/
+
+ Kernel estimation of cumulative distribution function of a random variable with bounded support
+ https://www.econstor.eu/bitstream/10419/207829/1/10.21307_stattrans-2016-037.pdf
+
+ """
+
+ n = len(data)
+ if not n:
+ raise StatisticsError('Empty data sequence')
+
+ if not isinstance(data[0], (int, float)):
+ raise TypeError('Data sequence must contain ints or floats')
+
+ if h <= 0.0:
+ raise StatisticsError(f'Bandwidth h must be positive, not {h=!r}')
+
+ match kernel:
+
+ case 'normal' | 'gauss':
+ sqrt2pi = sqrt(2 * pi)
+ sqrt2 = sqrt(2)
+ K = lambda t: exp(-1/2 * t * t) / sqrt2pi
+ W = lambda t: 1/2 * (1.0 + erf(t / sqrt2))
+ support = None
+
+ case 'logistic':
+ # 1.0 / (exp(t) + 2.0 + exp(-t))
+ K = lambda t: 1/2 / (1.0 + cosh(t))
+ W = lambda t: 1.0 - 1.0 / (exp(t) + 1.0)
+ support = None
+
+ case 'sigmoid':
+ # (2/pi) / (exp(t) + exp(-t))
+ c1 = 1 / pi
+ c2 = 2 / pi
+ K = lambda t: c1 / cosh(t)
+ W = lambda t: c2 * atan(exp(t))
+ support = None
+
+ case 'rectangular' | 'uniform':
+ K = lambda t: 1/2
+ W = lambda t: 1/2 * t + 1/2
+ support = 1.0
+
+ case 'triangular':
+ K = lambda t: 1.0 - abs(t)
+ W = lambda t: t*t * (1/2 if t < 0.0 else -1/2) + t + 1/2
+ support = 1.0
+
+ case 'parabolic' | 'epanechnikov':
+ K = lambda t: 3/4 * (1.0 - t * t)
+ W = lambda t: -1/4 * t**3 + 3/4 * t + 1/2
+ support = 1.0
+
+ case 'quartic' | 'biweight':
+ K = lambda t: 15/16 * (1.0 - t * t) ** 2
+ W = lambda t: 3/16 * t**5 - 5/8 * t**3 + 15/16 * t + 1/2
+ support = 1.0
+
+ case 'triweight':
+ K = lambda t: 35/32 * (1.0 - t * t) ** 3
+ W = lambda t: 35/32 * (-1/7*t**7 + 3/5*t**5 - t**3 + t) + 1/2
+ support = 1.0
+
+ case 'cosine':
+ c1 = pi / 4
+ c2 = pi / 2
+ K = lambda t: c1 * cos(c2 * t)
+ W = lambda t: 1/2 * sin(c2 * t) + 1/2
+ support = 1.0
+
+ case _:
+ raise StatisticsError(f'Unknown kernel name: {kernel!r}')
+
+ if support is None:
+
+ def pdf(x):
+ n = len(data)
+ return sum(K((x - x_i) / h) for x_i in data) / (n * h)
+
+ def cdf(x):
+ n = len(data)
+ return sum(W((x - x_i) / h) for x_i in data) / n
+
+ else:
+
+ sample = sorted(data)
+ bandwidth = h * support
+
+ def pdf(x):
+ nonlocal n, sample
+ if len(data) != n:
+ sample = sorted(data)
+ n = len(data)
+ i = bisect_left(sample, x - bandwidth)
+ j = bisect_right(sample, x + bandwidth)
+ supported = sample[i : j]
+ return sum(K((x - x_i) / h) for x_i in supported) / (n * h)
+
+ def cdf(x):
+ nonlocal n, sample
+ if len(data) != n:
+ sample = sorted(data)
+ n = len(data)
+ i = bisect_left(sample, x - bandwidth)
+ j = bisect_right(sample, x + bandwidth)
+ supported = sample[i : j]
+ return sum((W((x - x_i) / h) for x_i in supported), i) / n
+
+ if cumulative:
+ cdf.__doc__ = f'CDF estimate with {h=!r} and {kernel=!r}'
+ return cdf
+
+ else:
+ pdf.__doc__ = f'PDF estimate with {h=!r} and {kernel=!r}'
+ return pdf
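For kernels with bounded support, the closures above only consult sample points within h * support of the query point, located with bisect. A standalone illustration using the doctest sample (the query point and bandwidth here are illustrative):

```python
from bisect import bisect_left, bisect_right

sample = sorted([-2.1, -1.3, -0.4, 1.9, 5.1, 6.2])
x, h, support = 1.0, 1.5, 1.0          # e.g. a triangular kernel
bandwidth = h * support
i = bisect_left(sample, x - bandwidth)
j = bisect_right(sample, x + bandwidth)
print(sample[i:j])   # [-0.4, 1.9] -- the only points that can contribute at x == 1.0
```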
# Notes on methods for computing quantiles
@@ -659,7 +1075,10 @@ def quantiles(data, *, n=4, method='exclusive'):
data = sorted(data)
ld = len(data)
if ld < 2:
- raise StatisticsError('must have at least two data points')
+ if ld == 1:
+ return data * (n - 1)
+ raise StatisticsError('must have at least one data point')
+
if method == 'inclusive':
m = ld - 1
result = []
@@ -668,6 +1087,7 @@ def quantiles(data, *, n=4, method='exclusive'):
interpolated = (data[j] * (n - delta) + data[j + 1] * delta) / n
result.append(interpolated)
return result
+
if method == 'exclusive':
m = ld + 1
result = []
@@ -678,6 +1098,7 @@ def quantiles(data, *, n=4, method='exclusive'):
interpolated = (data[j - 1] * (n - delta) + data[j] * delta) / n
result.append(interpolated)
return result
+
raise ValueError(f'Unknown method: {method!r}')
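The relaxed precondition above means a single data point now yields n - 1 copies of itself instead of raising (sketch assuming the patched module):

```python
from statistics import quantiles

print(quantiles([42]))                 # [42, 42, 42] -- the new single-point behavior
print(quantiles([1, 2, 3, 4], n=2))    # [2.5] -- multi-point behavior is unchanged
```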
@@ -685,41 +1106,6 @@ def quantiles(data, *, n=4, method='exclusive'):
# See http://mathworld.wolfram.com/Variance.html
# http://mathworld.wolfram.com/SampleVariance.html
-# http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
-#
-# Under no circumstances use the so-called "computational formula for
-# variance", as that is only suitable for hand calculations with a small
-# amount of low-precision data. It has terrible numeric properties.
-#
-# See a comparison of three computational methods here:
-# http://www.johndcook.com/blog/2008/09/26/comparing-three-methods-of-computing-standard-deviation/
-
-def _ss(data, c=None):
- """Return sum of square deviations of sequence data.
-
- If ``c`` is None, the mean is calculated in one pass, and the deviations
- from the mean are calculated in a second pass. Otherwise, deviations are
- calculated from ``c`` as given. Use the second case with care, as it can
- lead to garbage results.
- """
- if c is not None:
- T, total, count = _sum((x-c)**2 for x in data)
- return (T, total)
- T, total, count = _sum(data)
- mean_n, mean_d = (total / count).as_integer_ratio()
- partials = Counter()
- for n, d in map(_exact_ratio, data):
- diff_n = n * mean_d - d * mean_n
- diff_d = d * mean_d
- partials[diff_d * diff_d] += diff_n * diff_n
- if None in partials:
- # The sum will be a NAN or INF. We can ignore all the finite
- # partials, and just look at this special one.
- total = partials[None]
- assert not _isfinite(total)
- else:
- total = sum(Fraction(n, d) for d, n in partials.items())
- return (T, total)
def variance(data, xbar=None):
@@ -760,12 +1146,9 @@ def variance(data, xbar=None):
Fraction(67, 108)
"""
- if iter(data) is data:
- data = list(data)
- n = len(data)
+ T, ss, c, n = _ss(data, xbar)
if n < 2:
raise StatisticsError('variance requires at least two data points')
- T, ss = _ss(data, xbar)
return _convert(ss / (n - 1), T)
@@ -804,12 +1187,9 @@ def pvariance(data, mu=None):
Fraction(13, 72)
"""
- if iter(data) is data:
- data = list(data)
- n = len(data)
+ T, ss, c, n = _ss(data, mu)
if n < 1:
raise StatisticsError('pvariance requires at least one data point')
- T, ss = _ss(data, mu)
return _convert(ss / n, T)
@@ -822,14 +1202,13 @@ def stdev(data, xbar=None):
1.0810874155219827
"""
- # Fixme: Despite the exact sum of squared deviations, some inaccuracy
- # remain because there are two rounding steps. The first occurs in
- # the _convert() step for variance(), the second occurs in math.sqrt().
- var = variance(data, xbar)
- try:
- return var.sqrt()
- except AttributeError:
- return math.sqrt(var)
+ T, ss, c, n = _ss(data, xbar)
+ if n < 2:
+ raise StatisticsError('stdev requires at least two data points')
+ mss = ss / (n - 1)
+ if issubclass(T, Decimal):
+ return _decimal_sqrt_of_frac(mss.numerator, mss.denominator)
+ return _float_sqrt_of_frac(mss.numerator, mss.denominator)
def pstdev(data, mu=None):
@@ -841,14 +1220,47 @@ def pstdev(data, mu=None):
0.986893273527251
"""
- # Fixme: Despite the exact sum of squared deviations, some inaccuracy
- # remain because there are two rounding steps. The first occurs in
- # the _convert() step for pvariance(), the second occurs in math.sqrt().
- var = pvariance(data, mu)
+ T, ss, c, n = _ss(data, mu)
+ if n < 1:
+ raise StatisticsError('pstdev requires at least one data point')
+ mss = ss / n
+ if issubclass(T, Decimal):
+ return _decimal_sqrt_of_frac(mss.numerator, mss.denominator)
+ return _float_sqrt_of_frac(mss.numerator, mss.denominator)
+
+
+def _mean_stdev(data):
+ """In one pass, compute the mean and sample standard deviation as floats."""
+ T, ss, xbar, n = _ss(data)
+ if n < 2:
+ raise StatisticsError('stdev requires at least two data points')
+ mss = ss / (n - 1)
try:
- return var.sqrt()
+ return float(xbar), _float_sqrt_of_frac(mss.numerator, mss.denominator)
except AttributeError:
- return math.sqrt(var)
+ # Handle Nans and Infs gracefully
+ return float(xbar), float(xbar) / float(ss)
+
+def _sqrtprod(x: float, y: float) -> float:
+ "Return sqrt(x * y) computed with improved accuracy and without overflow/underflow."
+ h = sqrt(x * y)
+ if not isfinite(h):
+ if isinf(h) and not isinf(x) and not isinf(y):
+ # Finite inputs overflowed, so scale down, and recompute.
+ scale = 2.0 ** -512 # sqrt(1 / sys.float_info.max)
+ return _sqrtprod(scale * x, scale * y) / scale
+ return h
+ if not h:
+ if x and y:
+ # Non-zero inputs underflowed, so scale up, and recompute.
+ # Scale: 1 / sqrt(sys.float_info.min * sys.float_info.epsilon)
+ scale = 2.0 ** 537
+ return _sqrtprod(scale * x, scale * y) / scale
+ return h
+ # Improve accuracy with a differential correction.
+ # https://www.wolframalpha.com/input/?i=Maclaurin+series+sqrt%28h**2+%2B+x%29+at+x%3D0
+ d = sumprod((x, h), (y, -h))
+ return h + d / (2.0 * h)
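The overflow branch of _sqrtprod() can be seen in isolation: rescaling by an exact power of two keeps the intermediate product finite (illustrative sketch; the differential correction is a separate refinement):

```python
import math

x = y = 1e200
print(math.sqrt(x * y))          # inf: the product overflows before sqrt is taken

scale = 2.0 ** -512              # exact power-of-two scaling, as in _sqrtprod above
print(math.sqrt((scale * x) * (scale * y)) / scale)   # ~1e+200, the intended value
```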
# === Statistics for relations between two inputs ===
@@ -882,18 +1294,16 @@ def covariance(x, y, /):
raise StatisticsError('covariance requires at least two data points')
xbar = fsum(x) / n
ybar = fsum(y) / n
- sxy = fsum((xi - xbar) * (yi - ybar) for xi, yi in zip(x, y))
+ sxy = sumprod((xi - xbar for xi in x), (yi - ybar for yi in y))
return sxy / (n - 1)
-def correlation(x, y, /):
+def correlation(x, y, /, *, method='linear'):
"""Pearson's correlation coefficient
Return the Pearson's correlation coefficient for two inputs. Pearson's
- correlation coefficient *r* takes values between -1 and +1. It measures the
- strength and direction of the linear relationship, where +1 means very
- strong, positive linear relationship, -1 very strong, negative linear
- relationship, and 0 no linear relationship.
+ correlation coefficient *r* takes values between -1 and +1. It measures
+ the strength and direction of a linear relationship.
>>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> y = [9, 8, 7, 6, 5, 4, 3, 2, 1]
@@ -902,19 +1312,36 @@ def correlation(x, y, /):
>>> correlation(x, y)
-1.0
+ If *method* is "ranked", computes Spearman's rank correlation coefficient
+ for two inputs. The data is replaced by ranks. Ties are averaged
+ so that equal values receive the same rank. The resulting coefficient
+ measures the strength of a monotonic relationship.
+
+ Spearman's rank correlation coefficient is appropriate for ordinal
+ data or for continuous data that doesn't meet the linear proportion
+ requirement for Pearson's correlation coefficient.
"""
n = len(x)
if len(y) != n:
raise StatisticsError('correlation requires that both inputs have same number of data points')
if n < 2:
raise StatisticsError('correlation requires at least two data points')
- xbar = fsum(x) / n
- ybar = fsum(y) / n
- sxy = fsum((xi - xbar) * (yi - ybar) for xi, yi in zip(x, y))
- sxx = fsum((xi - xbar) ** 2.0 for xi in x)
- syy = fsum((yi - ybar) ** 2.0 for yi in y)
+ if method not in {'linear', 'ranked'}:
+ raise ValueError(f'Unknown method: {method!r}')
+ if method == 'ranked':
+ start = (n - 1) / -2 # Center rankings around zero
+ x = _rank(x, start=start)
+ y = _rank(y, start=start)
+ else:
+ xbar = fsum(x) / n
+ ybar = fsum(y) / n
+ x = [xi - xbar for xi in x]
+ y = [yi - ybar for yi in y]
+ sxy = sumprod(x, y)
+ sxx = sumprod(x, x)
+ syy = sumprod(y, y)
try:
- return sxy / sqrt(sxx * syy)
+ return sxy / _sqrtprod(sxx, syy)
except ZeroDivisionError:
raise StatisticsError('at least one of the inputs is constant')
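A quick contrast between the two methods on data that is monotonic but not linear (assumes the patched module; the values are illustrative):

```python
from statistics import correlation

x = [1, 2, 3, 4, 5]
y = [1, 4, 9, 16, 25]                        # monotonic but not linear
print(round(correlation(x, y), 3))           # Pearson r, about 0.981
print(correlation(x, y, method='ranked'))    # Spearman rho: 1.0 for any monotone map
```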
@@ -922,13 +1349,13 @@ def correlation(x, y, /):
LinearRegression = namedtuple('LinearRegression', ('slope', 'intercept'))
-def linear_regression(x, y, /):
+def linear_regression(x, y, /, *, proportional=False):
"""Slope and intercept for simple linear regression.
Return the slope and intercept of simple linear regression
parameters estimated using ordinary least squares. Simple linear
regression describes relationship between an independent variable
- *x* and a dependent variable *y* in terms of linear function:
+ *x* and a dependent variable *y* in terms of a linear function:
y = slope * x + intercept + noise
@@ -944,7 +1371,20 @@ def linear_regression(x, y, /):
>>> noise = NormalDist().samples(5, seed=42)
>>> y = [3 * x[i] + 2 + noise[i] for i in range(5)]
>>> linear_regression(x, y) #doctest: +ELLIPSIS
- LinearRegression(slope=3.09078914170..., intercept=1.75684970486...)
+ LinearRegression(slope=3.17495..., intercept=1.00925...)
+
+ If *proportional* is true, the independent variable *x* and the
+ dependent variable *y* are assumed to be directly proportional.
+ The data is fit to a line passing through the origin.
+
+ Since the *intercept* will always be 0.0, the underlying linear
+ function simplifies to:
+
+ y = slope * x + noise
+
+ >>> y = [3 * x[i] + noise[i] for i in range(5)]
+ >>> linear_regression(x, y, proportional=True) #doctest: +ELLIPSIS
+ LinearRegression(slope=2.90475..., intercept=0.0)
"""
n = len(x)
@@ -952,15 +1392,18 @@ def linear_regression(x, y, /):
raise StatisticsError('linear regression requires that both inputs have same number of data points')
if n < 2:
raise StatisticsError('linear regression requires at least two data points')
- xbar = fsum(x) / n
- ybar = fsum(y) / n
- sxy = fsum((xi - xbar) * (yi - ybar) for xi, yi in zip(x, y))
- sxx = fsum((xi - xbar) ** 2.0 for xi in x)
+ if not proportional:
+ xbar = fsum(x) / n
+ ybar = fsum(y) / n
+ x = [xi - xbar for xi in x] # List because used three times below
+ y = (yi - ybar for yi in y) # Generator because only used once below
+ sxy = sumprod(x, y) + 0.0 # Add zero to coerce result to a float
+ sxx = sumprod(x, x)
try:
slope = sxy / sxx # equivalent to: covariance(x, y) / variance(x)
except ZeroDivisionError:
raise StatisticsError('x is constant')
- intercept = ybar - slope * xbar
+ intercept = 0.0 if proportional else ybar - slope * xbar
return LinearRegression(slope=slope, intercept=intercept)
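A minimal sketch of the proportional fit on exactly proportional data (assumes the patched module):

```python
from statistics import linear_regression

x = [1, 2, 3]
y = [2, 4, 6]                                       # y is exactly 2 * x
print(linear_regression(x, y, proportional=True))   # LinearRegression(slope=2.0, intercept=0.0)
```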
@@ -1068,29 +1511,29 @@ def __init__(self, mu=0.0, sigma=1.0):
@classmethod
def from_samples(cls, data):
"Make a normal distribution instance from sample data."
- if not isinstance(data, (list, tuple)):
- data = list(data)
- xbar = fmean(data)
- return cls(xbar, stdev(data, xbar))
+ return cls(*_mean_stdev(data))
def samples(self, n, *, seed=None):
"Generate *n* samples for a given mean and standard deviation."
- gauss = random.gauss if seed is None else random.Random(seed).gauss
- mu, sigma = self._mu, self._sigma
- return [gauss(mu, sigma) for i in range(n)]
+ rnd = random.random if seed is None else random.Random(seed).random
+ inv_cdf = _normal_dist_inv_cdf
+ mu = self._mu
+ sigma = self._sigma
+ return [inv_cdf(rnd(), mu, sigma) for _ in repeat(None, n)]
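The rewritten samples() above draws uniform variates and maps them through the inverse CDF (inverse-transform sampling). The same idea with the public API, with an arbitrarily chosen seed and parameters:

```python
import random
from statistics import NormalDist

nd = NormalDist(mu=10.0, sigma=2.0)
rng = random.Random(8675309)
samples = [nd.inv_cdf(rng.random()) for _ in range(5)]   # uniform -> normal via inverse CDF
print([round(s, 2) for s in samples])
```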
def pdf(self, x):
"Probability density function. P(x <= X < x+dx) / dx"
- variance = self._sigma ** 2.0
+ variance = self._sigma * self._sigma
if not variance:
raise StatisticsError('pdf() not defined when sigma is zero')
- return exp((x - self._mu)**2.0 / (-2.0*variance)) / sqrt(tau*variance)
+ diff = x - self._mu
+ return exp(diff * diff / (-2.0 * variance)) / sqrt(tau * variance)
def cdf(self, x):
"Cumulative distribution function. P(X <= x)"
if not self._sigma:
raise StatisticsError('cdf() not defined when sigma is zero')
- return 0.5 * (1.0 + erf((x - self._mu) / (self._sigma * sqrt(2.0))))
+ return 0.5 * (1.0 + erf((x - self._mu) / (self._sigma * _SQRT2)))
def inv_cdf(self, p):
"""Inverse cumulative distribution function. x : P(X <= x) = p
@@ -1104,8 +1547,6 @@ def inv_cdf(self, p):
"""
if p <= 0.0 or p >= 1.0:
raise StatisticsError('p must be in the range 0.0 < p < 1.0')
- if self._sigma <= 0.0:
- raise StatisticsError('cdf() not defined when sigma at or below zero')
return _normal_dist_inv_cdf(p, self._mu, self._sigma)
def quantiles(self, n=4):
@@ -1146,9 +1587,9 @@ def overlap(self, other):
dv = Y_var - X_var
dm = fabs(Y._mu - X._mu)
if not dv:
- return 1.0 - erf(dm / (2.0 * X._sigma * sqrt(2.0)))
+ return 1.0 - erf(dm / (2.0 * X._sigma * _SQRT2))
a = X._mu * Y_var - Y._mu * X_var
- b = X._sigma * Y._sigma * sqrt(dm**2.0 + dv * log(Y_var / X_var))
+ b = X._sigma * Y._sigma * sqrt(dm * dm + dv * log(Y_var / X_var))
x1 = (a + b) / dv
x2 = (a - b) / dv
return 1.0 - (fabs(Y.cdf(x1) - X.cdf(x1)) + fabs(Y.cdf(x2) - X.cdf(x2)))
@@ -1191,7 +1632,7 @@ def stdev(self):
@property
def variance(self):
"Square of the standard deviation."
- return self._sigma ** 2.0
+ return self._sigma * self._sigma
def __add__(x1, x2):
"""Add a constant or another NormalDist instance.
@@ -1265,3 +1706,102 @@ def __hash__(self):
def __repr__(self):
return f'{type(self).__name__}(mu={self._mu!r}, sigma={self._sigma!r})'
+
+ def __getstate__(self):
+ return self._mu, self._sigma
+
+ def __setstate__(self, state):
+ self._mu, self._sigma = state
+
+
+## kde_random() ##############################################################
+
+def _newton_raphson(f_inv_estimate, f, f_prime, tolerance=1e-12):
+ def f_inv(y):
+ "Return x such that f(x) ≈ y within the specified tolerance."
+ x = f_inv_estimate(y)
+ while abs(diff := f(x) - y) > tolerance:
+ x -= diff / f_prime(x)
+ return x
+ return f_inv
+
+def _quartic_invcdf_estimate(p):
+ sign, p = (1.0, p) if p <= 1/2 else (-1.0, 1.0 - p)
+ x = (2.0 * p) ** 0.4258865685331 - 1.0
+ if p >= 0.004 < 0.499:
+ x += 0.026818732 * sin(7.101753784 * p + 2.73230839482953)
+ return x * sign
+
+_quartic_invcdf = _newton_raphson(
+ f_inv_estimate = _quartic_invcdf_estimate,
+ f = lambda t: 3/16 * t**5 - 5/8 * t**3 + 15/16 * t + 1/2,
+ f_prime = lambda t: 15/16 * (1.0 - t * t) ** 2)
+
+def _triweight_invcdf_estimate(p):
+ sign, p = (1.0, p) if p <= 1/2 else (-1.0, 1.0 - p)
+ x = (2.0 * p) ** 0.3400218741872791 - 1.0
+ return x * sign
+
+_triweight_invcdf = _newton_raphson(
+ f_inv_estimate = _triweight_invcdf_estimate,
+ f = lambda t: 35/32 * (-1/7*t**7 + 3/5*t**5 - t**3 + t) + 1/2,
+ f_prime = lambda t: 35/32 * (1.0 - t * t) ** 3)
+
+_kernel_invcdfs = {
+ 'normal': NormalDist().inv_cdf,
+ 'logistic': lambda p: log(p / (1 - p)),
+ 'sigmoid': lambda p: log(tan(p * pi/2)),
+ 'rectangular': lambda p: 2*p - 1,
+ 'parabolic': lambda p: 2 * cos((acos(2*p-1) + pi) / 3),
+ 'quartic': _quartic_invcdf,
+ 'triweight': _triweight_invcdf,
+ 'triangular': lambda p: sqrt(2*p) - 1 if p < 1/2 else 1 - sqrt(2 - 2*p),
+ 'cosine': lambda p: 2 * asin(2*p - 1) / pi,
+}
+_kernel_invcdfs['gauss'] = _kernel_invcdfs['normal']
+_kernel_invcdfs['uniform'] = _kernel_invcdfs['rectangular']
+_kernel_invcdfs['epanechnikov'] = _kernel_invcdfs['parabolic']
+_kernel_invcdfs['biweight'] = _kernel_invcdfs['quartic']
+
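The _newton_raphson() helper above refines a rough inverse-CDF estimate by nudging x by (F(x) - p) / F'(x) until F(x) is within tolerance of p. A standalone sketch using the parabolic kernel, which also has a closed form to compare against (illustrative only, not part of the patch):

```python
from math import acos, cos, pi

def make_inv_cdf(F, f, estimate, tolerance=1e-12):
    def inv(p):
        x = estimate(p)
        while abs(diff := F(x) - p) > tolerance:
            x -= diff / f(x)          # Newton step: the derivative of the CDF is the pdf
        return x
    return inv

inv = make_inv_cdf(F=lambda t: -1/4 * t**3 + 3/4 * t + 1/2,   # parabolic kernel CDF
                   f=lambda t: 3/4 * (1.0 - t * t),           # its derivative (the pdf)
                   estimate=lambda p: 2.0 * p - 1.0)

closed_form = 2 * cos((acos(2 * 0.75 - 1) + pi) / 3)          # from _kernel_invcdfs above
print(round(inv(0.75), 9), round(closed_form, 9))             # both ~0.347296355
```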
+def kde_random(data, h, kernel='normal', *, seed=None):
+ """Return a function that makes a random selection from the estimated
+ probability density function created by kde(data, h, kernel).
+
+ Providing a *seed* allows reproducible selections within a single
+ thread. The seed may be an integer, float, str, or bytes.
+
+ A StatisticsError will be raised if the *data* sequence is empty.
+
+ Example:
+
+ >>> data = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2]
+ >>> rand = kde_random(data, h=1.5, seed=8675309)
+ >>> new_selections = [rand() for i in range(10)]
+ >>> [round(x, 1) for x in new_selections]
+ [0.7, 6.2, 1.2, 6.9, 7.0, 1.8, 2.5, -0.5, -1.8, 5.6]
+
+ """
+ n = len(data)
+ if not n:
+ raise StatisticsError('Empty data sequence')
+
+ if not isinstance(data[0], (int, float)):
+ raise TypeError('Data sequence must contain ints or floats')
+
+ if h <= 0.0:
+ raise StatisticsError(f'Bandwidth h must be positive, not {h=!r}')
+
+ kernel_invcdf = _kernel_invcdfs.get(kernel)
+ if kernel_invcdf is None:
+ raise StatisticsError(f'Unknown kernel name: {kernel!r}')
+
+ prng = _random.Random(seed)
+ random = prng.random
+ choice = prng.choice
+
+ def rand():
+ return choice(data) + h * kernel_invcdf(random())
+
+ rand.__doc__ = f'Random KDE selection with {h=!r} and {kernel=!r}'
+
+ return rand
diff --git a/Lib/tabnanny.py b/Lib/tabnanny.py
index 7973f26f98..d06c4c221e 100755
--- a/Lib/tabnanny.py
+++ b/Lib/tabnanny.py
@@ -23,8 +23,6 @@
import os
import sys
import tokenize
-if not hasattr(tokenize, 'NL'):
- raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
__all__ = ["check", "NannyNag", "process_tokens"]
@@ -37,6 +35,7 @@ def errprint(*args):
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
+ sys.exit(1)
def main():
import getopt
@@ -46,7 +45,6 @@ def main():
opts, args = getopt.getopt(sys.argv[1:], "qv")
except getopt.error as msg:
errprint(msg)
- return
for o, a in opts:
if o == '-q':
filename_only = filename_only + 1
@@ -54,7 +52,6 @@ def main():
verbose = verbose + 1
if not args:
errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
- return
for arg in args:
check(arg)
@@ -114,6 +111,10 @@ def check(file):
errprint("%r: Indentation Error: %s" % (file, msg))
return
+ except SyntaxError as msg:
+ errprint("%r: Syntax Error: %s" % (file, msg))
+ return
+
except NannyNag as nag:
badline = nag.get_lineno()
line = nag.get_line()
@@ -275,6 +276,12 @@ def format_witnesses(w):
return prefix + " " + ', '.join(firsts)
def process_tokens(tokens):
+ try:
+ _process_tokens(tokens)
+ except TabError as e:
+ raise NannyNag(e.lineno, e.msg, e.text)
+
+def _process_tokens(tokens):
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
NEWLINE = tokenize.NEWLINE
diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py
index 9e688efb1e..0b8de96f1b 100644
--- a/Lib/test/_test_multiprocessing.py
+++ b/Lib/test/_test_multiprocessing.py
@@ -12,6 +12,7 @@
import sys
import os
import gc
+import importlib
import errno
import functools
import signal
@@ -19,10 +20,11 @@
import socket
import random
import logging
+import shutil
import subprocess
import struct
+import tempfile
import operator
-import pathlib
import pickle
import weakref
import warnings
@@ -50,7 +52,7 @@
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
-from multiprocessing.connection import wait, AuthenticationError
+from multiprocessing.connection import wait
from multiprocessing import util
@@ -255,6 +257,9 @@ def __call__(self, *args, **kwds):
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
+ # If not empty, limit which start method suites run this class.
+ START_METHODS: set[str] = set()
+ start_method = None # set by install_tests_in_module_dict()
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
@@ -324,8 +329,9 @@ def test_set_executable(self):
self.skipTest(f'test not appropriate for {self.TYPE}')
paths = [
sys.executable, # str
- sys.executable.encode(), # bytes
- pathlib.Path(sys.executable) # os.PathLike
+ os.fsencode(sys.executable), # bytes
+ os_helper.FakePath(sys.executable), # os.PathLike
+ os_helper.FakePath(os.fsencode(sys.executable)), # os.PathLike bytes
]
for path in paths:
self.set_executable(path)
@@ -505,6 +511,11 @@ def _test_process_mainthread_native_id(cls, q):
def _sleep_some(cls):
time.sleep(100)
+ @classmethod
+ def _sleep_some_event(cls, event):
+ event.set()
+ time.sleep(100)
+
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
@@ -513,7 +524,8 @@ def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
- p = self.Process(target=self._sleep_some)
+ event = self.Event()
+ p = self.Process(target=self._sleep_some_event, args=(event,))
p.daemon = True
p.start()
@@ -531,8 +543,11 @@ def _kill_process(self, meth):
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
- # XXX maybe terminating too soon causes the problems on Gentoo...
- time.sleep(1)
+ timeout = support.SHORT_TIMEOUT
+ if not event.wait(timeout):
+ p.terminate()
+ p.join()
+ self.fail(f"event not signaled in {timeout} seconds")
meth(p)
@@ -582,12 +597,16 @@ def test_cpu_count(self):
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
- p = self.Process(target=time.sleep, args=(DELTA,))
+ event = self.Event()
+ p = self.Process(target=event.wait, args=())
self.assertNotIn(p, self.active_children())
- p.daemon = True
- p.start()
- self.assertIn(p, self.active_children())
+ try:
+ p.daemon = True
+ p.start()
+ self.assertIn(p, self.active_children())
+ finally:
+ event.set()
p.join()
self.assertNotIn(p, self.active_children())
@@ -1332,6 +1351,23 @@ def _on_queue_feeder_error(e, obj):
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
+ def test_closed_queue_empty_exceptions(self):
+ # Assert that checking the emptiness of an unused closed queue
+ # does not raise an OSError. The rationale is that q.close() is
+ # a no-op upon construction and becomes effective once the queue
+ # has been used (e.g., by calling q.put()).
+ for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
+ q.close() # this is a no-op since the feeder thread is None
+ q.join_thread() # this is also a no-op
+ self.assertTrue(q.empty())
+
+ for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
+ q.put('foo') # make sure that the queue is 'used'
+ q.close() # close the feeder thread
+ q.join_thread() # make sure to join the feeder thread
+ with self.assertRaisesRegex(OSError, 'is closed'):
+ q.empty()
+
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
@@ -1345,6 +1381,66 @@ def test_closed_queue_put_get_exceptions(self):
class _TestLock(BaseTestCase):
+ @staticmethod
+ def _acquire(lock, l=None):
+ lock.acquire()
+ if l is not None:
+ l.append(repr(lock))
+
+ @staticmethod
+ def _acquire_event(lock, event):
+ lock.acquire()
+ event.set()
+ time.sleep(1.0)
+
+ def test_repr_lock(self):
+ if self.TYPE != 'processes':
+ self.skipTest('test not appropriate for {}'.format(self.TYPE))
+
+ lock = self.Lock()
+ self.assertEqual(f'', repr(lock))
+
+ lock.acquire()
+ self.assertEqual(f'', repr(lock))
+ lock.release()
+
+ tname = 'T1'
+ l = []
+ t = threading.Thread(target=self._acquire,
+ args=(lock, l),
+ name=tname)
+ t.start()
+ time.sleep(0.1)
+ self.assertEqual(f'', l[0])
+ lock.release()
+
+ t = threading.Thread(target=self._acquire,
+ args=(lock,),
+ name=tname)
+ t.start()
+ time.sleep(0.1)
+ self.assertEqual('', repr(lock))
+ lock.release()
+
+ pname = 'P1'
+ l = multiprocessing.Manager().list()
+ p = self.Process(target=self._acquire,
+ args=(lock, l),
+ name=pname)
+ p.start()
+ p.join()
+ self.assertEqual(f'', l[0])
+
+ lock = self.Lock()
+ event = self.Event()
+ p = self.Process(target=self._acquire_event,
+ args=(lock, event),
+ name='P2')
+ p.start()
+ event.wait()
+ self.assertEqual(f'', repr(lock))
+ p.terminate()
+
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
@@ -1352,6 +1448,68 @@ def test_lock(self):
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
+ @staticmethod
+ def _acquire_release(lock, timeout, l=None, n=1):
+ for _ in range(n):
+ lock.acquire()
+ if l is not None:
+ l.append(repr(lock))
+ time.sleep(timeout)
+ for _ in range(n):
+ lock.release()
+
+ def test_repr_rlock(self):
+ if self.TYPE != 'processes':
+ self.skipTest('test not appropriate for {}'.format(self.TYPE))
+
+ lock = self.RLock()
+ self.assertEqual('', repr(lock))
+
+ n = 3
+ for _ in range(n):
+ lock.acquire()
+ self.assertEqual(f'', repr(lock))
+ for _ in range(n):
+ lock.release()
+
+ t, l = [], []
+ for i in range(n):
+ t.append(threading.Thread(target=self._acquire_release,
+ args=(lock, 0.1, l, i+1),
+ name=f'T{i+1}'))
+ t[-1].start()
+ for t_ in t:
+ t_.join()
+ for i in range(n):
+ self.assertIn(f'', l)
+
+
+ t = threading.Thread(target=self._acquire_release,
+ args=(lock, 0.2),
+ name=f'T1')
+ t.start()
+ time.sleep(0.1)
+ self.assertEqual('', repr(lock))
+ time.sleep(0.2)
+
+ pname = 'P1'
+ l = multiprocessing.Manager().list()
+ p = self.Process(target=self._acquire_release,
+ args=(lock, 0.1, l),
+ name=pname)
+ p.start()
+ p.join()
+ self.assertEqual(f'', l[0])
+
+ event = self.Event()
+ lock = self.RLock()
+ p = self.Process(target=self._acquire_event,
+ args=(lock, event))
+ p.start()
+ event.wait()
+ self.assertEqual('', repr(lock))
+ p.join()
+
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
@@ -1432,14 +1590,13 @@ def f(cls, cond, sleeping, woken, timeout=None):
cond.release()
def assertReachesEventually(self, func, value):
- for i in range(10):
+ for _ in support.sleeping_retry(support.SHORT_TIMEOUT):
try:
if func() == value:
break
except NotImplementedError:
break
- time.sleep(DELTA)
- time.sleep(DELTA)
+
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
@@ -1461,20 +1618,17 @@ def test_notify(self):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
- self.addCleanup(p.join)
- p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
- p.daemon = True
- p.start()
- self.addCleanup(p.join)
+ t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
+ t.daemon = True
+ t.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
- time.sleep(DELTA)
- self.assertReturnsIfImplemented(0, get_value, woken)
+ self.assertReachesEventually(lambda: get_value(woken), 0)
# wake up one process/thread
cond.acquire()
@@ -1482,8 +1636,7 @@ def test_notify(self):
cond.release()
# check one process/thread has woken up
- time.sleep(DELTA)
- self.assertReturnsIfImplemented(1, get_value, woken)
+ self.assertReachesEventually(lambda: get_value(woken), 1)
# wake up another
cond.acquire()
@@ -1491,12 +1644,13 @@ def test_notify(self):
cond.release()
# check other has woken up
- time.sleep(DELTA)
- self.assertReturnsIfImplemented(2, get_value, woken)
+ self.assertReachesEventually(lambda: get_value(woken), 2)
# check state is not mucked up
self.check_invariant(cond)
- p.join()
+
+ threading_helper.join_thread(t)
+ join_process(p)
def test_notify_all(self):
cond = self.Condition()
@@ -1504,18 +1658,19 @@ def test_notify_all(self):
woken = self.Semaphore(0)
# start some threads/processes which will timeout
+ workers = []
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
- self.addCleanup(p.join)
+ workers.append(p)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
- self.addCleanup(t.join)
+ workers.append(t)
# wait for them all to sleep
for i in range(6):
@@ -1534,12 +1689,12 @@ def test_notify_all(self):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
- self.addCleanup(p.join)
+ workers.append(p)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
- self.addCleanup(t.join)
+ workers.append(t)
# wait for them to all sleep
for i in range(6):
@@ -1555,27 +1710,34 @@ def test_notify_all(self):
cond.release()
# check they have all woken
- self.assertReachesEventually(lambda: get_value(woken), 6)
+ for i in range(6):
+ woken.acquire()
+ self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
+ for w in workers:
+ # NOTE: join_process and join_thread are the same
+ threading_helper.join_thread(w)
+
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
+ workers = []
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
- self.addCleanup(p.join)
+ workers.append(p)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
- self.addCleanup(t.join)
+ workers.append(t)
# wait for them to all sleep
for i in range(6):
@@ -1610,6 +1772,10 @@ def test_notify_n(self):
# check state is not mucked up
self.check_invariant(cond)
+ for w in workers:
+ # NOTE: join_process and join_thread are the same
+ threading_helper.join_thread(w)
+
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
@@ -2812,8 +2978,8 @@ def test_release_task_refs(self):
self.pool.map(identity, objs)
del objs
- gc.collect() # For PyPy or other GCs.
time.sleep(DELTA) # let threaded cleanup code run
+ support.gc_collect() # For PyPy or other GCs.
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
@@ -3174,6 +3340,44 @@ def test_rapid_restart(self):
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
+
+class FakeConnection:
+ def send(self, payload):
+ pass
+
+ def recv(self):
+ return '#ERROR', pyqueue.Empty()
+
+class TestManagerExceptions(unittest.TestCase):
+ # Issue 106558: Manager exceptions avoids creating cyclic references.
+ def setUp(self):
+ self.mgr = multiprocessing.Manager()
+
+ def tearDown(self):
+ self.mgr.shutdown()
+ self.mgr.join()
+
+ def test_queue_get(self):
+ queue = self.mgr.Queue()
+ if gc.isenabled():
+ gc.disable()
+ self.addCleanup(gc.enable)
+ try:
+ queue.get_nowait()
+ except pyqueue.Empty as e:
+ wr = weakref.ref(e)
+ self.assertEqual(wr(), None)
+
+ def test_dispatch(self):
+ if gc.isenabled():
+ gc.disable()
+ self.addCleanup(gc.enable)
+ try:
+ multiprocessing.managers.dispatch(FakeConnection(), None, None)
+ except pyqueue.Empty as e:
+ wr = weakref.ref(e)
+ self.assertEqual(wr(), None)
+
#
#
#
@@ -4462,6 +4666,59 @@ def test_shared_memory_cleaned_after_process_termination(self):
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
+ @unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
+ def test_shared_memory_untracking(self):
+ # gh-82300: When a separate Python process accesses shared memory
+ # with track=False, it must not cause the memory to be deleted
+ # when terminating.
+ cmd = '''if 1:
+ import sys
+ from multiprocessing.shared_memory import SharedMemory
+ mem = SharedMemory(create=False, name=sys.argv[1], track=False)
+ mem.close()
+ '''
+ mem = shared_memory.SharedMemory(create=True, size=10)
+ # The resource tracker shares pipes with the subprocess, and so
+ # err existing means that the tracker process has terminated now.
+ try:
+ rc, out, err = script_helper.assert_python_ok("-c", cmd, mem.name)
+ self.assertNotIn(b"resource_tracker", err)
+ self.assertEqual(rc, 0)
+ mem2 = shared_memory.SharedMemory(create=False, name=mem.name)
+ mem2.close()
+ finally:
+ try:
+ mem.unlink()
+ except OSError:
+ pass
+ mem.close()
+
+ @unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
+ def test_shared_memory_tracking(self):
+ # gh-82300: When a separate Python process accesses shared memory
+ # with track=True, it must cause the memory to be deleted when
+ # terminating.
+ cmd = '''if 1:
+ import sys
+ from multiprocessing.shared_memory import SharedMemory
+ mem = SharedMemory(create=False, name=sys.argv[1], track=True)
+ mem.close()
+ '''
+ mem = shared_memory.SharedMemory(create=True, size=10)
+ try:
+ rc, out, err = script_helper.assert_python_ok("-c", cmd, mem.name)
+ self.assertEqual(rc, 0)
+ self.assertIn(
+ b"resource_tracker: There appear to be 1 leaked "
+ b"shared_memory objects to clean up at shutdown", err)
+ finally:
+ try:
+ mem.unlink()
+ except OSError:
+ pass
+ resource_tracker.unregister(mem._name, "shared_memory")
+ mem.close()
+
#
# Test to verify that `Finalize` works.
#
@@ -4571,7 +4828,7 @@ def make_finalizers():
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
- sys.setswitchinterval(1e-6)
+ support.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
@@ -5557,8 +5814,9 @@ def create_and_register_resource(rtype):
'''
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
- if rtype == "noop":
+ if rtype in ("noop", "dummy"):
# Artefact resource type used by the resource_tracker
+ # or tests
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
@@ -5638,6 +5896,8 @@ def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
+ @unittest.skipIf(sys.platform.startswith("netbsd"),
+ "gh-125620: Skip on NetBSD due to long wait for SIGKILL process termination.")
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@@ -5678,6 +5938,59 @@ def test_too_long_name_resource(self):
with self.assertRaises(ValueError):
resource_tracker.register(too_long_name_resource, rtype)
+ def _test_resource_tracker_leak_resources(self, cleanup):
+ # We use a separate instance for testing, since the main global
+ # _resource_tracker may be used to watch test infrastructure.
+ from multiprocessing.resource_tracker import ResourceTracker
+ tracker = ResourceTracker()
+ tracker.ensure_running()
+ self.assertTrue(tracker._check_alive())
+
+ self.assertIsNone(tracker._exitcode)
+ tracker.register('somename', 'dummy')
+ if cleanup:
+ tracker.unregister('somename', 'dummy')
+ expected_exit_code = 0
+ else:
+ expected_exit_code = 1
+
+ self.assertTrue(tracker._check_alive())
+ self.assertIsNone(tracker._exitcode)
+ tracker._stop()
+ self.assertEqual(tracker._exitcode, expected_exit_code)
+
+ def test_resource_tracker_exit_code(self):
+ """
+ Test the exit code of the resource tracker.
+
+ If no leaked resources were found, exit code should be 0, otherwise 1
+ """
+ for cleanup in [True, False]:
+ with self.subTest(cleanup=cleanup):
+ self._test_resource_tracker_leak_resources(
+ cleanup=cleanup,
+ )
+
+ @unittest.skipUnless(hasattr(signal, "pthread_sigmask"), "pthread_sigmask is not available")
+ def test_resource_tracker_blocked_signals(self):
+ #
+ # gh-127586: Check that resource_tracker does not override blocked signals of caller.
+ #
+ from multiprocessing.resource_tracker import ResourceTracker
+ orig_sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, set())
+ signals = {signal.SIGTERM, signal.SIGINT, signal.SIGUSR1}
+
+ try:
+ for sig in signals:
+ signal.pthread_sigmask(signal.SIG_SETMASK, {sig})
+ self.assertEqual(signal.pthread_sigmask(signal.SIG_BLOCK, set()), {sig})
+ tracker = ResourceTracker()
+ tracker.ensure_running()
+ self.assertEqual(signal.pthread_sigmask(signal.SIG_BLOCK, set()), {sig})
+ tracker._stop()
+ finally:
+ # restore sigmask to what it was before executing test
+ signal.pthread_sigmask(signal.SIG_SETMASK, orig_sigmask)
class TestSimpleQueue(unittest.TestCase):
@@ -5691,6 +6004,15 @@ def _test_empty(cls, queue, child_can_start, parent_can_continue):
finally:
parent_can_continue.set()
+ def test_empty_exceptions(self):
+ # Assert that checking emptiness of a closed queue raises
+ # an OSError, independently of whether the queue was used
+ # or not. This differs from Queue and JoinableQueue.
+ q = multiprocessing.SimpleQueue()
+ q.close() # close the pipe
+ with self.assertRaisesRegex(OSError, 'is closed'):
+ q.empty()
+
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
@@ -6037,6 +6359,99 @@ def submain(): pass
self.assertFalse(err, msg=err.decode('utf-8'))
+class _TestAtExit(BaseTestCase):
+
+ ALLOWED_TYPES = ('processes',)
+
+ @classmethod
+ def _write_file_at_exit(self, output_path):
+ import atexit
+ def exit_handler():
+ with open(output_path, 'w') as f:
+ f.write("deadbeef")
+ atexit.register(exit_handler)
+
+ def test_atexit(self):
+ # gh-83856
+ with os_helper.temp_dir() as temp_dir:
+ output_path = os.path.join(temp_dir, 'output.txt')
+ p = self.Process(target=self._write_file_at_exit, args=(output_path,))
+ p.start()
+ p.join()
+ with open(output_path) as f:
+ self.assertEqual(f.read(), 'deadbeef')
+
+
+class _TestSpawnedSysPath(BaseTestCase):
+ """Test that sys.path is setup in forkserver and spawn processes."""
+
+ ALLOWED_TYPES = {'processes'}
+ # Not applicable to fork which inherits everything from the process as is.
+ START_METHODS = {"forkserver", "spawn"}
+
+ def setUp(self):
+ self._orig_sys_path = list(sys.path)
+ self._temp_dir = tempfile.mkdtemp(prefix="test_sys_path-")
+ self._mod_name = "unique_test_mod"
+ module_path = os.path.join(self._temp_dir, f"{self._mod_name}.py")
+ with open(module_path, "w", encoding="utf-8") as mod:
+ mod.write("# A simple test module\n")
+ sys.path[:] = [p for p in sys.path if p] # remove any existing ""s
+ sys.path.insert(0, self._temp_dir)
+ sys.path.insert(0, "") # Replaced with an abspath in child.
+ self.assertIn(self.start_method, self.START_METHODS)
+ self._ctx = multiprocessing.get_context(self.start_method)
+
+ def tearDown(self):
+ sys.path[:] = self._orig_sys_path
+ shutil.rmtree(self._temp_dir, ignore_errors=True)
+
+ @staticmethod
+ def enq_imported_module_names(queue):
+ queue.put(tuple(sys.modules))
+
+ def test_forkserver_preload_imports_sys_path(self):
+ if self._ctx.get_start_method() != "forkserver":
+ self.skipTest("forkserver specific test.")
+ self.assertNotIn(self._mod_name, sys.modules)
+ multiprocessing.forkserver._forkserver._stop() # Must be fresh.
+ self._ctx.set_forkserver_preload(
+ ["test.test_multiprocessing_forkserver", self._mod_name])
+ q = self._ctx.Queue()
+ proc = self._ctx.Process(
+ target=self.enq_imported_module_names, args=(q,))
+ proc.start()
+ proc.join()
+ child_imported_modules = q.get()
+ q.close()
+ self.assertIn(self._mod_name, child_imported_modules)
+
+ @staticmethod
+ def enq_sys_path_and_import(queue, mod_name):
+ queue.put(sys.path)
+ try:
+ importlib.import_module(mod_name)
+ except ImportError as exc:
+ queue.put(exc)
+ else:
+ queue.put(None)
+
+ def test_child_sys_path(self):
+ q = self._ctx.Queue()
+ proc = self._ctx.Process(
+ target=self.enq_sys_path_and_import, args=(q, self._mod_name))
+ proc.start()
+ proc.join()
+ child_sys_path = q.get()
+ import_error = q.get()
+ q.close()
+ self.assertNotIn("", child_sys_path) # replaced by an abspath
+ self.assertIn(self._temp_dir, child_sys_path) # our addition
+ # ignore the first element, it is the absolute "" replacement
+ self.assertEqual(child_sys_path[1:], sys.path[1:])
+ self.assertIsNone(import_error, msg=f"child could not import {self._mod_name}")
+
+
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in not_exported are excluded
@@ -6061,6 +6476,46 @@ def test_spawn_sys_executable_none_allows_import(self):
self.assertEqual(rc, 0)
self.assertFalse(err, msg=err.decode('utf-8'))
+ def test_large_pool(self):
+ #
+ # gh-89240: Check that large pools are always okay
+ #
+ testfn = os_helper.TESTFN
+ self.addCleanup(os_helper.unlink, testfn)
+ with open(testfn, 'w', encoding='utf-8') as f:
+ f.write(textwrap.dedent('''\
+ import multiprocessing
+ def f(x): return x*x
+ if __name__ == '__main__':
+ with multiprocessing.Pool(200) as p:
+ print(sum(p.map(f, range(1000))))
+ '''))
+ rc, out, err = script_helper.assert_python_ok(testfn)
+ self.assertEqual("332833500", out.decode('utf-8').strip())
+ self.assertFalse(err, msg=err.decode('utf-8'))
+
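The literal asserted above is simply the sum of squares below 1000, which is easy to sanity-check:

    # 0**2 + 1**2 + ... + 999**2 == (n - 1) * n * (2n - 1) // 6 with n == 1000
    assert sum(x * x for x in range(1000)) == 999 * 1000 * 1999 // 6 == 332833500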
+ def test_forked_thread_not_started(self):
+ # gh-134381: Ensure that a thread that has not been started yet in
+ # the parent process can be started within a forked child process.
+
+ if multiprocessing.get_start_method() != "fork":
+ self.skipTest("fork specific test")
+
+ q = multiprocessing.Queue()
+ t = threading.Thread(target=lambda: q.put("done"), daemon=True)
+
+ def child():
+ t.start()
+ t.join()
+
+ p = multiprocessing.Process(target=child)
+ p.start()
+ p.join(support.SHORT_TIMEOUT)
+
+ self.assertEqual(p.exitcode, 0)
+ self.assertEqual(q.get_nowait(), "done")
+ close_queue(q)
+
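Stripped of the test scaffolding, gh-134381 reduces to the POSIX-only pattern below: a Thread object created, but not started, before os.fork() is started inside the child. A minimal sketch:

    import os
    import threading

    t = threading.Thread(target=lambda: print("started in child", os.getpid()))

    if (pid := os.fork()) == 0:    # child: start the thread created pre-fork
        t.start()
        t.join()
        os._exit(0)
    else:                          # parent: reap the child
        os.waitpid(pid, 0)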
#
# Mixins
@@ -6213,6 +6668,8 @@ def install_tests_in_module_dict(remote_globs, start_method,
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
+ if base.START_METHODS and start_method not in base.START_METHODS:
+ continue # class not intended for this start method.
for type_ in base.ALLOWED_TYPES:
if only_type and type_ != only_type:
continue
@@ -6226,6 +6683,7 @@ class Temp(base, Mixin, unittest.TestCase):
Temp = hashlib_helper.requires_hashdigest('sha256')(Temp)
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
+ Temp.start_method = start_method
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
if only_type:
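The two hunks above teach install_tests_in_module_dict() to respect an optional START_METHODS set on a test class and to record the active start method on each generated class. A toy illustration of that filter, with invented names:

    class FakeSpawnOnlyTests:
        START_METHODS = {"forkserver", "spawn"}

    def install(base, start_method):
        if base.START_METHODS and start_method not in base.START_METHODS:
            return None                       # class not intended for this start method
        temp = type(f"{base.__name__}_{start_method}", (base,), {})
        temp.start_method = start_method      # mirrors Temp.start_method above
        return temp

    assert install(FakeSpawnOnlyTests, "fork") is None
    assert install(FakeSpawnOnlyTests, "spawn").start_method == "spawn"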
diff --git a/Lib/test/_test_venv_multiprocessing.py b/Lib/test/_test_venv_multiprocessing.py
new file mode 100644
index 0000000000..ad985dd8d5
--- /dev/null
+++ b/Lib/test/_test_venv_multiprocessing.py
@@ -0,0 +1,40 @@
+import multiprocessing
+import random
+import sys
+
+def fill_queue(queue, code):
+ queue.put(code)
+
+
+def drain_queue(queue, code):
+ if code != queue.get():
+ sys.exit(1)
+
+
+def test_func():
+ code = random.randrange(0, 1000)
+ queue = multiprocessing.Queue()
+ fill_pool = multiprocessing.Process(
+ target=fill_queue,
+ args=(queue, code)
+ )
+ drain_pool = multiprocessing.Process(
+ target=drain_queue,
+ args=(queue, code)
+ )
+ drain_pool.start()
+ fill_pool.start()
+ fill_pool.join()
+ drain_pool.join()
+
+
+def main():
+ multiprocessing.set_start_method('spawn')
+ test_pool = multiprocessing.Process(target=test_func)
+ test_pool.start()
+ test_pool.join()
+ sys.exit(test_pool.exitcode)
+
+
+if __name__ == "__main__":
+ main()
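The helper script routes everything through set_start_method('spawn'). For comparison, the same parent/child queue handshake can be written against an explicit context, which keeps the start-method choice local instead of mutating process-global state; this sketch is not part of the test:

    import multiprocessing

    def produce(q):
        q.put("ping")

    if __name__ == "__main__":
        ctx = multiprocessing.get_context("spawn")   # local choice of start method
        q = ctx.Queue()
        p = ctx.Process(target=produce, args=(q,))
        p.start()
        print(q.get())                               # "ping"
        p.join()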
diff --git a/Lib/test/decimaltestdata/abs.decTest b/Lib/test/decimaltestdata/abs.decTest
index 01f73d7766..569b8fcd84 100644
--- a/Lib/test/decimaltestdata/abs.decTest
+++ b/Lib/test/decimaltestdata/abs.decTest
@@ -20,7 +20,7 @@
version: 2.59
-- This set of tests primarily tests the existence of the operator.
--- Additon, subtraction, rounding, and more overflows are tested
+-- Addition, subtraction, rounding, and more overflows are tested
-- elsewhere.
precision: 9
diff --git a/Lib/test/decimaltestdata/ddFMA.decTest b/Lib/test/decimaltestdata/ddFMA.decTest
index 9094fc015b..7f2e523037 100644
--- a/Lib/test/decimaltestdata/ddFMA.decTest
+++ b/Lib/test/decimaltestdata/ddFMA.decTest
@@ -1663,7 +1663,7 @@ ddfma375087 fma 1 12345678 1E-33 -> 12345678.00000001 Inexac
ddfma375088 fma 1 12345678 1E-34 -> 12345678.00000001 Inexact Rounded
ddfma375089 fma 1 12345678 1E-35 -> 12345678.00000001 Inexact Rounded
--- desctructive subtraction (from remainder tests)
+-- destructive subtraction (from remainder tests)
-- +++ some of these will be off-by-one remainder vs remainderNear
diff --git a/Lib/test/decimaltestdata/ddQuantize.decTest b/Lib/test/decimaltestdata/ddQuantize.decTest
index 9177620169..e1c5674d9a 100644
--- a/Lib/test/decimaltestdata/ddQuantize.decTest
+++ b/Lib/test/decimaltestdata/ddQuantize.decTest
@@ -462,7 +462,7 @@ ddqua520 quantize 1.234 1e359 -> 0E+359 Inexact Rounded
ddqua521 quantize 123.456 1e359 -> 0E+359 Inexact Rounded
ddqua522 quantize 1.234 1e359 -> 0E+359 Inexact Rounded
ddqua523 quantize 123.456 1e359 -> 0E+359 Inexact Rounded
--- next four are "won't fit" overfl
+-- next four are "won't fit" overflow
ddqua526 quantize 1.234 1e-299 -> NaN Invalid_operation
ddqua527 quantize 123.456 1e-299 -> NaN Invalid_operation
ddqua528 quantize 1.234 1e-299 -> NaN Invalid_operation
diff --git a/Lib/test/decimaltestdata/ddRemainder.decTest b/Lib/test/decimaltestdata/ddRemainder.decTest
index 5bd1e32d01..b1866d39a2 100644
--- a/Lib/test/decimaltestdata/ddRemainder.decTest
+++ b/Lib/test/decimaltestdata/ddRemainder.decTest
@@ -422,7 +422,7 @@ ddrem757 remainder 1 sNaN -> NaN Invalid_operation
ddrem758 remainder 1000 sNaN -> NaN Invalid_operation
ddrem759 remainder Inf -sNaN -> -NaN Invalid_operation
--- propaging NaNs
+-- propagating NaNs
ddrem760 remainder NaN1 NaN7 -> NaN1
ddrem761 remainder sNaN2 NaN8 -> NaN2 Invalid_operation
ddrem762 remainder NaN3 sNaN9 -> NaN9 Invalid_operation
diff --git a/Lib/test/decimaltestdata/ddRemainderNear.decTest b/Lib/test/decimaltestdata/ddRemainderNear.decTest
index 6ba64ebafe..bbe82ea374 100644
--- a/Lib/test/decimaltestdata/ddRemainderNear.decTest
+++ b/Lib/test/decimaltestdata/ddRemainderNear.decTest
@@ -450,7 +450,7 @@ ddrmn757 remaindernear 1 sNaN -> NaN Invalid_operation
ddrmn758 remaindernear 1000 sNaN -> NaN Invalid_operation
ddrmn759 remaindernear Inf -sNaN -> -NaN Invalid_operation
--- propaging NaNs
+-- propagating NaNs
ddrmn760 remaindernear NaN1 NaN7 -> NaN1
ddrmn761 remaindernear sNaN2 NaN8 -> NaN2 Invalid_operation
ddrmn762 remaindernear NaN3 sNaN9 -> NaN9 Invalid_operation
diff --git a/Lib/test/decimaltestdata/dqRemainder.decTest b/Lib/test/decimaltestdata/dqRemainder.decTest
index bae8eae526..e0aaca3747 100644
--- a/Lib/test/decimaltestdata/dqRemainder.decTest
+++ b/Lib/test/decimaltestdata/dqRemainder.decTest
@@ -418,7 +418,7 @@ dqrem757 remainder 1 sNaN -> NaN Invalid_operation
dqrem758 remainder 1000 sNaN -> NaN Invalid_operation
dqrem759 remainder Inf -sNaN -> -NaN Invalid_operation
--- propaging NaNs
+-- propagating NaNs
dqrem760 remainder NaN1 NaN7 -> NaN1
dqrem761 remainder sNaN2 NaN8 -> NaN2 Invalid_operation
dqrem762 remainder NaN3 sNaN9 -> NaN9 Invalid_operation
diff --git a/Lib/test/decimaltestdata/dqRemainderNear.decTest b/Lib/test/decimaltestdata/dqRemainderNear.decTest
index b850626fe4..2c5c3f5074 100644
--- a/Lib/test/decimaltestdata/dqRemainderNear.decTest
+++ b/Lib/test/decimaltestdata/dqRemainderNear.decTest
@@ -450,7 +450,7 @@ dqrmn757 remaindernear 1 sNaN -> NaN Invalid_operation
dqrmn758 remaindernear 1000 sNaN -> NaN Invalid_operation
dqrmn759 remaindernear Inf -sNaN -> -NaN Invalid_operation
--- propaging NaNs
+-- propagating NaNs
dqrmn760 remaindernear NaN1 NaN7 -> NaN1
dqrmn761 remaindernear sNaN2 NaN8 -> NaN2 Invalid_operation
dqrmn762 remaindernear NaN3 sNaN9 -> NaN9 Invalid_operation
diff --git a/Lib/test/decimaltestdata/exp.decTest b/Lib/test/decimaltestdata/exp.decTest
index 6a7af23b62..e01d7a8f92 100644
--- a/Lib/test/decimaltestdata/exp.decTest
+++ b/Lib/test/decimaltestdata/exp.decTest
@@ -28,7 +28,7 @@ rounding: half_even
maxExponent: 384
minexponent: -383
--- basics (examples in specificiation, etc.)
+-- basics (examples in specification, etc.)
expx001 exp -Infinity -> 0
expx002 exp -10 -> 0.0000453999298 Inexact Rounded
expx003 exp -1 -> 0.367879441 Inexact Rounded
diff --git a/Lib/test/decimaltestdata/extra.decTest b/Lib/test/decimaltestdata/extra.decTest
index b630d8e3f9..31291202a3 100644
--- a/Lib/test/decimaltestdata/extra.decTest
+++ b/Lib/test/decimaltestdata/extra.decTest
@@ -156,7 +156,7 @@ extr1302 fma -Inf 0E-456 sNaN148 -> NaN Invalid_operation
-- max/min/max_mag/min_mag bug in 2.5.2/2.6/3.0: max(NaN, finite) gave
-- incorrect answers when the finite number required rounding; similarly
--- for the other thre functions
+-- for the other three functions
maxexponent: 999
minexponent: -999
precision: 6
diff --git a/Lib/test/decimaltestdata/remainder.decTest b/Lib/test/decimaltestdata/remainder.decTest
index 7a1061b1e6..4f59b33287 100644
--- a/Lib/test/decimaltestdata/remainder.decTest
+++ b/Lib/test/decimaltestdata/remainder.decTest
@@ -435,7 +435,7 @@ remx757 remainder 1 sNaN -> NaN Invalid_operation
remx758 remainder 1000 sNaN -> NaN Invalid_operation
remx759 remainder Inf -sNaN -> -NaN Invalid_operation
--- propaging NaNs
+-- propagating NaNs
remx760 remainder NaN1 NaN7 -> NaN1
remx761 remainder sNaN2 NaN8 -> NaN2 Invalid_operation
remx762 remainder NaN3 sNaN9 -> NaN9 Invalid_operation
diff --git a/Lib/test/decimaltestdata/remainderNear.decTest b/Lib/test/decimaltestdata/remainderNear.decTest
index b768b9e0cf..000b1424d8 100644
--- a/Lib/test/decimaltestdata/remainderNear.decTest
+++ b/Lib/test/decimaltestdata/remainderNear.decTest
@@ -498,7 +498,7 @@ rmnx758 remaindernear 1000 sNaN -> NaN Invalid_operation
rmnx759 remaindernear Inf sNaN -> NaN Invalid_operation
rmnx760 remaindernear NaN sNaN -> NaN Invalid_operation
--- propaging NaNs
+-- propagating NaNs
rmnx761 remaindernear NaN1 NaN7 -> NaN1
rmnx762 remaindernear sNaN2 NaN8 -> NaN2 Invalid_operation
rmnx763 remaindernear NaN3 -sNaN9 -> -NaN9 Invalid_operation
diff --git a/Lib/test/fork_wait.py b/Lib/test/fork_wait.py
new file mode 100644
index 0000000000..8c32895f5e
--- /dev/null
+++ b/Lib/test/fork_wait.py
@@ -0,0 +1,80 @@
+"""This test case provides support for checking forking and wait behavior.
+
+To test different wait behavior, override the wait_impl method.
+
+We want fork1() semantics -- only the forking thread survives in the
+child after a fork().
+
+On some systems (e.g. Solaris without posix threads) we find that all
+active threads survive in the child after a fork(); this is an error.
+"""
+
+import os, time, unittest
+import threading
+from test import support
+from test.support import threading_helper
+import warnings
+
+
+LONGSLEEP = 2
+SHORTSLEEP = 0.5
+NUM_THREADS = 4
+
+class ForkWait(unittest.TestCase):
+
+ def setUp(self):
+ self._threading_key = threading_helper.threading_setup()
+ self.alive = {}
+ self.stop = 0
+ self.threads = []
+
+ def tearDown(self):
+ # Stop threads
+ self.stop = 1
+ for thread in self.threads:
+ thread.join()
+ thread = None
+ self.threads.clear()
+ threading_helper.threading_cleanup(*self._threading_key)
+
+ def f(self, id):
+ while not self.stop:
+ self.alive[id] = os.getpid()
+ try:
+ time.sleep(SHORTSLEEP)
+ except OSError:
+ pass
+
+ def wait_impl(self, cpid, *, exitcode):
+ support.wait_process(cpid, exitcode=exitcode)
+
+ def test_wait(self):
+ for i in range(NUM_THREADS):
+ thread = threading.Thread(target=self.f, args=(i,))
+ thread.start()
+ self.threads.append(thread)
+
+ # busy-loop to wait for threads
+ for _ in support.sleeping_retry(support.SHORT_TIMEOUT):
+ if len(self.alive) >= NUM_THREADS:
+ break
+
+ a = sorted(self.alive.keys())
+ self.assertEqual(a, list(range(NUM_THREADS)))
+
+ prefork_lives = self.alive.copy()
+
+ # Ignore the warning about fork with threads.
+ with warnings.catch_warnings(category=DeprecationWarning,
+ action="ignore"):
+ if (cpid := os.fork()) == 0:
+ # Child
+ time.sleep(LONGSLEEP)
+ n = 0
+ for key in self.alive:
+ if self.alive[key] != prefork_lives[key]:
+ n += 1
+ os._exit(n)
+ else:
+ # Parent
+ self.wait_impl(cpid, exitcode=0)
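The fork1() semantics described in the docstring can be observed directly on POSIX: after os.fork(), only the calling thread survives in the child, and CPython resets the threading module's bookkeeping to match. A sketch (recent CPython also warns about forking a process with running threads, which the test above suppresses):

    import os
    import threading
    import time

    threading.Thread(target=time.sleep, args=(60,), daemon=True).start()

    if (pid := os.fork()) == 0:                # child
        os._exit(0 if threading.active_count() == 1 else 1)
    else:                                      # parent
        assert os.waitpid(pid, 0)[1] == 0      # child saw exactly one thread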
diff --git a/Lib/test/pyclbr_input.py b/Lib/test/pyclbr_input.py
new file mode 100644
index 0000000000..5535edbfa7
--- /dev/null
+++ b/Lib/test/pyclbr_input.py
@@ -0,0 +1,85 @@
+"""Test cases for test_pyclbr.py"""
+
+def f(): pass
+
+class Other(object):
+ @classmethod
+ def foo(c): pass
+
+ def om(self): pass
+
+class B (object):
+ def bm(self): pass
+
+class C (B):
+ d = 10
+
+ # This one is correctly considered by both test_pyclbr.py and pyclbr.py
+ # as a non-method of C.
+ foo = Other().foo
+
+ # This causes test_pyclbr.py to fail, but only because the
+ # introspection-based is_method() code in the test can't
+ # distinguish between this and a genuine method function like m().
+ #
+ # The pyclbr.py module gets this right as it parses the text.
+ om = Other.om
+ f = f
+
+ def m(self): pass
+
+ @staticmethod
+ def sm(self): pass
+
+ @classmethod
+ def cm(self): pass
+
+# Check that mangling is correctly handled
+
+class a:
+ def a(self): pass
+ def _(self): pass
+ def _a(self): pass
+ def __(self): pass
+ def ___(self): pass
+ def __a(self): pass
+
+class _:
+ def a(self): pass
+ def _(self): pass
+ def _a(self): pass
+ def __(self): pass
+ def ___(self): pass
+ def __a(self): pass
+
+class __:
+ def a(self): pass
+ def _(self): pass
+ def _a(self): pass
+ def __(self): pass
+ def ___(self): pass
+ def __a(self): pass
+
+class ___:
+ def a(self): pass
+ def _(self): pass
+ def _a(self): pass
+ def __(self): pass
+ def ___(self): pass
+ def __a(self): pass
+
+class _a:
+ def a(self): pass
+ def _(self): pass
+ def _a(self): pass
+ def __(self): pass
+ def ___(self): pass
+ def __a(self): pass
+
+class __a:
+ def a(self): pass
+ def _(self): pass
+ def _a(self): pass
+ def __(self): pass
+ def ___(self): pass
+ def __a(self): pass
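pyclbr_input.py is pure fixture data; test_pyclbr feeds a file like this through the stdlib pyclbr module roughly as below (the path entry is illustrative):

    import pyclbr

    # Parse the fixture without importing it and list each class's methods.
    info = pyclbr.readmodule_ex("pyclbr_input", path=["Lib/test"])
    for name, desc in info.items():
        if isinstance(desc, pyclbr.Class):
            print(name, sorted(desc.methods))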
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index 6c7e799798..26a8b16724 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -840,11 +840,27 @@ def python_is_optimized():
return final_opt not in ('', '-O0', '-Og')
-_header = 'nP'
+# From CPython 3.13.5
+Py_GIL_DISABLED = bool(sysconfig.get_config_var('Py_GIL_DISABLED'))
+
+# From CPython 3.13.5
+def requires_gil_enabled(msg="needs the GIL enabled"):
+ """Decorator for skipping tests on the free-threaded build."""
+ return unittest.skipIf(Py_GIL_DISABLED, msg)
+
+# From CPython 3.13.5
+def expected_failure_if_gil_disabled():
+ """Expect test failure if the GIL is disabled."""
+ if Py_GIL_DISABLED:
+ return unittest.expectedFailure
+ return lambda test_case: test_case
+
+# From CPython 3.13.5
+if Py_GIL_DISABLED:
+ _header = 'PHBBInP'
+else:
+ _header = 'nP'
_align = '0n'
-if hasattr(sys, "getobjects"):
- _header = '2P' + _header
- _align = '0P'
_vheader = _header + 'n'
def calcobjsize(fmt):
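The helpers ported from CPython 3.13.5 are intended as test decorators keyed off the Py_GIL_DISABLED sysconfig variable; a sketch of the expected usage (the test class and method names are hypothetical):

    import unittest
    from test import support

    class RefcountTests(unittest.TestCase):
        @support.requires_gil_enabled("exact refcounts differ on free-threaded builds")
        def test_exact_refcount(self):
            ...

        @support.expected_failure_if_gil_disabled()
        def test_known_free_threading_gap(self):
            ...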
@@ -2617,6 +2633,10 @@ def exceeds_recursion_limit():
'skipped on s390x')
HAVE_ASAN_FORK_BUG = check_sanitizer(address=True)
+# From CPython 3.13.5
+Py_TRACE_REFS = hasattr(sys, 'getobjects')
+
+
# From Cpython 3.13.5
@contextlib.contextmanager
def no_color():
@@ -2641,6 +2661,21 @@ def wrapper(*args, **kwargs):
return wrapper
+# From CPython 3.13.5
+def force_not_colorized_test_class(cls):
+ """Force the terminal not to be colorized for the entire test class."""
+ original_setUpClass = cls.setUpClass
+
+ @classmethod
+ @functools.wraps(cls.setUpClass)
+ def new_setUpClass(cls):
+ cls.enterClassContext(no_color())
+ original_setUpClass()
+
+ cls.setUpClass = new_setUpClass
+ return cls
+
+
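force_not_colorized_test_class wraps setUpClass so the no_color() context stays active for every test in the class; intended usage looks like this (the class name is illustrative):

    import unittest
    from test.support import force_not_colorized_test_class

    @force_not_colorized_test_class
    class TracebackOutputTests(unittest.TestCase):
        def test_plain_text(self):
            ...   # colorized output is forced off for the whole class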
# From python 3.12.8
class BrokenIter:
def __init__(self, init_raises=False, next_raises=False, iter_raises=False):
diff --git a/Lib/test/test__colorize.py b/Lib/test/test__colorize.py
index 056a5306ce..b2f0bb1386 100644
--- a/Lib/test/test__colorize.py
+++ b/Lib/test/test__colorize.py
@@ -10,8 +10,7 @@
@contextlib.contextmanager
def clear_env():
with EnvironmentVarGuard() as mock_env:
- for var in "FORCE_COLOR", "NO_COLOR", "PYTHON_COLORS":
- mock_env.unset(var)
+ mock_env.unset("FORCE_COLOR", "NO_COLOR", "PYTHON_COLORS", "TERM")
yield mock_env
diff --git a/Lib/test/test_array.py b/Lib/test/test_array.py
index be89bec522..0c20e27cfd 100644
--- a/Lib/test/test_array.py
+++ b/Lib/test/test_array.py
@@ -1285,8 +1285,6 @@ def check_overflow(self, lower, upper):
self.assertRaises(OverflowError, array.array, self.typecode, [upper+1])
self.assertRaises(OverflowError, a.__setitem__, 0, upper+1)
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_subclassing(self):
typecode = self.typecode
class ExaggeratingArray(array.array):
diff --git a/Lib/test/test_ast.py b/Lib/test/test_ast.py
deleted file mode 100644
index 1cac438250..0000000000
--- a/Lib/test/test_ast.py
+++ /dev/null
@@ -1,3236 +0,0 @@
-import ast
-import builtins
-import dis
-import enum
-import os
-import re
-import sys
-import textwrap
-import types
-import unittest
-import warnings
-import weakref
-from functools import partial
-from textwrap import dedent
-
-from test import support
-from test.support.import_helper import import_fresh_module
-from test.support import os_helper, script_helper
-from test.support.ast_helper import ASTTestMixin
-
-def to_tuple(t):
- if t is None or isinstance(t, (str, int, complex)) or t is Ellipsis:
- return t
- elif isinstance(t, list):
- return [to_tuple(e) for e in t]
- result = [t.__class__.__name__]
- if hasattr(t, 'lineno') and hasattr(t, 'col_offset'):
- result.append((t.lineno, t.col_offset))
- if hasattr(t, 'end_lineno') and hasattr(t, 'end_col_offset'):
- result[-1] += (t.end_lineno, t.end_col_offset)
- if t._fields is None:
- return tuple(result)
- for f in t._fields:
- result.append(to_tuple(getattr(t, f)))
- return tuple(result)
-
-
-# These tests are compiled through "exec"
-# There should be at least one test per statement
-exec_tests = [
- # None
- "None",
- # Module docstring
- "'module docstring'",
- # FunctionDef
- "def f(): pass",
- # FunctionDef with docstring
- "def f(): 'function docstring'",
- # FunctionDef with arg
- "def f(a): pass",
- # FunctionDef with arg and default value
- "def f(a=0): pass",
- # FunctionDef with varargs
- "def f(*args): pass",
- # FunctionDef with varargs as TypeVarTuple
- "def f(*args: *Ts): pass",
- # FunctionDef with varargs as unpacked Tuple
- "def f(*args: *tuple[int, ...]): pass",
- # FunctionDef with varargs as unpacked Tuple *and* TypeVarTuple
- "def f(*args: *tuple[int, *Ts]): pass",
- # FunctionDef with kwargs
- "def f(**kwargs): pass",
- # FunctionDef with all kind of args and docstring
- "def f(a, b=1, c=None, d=[], e={}, *args, f=42, **kwargs): 'doc for f()'",
- # FunctionDef with type annotation on return involving unpacking
- "def f() -> tuple[*Ts]: pass",
- "def f() -> tuple[int, *Ts]: pass",
- "def f() -> tuple[int, *tuple[int, ...]]: pass",
- # ClassDef
- "class C:pass",
- # ClassDef with docstring
- "class C: 'docstring for class C'",
- # ClassDef, new style class
- "class C(object): pass",
- # Return
- "def f():return 1",
- # Delete
- "del v",
- # Assign
- "v = 1",
- "a,b = c",
- "(a,b) = c",
- "[a,b] = c",
- # AnnAssign with unpacked types
- "x: tuple[*Ts]",
- "x: tuple[int, *Ts]",
- "x: tuple[int, *tuple[str, ...]]",
- # AugAssign
- "v += 1",
- # For
- "for v in v:pass",
- # While
- "while v:pass",
- # If
- "if v:pass",
- # If-Elif
- "if a:\n pass\nelif b:\n pass",
- # If-Elif-Else
- "if a:\n pass\nelif b:\n pass\nelse:\n pass",
- # With
- "with x as y: pass",
- "with x as y, z as q: pass",
- # Raise
- "raise Exception('string')",
- # TryExcept
- "try:\n pass\nexcept Exception:\n pass",
- # TryFinally
- "try:\n pass\nfinally:\n pass",
- # TryStarExcept
- "try:\n pass\nexcept* Exception:\n pass",
- # Assert
- "assert v",
- # Import
- "import sys",
- # ImportFrom
- "from sys import v",
- # Global
- "global v",
- # Expr
- "1",
- # Pass,
- "pass",
- # Break
- "for v in v:break",
- # Continue
- "for v in v:continue",
- # for statements with naked tuples (see http://bugs.python.org/issue6704)
- "for a,b in c: pass",
- "for (a,b) in c: pass",
- "for [a,b] in c: pass",
- # Multiline generator expression (test for .lineno & .col_offset)
- """(
- (
- Aa
- ,
- Bb
- )
- for
- Aa
- ,
- Bb in Cc
- )""",
- # dictcomp
- "{a : b for w in x for m in p if g}",
- # dictcomp with naked tuple
- "{a : b for v,w in x}",
- # setcomp
- "{r for l in x if g}",
- # setcomp with naked tuple
- "{r for l,m in x}",
- # AsyncFunctionDef
- "async def f():\n 'async function'\n await something()",
- # AsyncFor
- "async def f():\n async for e in i: 1\n else: 2",
- # AsyncWith
- "async def f():\n async with a as b: 1",
- # PEP 448: Additional Unpacking Generalizations
- "{**{1:2}, 2:3}",
- "{*{1, 2}, 3}",
- # Asynchronous comprehensions
- "async def f():\n [i async for b in c]",
- # Decorated FunctionDef
- "@deco1\n@deco2()\n@deco3(1)\ndef f(): pass",
- # Decorated AsyncFunctionDef
- "@deco1\n@deco2()\n@deco3(1)\nasync def f(): pass",
- # Decorated ClassDef
- "@deco1\n@deco2()\n@deco3(1)\nclass C: pass",
- # Decorator with generator argument
- "@deco(a for a in b)\ndef f(): pass",
- # Decorator with attribute
- "@a.b.c\ndef f(): pass",
- # Simple assignment expression
- "(a := 1)",
- # Positional-only arguments
- "def f(a, /,): pass",
- "def f(a, /, c, d, e): pass",
- "def f(a, /, c, *, d, e): pass",
- "def f(a, /, c, *, d, e, **kwargs): pass",
- # Positional-only arguments with defaults
- "def f(a=1, /,): pass",
- "def f(a=1, /, b=2, c=4): pass",
- "def f(a=1, /, b=2, *, c=4): pass",
- "def f(a=1, /, b=2, *, c): pass",
- "def f(a=1, /, b=2, *, c=4, **kwargs): pass",
- "def f(a=1, /, b=2, *, c, **kwargs): pass",
- # Type aliases
- "type X = int",
- "type X[T] = int",
- "type X[T, *Ts, **P] = (T, Ts, P)",
- "type X[T: int, *Ts, **P] = (T, Ts, P)",
- "type X[T: (int, str), *Ts, **P] = (T, Ts, P)",
- # Generic classes
- "class X[T]: pass",
- "class X[T, *Ts, **P]: pass",
- "class X[T: int, *Ts, **P]: pass",
- "class X[T: (int, str), *Ts, **P]: pass",
- # Generic functions
- "def f[T](): pass",
- "def f[T, *Ts, **P](): pass",
- "def f[T: int, *Ts, **P](): pass",
- "def f[T: (int, str), *Ts, **P](): pass",
-]
-
-# These are compiled through "single"
-# because of overlap with "eval", it just tests what
-# can't be tested with "eval"
-single_tests = [
- "1+2"
-]
-
-# These are compiled through "eval"
-# It should test all expressions
-eval_tests = [
- # None
- "None",
- # BoolOp
- "a and b",
- # BinOp
- "a + b",
- # UnaryOp
- "not v",
- # Lambda
- "lambda:None",
- # Dict
- "{ 1:2 }",
- # Empty dict
- "{}",
- # Set
- "{None,}",
- # Multiline dict (test for .lineno & .col_offset)
- """{
- 1
- :
- 2
- }""",
- # ListComp
- "[a for b in c if d]",
- # GeneratorExp
- "(a for b in c if d)",
- # Comprehensions with multiple for targets
- "[(a,b) for a,b in c]",
- "[(a,b) for (a,b) in c]",
- "[(a,b) for [a,b] in c]",
- "{(a,b) for a,b in c}",
- "{(a,b) for (a,b) in c}",
- "{(a,b) for [a,b] in c}",
- "((a,b) for a,b in c)",
- "((a,b) for (a,b) in c)",
- "((a,b) for [a,b] in c)",
- # Yield - yield expressions can't work outside a function
- #
- # Compare
- "1 < 2 < 3",
- # Call
- "f(1,2,c=3,*d,**e)",
- # Call with multi-character starred
- "f(*[0, 1])",
- # Call with a generator argument
- "f(a for a in b)",
- # Num
- "10",
- # Str
- "'string'",
- # Attribute
- "a.b",
- # Subscript
- "a[b:c]",
- # Name
- "v",
- # List
- "[1,2,3]",
- # Empty list
- "[]",
- # Tuple
- "1,2,3",
- # Tuple
- "(1,2,3)",
- # Empty tuple
- "()",
- # Combination
- "a.b.c.d(a.b[1:2])",
-]
-
-# TODO: expr_context, slice, boolop, operator, unaryop, cmpop, comprehension
-# excepthandler, arguments, keywords, alias
-
-class AST_Tests(unittest.TestCase):
- maxDiff = None
-
- def _is_ast_node(self, name, node):
- if not isinstance(node, type):
- return False
- if "ast" not in node.__module__:
- return False
- return name != 'AST' and name[0].isupper()
-
- def _assertTrueorder(self, ast_node, parent_pos):
- if not isinstance(ast_node, ast.AST) or ast_node._fields is None:
- return
- if isinstance(ast_node, (ast.expr, ast.stmt, ast.excepthandler)):
- node_pos = (ast_node.lineno, ast_node.col_offset)
- self.assertGreaterEqual(node_pos, parent_pos)
- parent_pos = (ast_node.lineno, ast_node.col_offset)
- for name in ast_node._fields:
- value = getattr(ast_node, name)
- if isinstance(value, list):
- first_pos = parent_pos
- if value and name == 'decorator_list':
- first_pos = (value[0].lineno, value[0].col_offset)
- for child in value:
- self._assertTrueorder(child, first_pos)
- elif value is not None:
- self._assertTrueorder(value, parent_pos)
- self.assertEqual(ast_node._fields, ast_node.__match_args__)
-
- def test_AST_objects(self):
- x = ast.AST()
- self.assertEqual(x._fields, ())
- x.foobar = 42
- self.assertEqual(x.foobar, 42)
- self.assertEqual(x.__dict__["foobar"], 42)
-
- with self.assertRaises(AttributeError):
- x.vararg
-
- with self.assertRaises(TypeError):
- # "ast.AST constructor takes 0 positional arguments"
- ast.AST(2)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_AST_garbage_collection(self):
- class X:
- pass
- a = ast.AST()
- a.x = X()
- a.x.a = a
- ref = weakref.ref(a.x)
- del a
- support.gc_collect()
- self.assertIsNone(ref())
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_snippets(self):
- for input, output, kind in ((exec_tests, exec_results, "exec"),
- (single_tests, single_results, "single"),
- (eval_tests, eval_results, "eval")):
- for i, o in zip(input, output):
- with self.subTest(action="parsing", input=i):
- ast_tree = compile(i, "?", kind, ast.PyCF_ONLY_AST)
- self.assertEqual(to_tuple(ast_tree), o)
- self._assertTrueorder(ast_tree, (0, 0))
- with self.subTest(action="compiling", input=i, kind=kind):
- compile(ast_tree, "?", kind)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_ast_validation(self):
- # compile() is the only function that calls PyAST_Validate
- snippets_to_validate = exec_tests + single_tests + eval_tests
- for snippet in snippets_to_validate:
- tree = ast.parse(snippet)
- compile(tree, '', 'exec')
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_invalid_position_information(self):
- invalid_linenos = [
- (10, 1), (-10, -11), (10, -11), (-5, -2), (-5, 1)
- ]
-
- for lineno, end_lineno in invalid_linenos:
- with self.subTest(f"Check invalid linenos {lineno}:{end_lineno}"):
- snippet = "a = 1"
- tree = ast.parse(snippet)
- tree.body[0].lineno = lineno
- tree.body[0].end_lineno = end_lineno
- with self.assertRaises(ValueError):
- compile(tree, '', 'exec')
-
- invalid_col_offsets = [
- (10, 1), (-10, -11), (10, -11), (-5, -2), (-5, 1)
- ]
- for col_offset, end_col_offset in invalid_col_offsets:
- with self.subTest(f"Check invalid col_offset {col_offset}:{end_col_offset}"):
- snippet = "a = 1"
- tree = ast.parse(snippet)
- tree.body[0].col_offset = col_offset
- tree.body[0].end_col_offset = end_col_offset
- with self.assertRaises(ValueError):
- compile(tree, '', 'exec')
-
- # XXX RUSTPYTHON: we always require that end ranges be present
- @unittest.expectedFailure
- def test_compilation_of_ast_nodes_with_default_end_position_values(self):
- tree = ast.Module(body=[
- ast.Import(names=[ast.alias(name='builtins', lineno=1, col_offset=0)], lineno=1, col_offset=0),
- ast.Import(names=[ast.alias(name='traceback', lineno=0, col_offset=0)], lineno=0, col_offset=1)
- ], type_ignores=[])
-
- # Check that compilation doesn't crash. Note: this may crash explicitly only on debug mode.
- compile(tree, "", "exec")
-
- def test_slice(self):
- slc = ast.parse("x[::]").body[0].value.slice
- self.assertIsNone(slc.upper)
- self.assertIsNone(slc.lower)
- self.assertIsNone(slc.step)
-
- def test_from_import(self):
- im = ast.parse("from . import y").body[0]
- self.assertIsNone(im.module)
-
- def test_non_interned_future_from_ast(self):
- mod = ast.parse("from __future__ import division")
- self.assertIsInstance(mod.body[0], ast.ImportFrom)
- mod.body[0].module = " __future__ ".strip()
- compile(mod, "", "exec")
-
- def test_alias(self):
- im = ast.parse("from bar import y").body[0]
- self.assertEqual(len(im.names), 1)
- alias = im.names[0]
- self.assertEqual(alias.name, 'y')
- self.assertIsNone(alias.asname)
- self.assertEqual(alias.lineno, 1)
- self.assertEqual(alias.end_lineno, 1)
- self.assertEqual(alias.col_offset, 16)
- self.assertEqual(alias.end_col_offset, 17)
-
- im = ast.parse("from bar import *").body[0]
- alias = im.names[0]
- self.assertEqual(alias.name, '*')
- self.assertIsNone(alias.asname)
- self.assertEqual(alias.lineno, 1)
- self.assertEqual(alias.end_lineno, 1)
- self.assertEqual(alias.col_offset, 16)
- self.assertEqual(alias.end_col_offset, 17)
-
- im = ast.parse("from bar import y as z").body[0]
- alias = im.names[0]
- self.assertEqual(alias.name, "y")
- self.assertEqual(alias.asname, "z")
- self.assertEqual(alias.lineno, 1)
- self.assertEqual(alias.end_lineno, 1)
- self.assertEqual(alias.col_offset, 16)
- self.assertEqual(alias.end_col_offset, 22)
-
- im = ast.parse("import bar as foo").body[0]
- alias = im.names[0]
- self.assertEqual(alias.name, "bar")
- self.assertEqual(alias.asname, "foo")
- self.assertEqual(alias.lineno, 1)
- self.assertEqual(alias.end_lineno, 1)
- self.assertEqual(alias.col_offset, 7)
- self.assertEqual(alias.end_col_offset, 17)
-
- def test_base_classes(self):
- self.assertTrue(issubclass(ast.For, ast.stmt))
- self.assertTrue(issubclass(ast.Name, ast.expr))
- self.assertTrue(issubclass(ast.stmt, ast.AST))
- self.assertTrue(issubclass(ast.expr, ast.AST))
- self.assertTrue(issubclass(ast.comprehension, ast.AST))
- self.assertTrue(issubclass(ast.Gt, ast.AST))
-
- def test_import_deprecated(self):
- ast = import_fresh_module('ast')
- depr_regex = (
- r'ast\.{} is deprecated and will be removed in Python 3.14; '
- r'use ast\.Constant instead'
- )
- for name in 'Num', 'Str', 'Bytes', 'NameConstant', 'Ellipsis':
- with self.assertWarnsRegex(DeprecationWarning, depr_regex.format(name)):
- getattr(ast, name)
-
- def test_field_attr_existence_deprecated(self):
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', '', DeprecationWarning)
- from ast import Num, Str, Bytes, NameConstant, Ellipsis
-
- for name in ('Num', 'Str', 'Bytes', 'NameConstant', 'Ellipsis'):
- item = getattr(ast, name)
- if self._is_ast_node(name, item):
- with self.subTest(item):
- with self.assertWarns(DeprecationWarning):
- x = item()
- if isinstance(x, ast.AST):
- self.assertIs(type(x._fields), tuple)
-
- def test_field_attr_existence(self):
- for name, item in ast.__dict__.items():
- # These emit DeprecationWarnings
- if name in {'Num', 'Str', 'Bytes', 'NameConstant', 'Ellipsis'}:
- continue
- # constructor has a different signature
- if name == 'Index':
- continue
- if self._is_ast_node(name, item):
- x = item()
- if isinstance(x, ast.AST):
- self.assertIs(type(x._fields), tuple)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_arguments(self):
- x = ast.arguments()
- self.assertEqual(x._fields, ('posonlyargs', 'args', 'vararg', 'kwonlyargs',
- 'kw_defaults', 'kwarg', 'defaults'))
-
- with self.assertRaises(AttributeError):
- x.args
- self.assertIsNone(x.vararg)
-
- x = ast.arguments(*range(1, 8))
- self.assertEqual(x.args, 2)
- self.assertEqual(x.vararg, 3)
-
- def test_field_attr_writable_deprecated(self):
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', '', DeprecationWarning)
- x = ast.Num()
- # We can assign to _fields
- x._fields = 666
- self.assertEqual(x._fields, 666)
-
- def test_field_attr_writable(self):
- x = ast.Constant()
- # We can assign to _fields
- x._fields = 666
- self.assertEqual(x._fields, 666)
-
- def test_classattrs_deprecated(self):
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', '', DeprecationWarning)
- from ast import Num, Str, Bytes, NameConstant, Ellipsis
-
- with warnings.catch_warnings(record=True) as wlog:
- warnings.filterwarnings('always', '', DeprecationWarning)
- x = ast.Num()
- self.assertEqual(x._fields, ('value', 'kind'))
-
- with self.assertRaises(AttributeError):
- x.value
-
- with self.assertRaises(AttributeError):
- x.n
-
- x = ast.Num(42)
- self.assertEqual(x.value, 42)
- self.assertEqual(x.n, 42)
-
- with self.assertRaises(AttributeError):
- x.lineno
-
- with self.assertRaises(AttributeError):
- x.foobar
-
- x = ast.Num(lineno=2)
- self.assertEqual(x.lineno, 2)
-
- x = ast.Num(42, lineno=0)
- self.assertEqual(x.lineno, 0)
- self.assertEqual(x._fields, ('value', 'kind'))
- self.assertEqual(x.value, 42)
- self.assertEqual(x.n, 42)
-
- self.assertRaises(TypeError, ast.Num, 1, None, 2)
- self.assertRaises(TypeError, ast.Num, 1, None, 2, lineno=0)
-
- # Arbitrary keyword arguments are supported
- self.assertEqual(ast.Num(1, foo='bar').foo, 'bar')
-
- with self.assertRaisesRegex(TypeError, "Num got multiple values for argument 'n'"):
- ast.Num(1, n=2)
-
- self.assertEqual(ast.Num(42).n, 42)
- self.assertEqual(ast.Num(4.25).n, 4.25)
- self.assertEqual(ast.Num(4.25j).n, 4.25j)
- self.assertEqual(ast.Str('42').s, '42')
- self.assertEqual(ast.Bytes(b'42').s, b'42')
- self.assertIs(ast.NameConstant(True).value, True)
- self.assertIs(ast.NameConstant(False).value, False)
- self.assertIs(ast.NameConstant(None).value, None)
-
- self.assertEqual([str(w.message) for w in wlog], [
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- 'ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'Attribute s is deprecated and will be removed in Python 3.14; use value instead',
- 'ast.Bytes is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'Attribute s is deprecated and will be removed in Python 3.14; use value instead',
- 'ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- ])
-
- def test_classattrs(self):
- x = ast.Constant()
- self.assertEqual(x._fields, ('value', 'kind'))
-
- with self.assertRaises(AttributeError):
- x.value
-
- x = ast.Constant(42)
- self.assertEqual(x.value, 42)
-
- with self.assertRaises(AttributeError):
- x.lineno
-
- with self.assertRaises(AttributeError):
- x.foobar
-
- x = ast.Constant(lineno=2)
- self.assertEqual(x.lineno, 2)
-
- x = ast.Constant(42, lineno=0)
- self.assertEqual(x.lineno, 0)
- self.assertEqual(x._fields, ('value', 'kind'))
- self.assertEqual(x.value, 42)
-
- self.assertRaises(TypeError, ast.Constant, 1, None, 2)
- self.assertRaises(TypeError, ast.Constant, 1, None, 2, lineno=0)
-
- # Arbitrary keyword arguments are supported
- self.assertEqual(ast.Constant(1, foo='bar').foo, 'bar')
-
- with self.assertRaisesRegex(TypeError, "Constant got multiple values for argument 'value'"):
- ast.Constant(1, value=2)
-
- self.assertEqual(ast.Constant(42).value, 42)
- self.assertEqual(ast.Constant(4.25).value, 4.25)
- self.assertEqual(ast.Constant(4.25j).value, 4.25j)
- self.assertEqual(ast.Constant('42').value, '42')
- self.assertEqual(ast.Constant(b'42').value, b'42')
- self.assertIs(ast.Constant(True).value, True)
- self.assertIs(ast.Constant(False).value, False)
- self.assertIs(ast.Constant(None).value, None)
- self.assertIs(ast.Constant(...).value, ...)
-
- def test_realtype(self):
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', '', DeprecationWarning)
- from ast import Num, Str, Bytes, NameConstant, Ellipsis
-
- with warnings.catch_warnings(record=True) as wlog:
- warnings.filterwarnings('always', '', DeprecationWarning)
- self.assertIs(type(ast.Num(42)), ast.Constant)
- self.assertIs(type(ast.Num(4.25)), ast.Constant)
- self.assertIs(type(ast.Num(4.25j)), ast.Constant)
- self.assertIs(type(ast.Str('42')), ast.Constant)
- self.assertIs(type(ast.Bytes(b'42')), ast.Constant)
- self.assertIs(type(ast.NameConstant(True)), ast.Constant)
- self.assertIs(type(ast.NameConstant(False)), ast.Constant)
- self.assertIs(type(ast.NameConstant(None)), ast.Constant)
- self.assertIs(type(ast.Ellipsis()), ast.Constant)
-
- self.assertEqual([str(w.message) for w in wlog], [
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Bytes is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Ellipsis is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- ])
-
- def test_isinstance(self):
- from ast import Constant
-
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', '', DeprecationWarning)
- from ast import Num, Str, Bytes, NameConstant, Ellipsis
-
- cls_depr_msg = (
- 'ast.{} is deprecated and will be removed in Python 3.14; '
- 'use ast.Constant instead'
- )
-
- assertNumDeprecated = partial(
- self.assertWarnsRegex, DeprecationWarning, cls_depr_msg.format("Num")
- )
- assertStrDeprecated = partial(
- self.assertWarnsRegex, DeprecationWarning, cls_depr_msg.format("Str")
- )
- assertBytesDeprecated = partial(
- self.assertWarnsRegex, DeprecationWarning, cls_depr_msg.format("Bytes")
- )
- assertNameConstantDeprecated = partial(
- self.assertWarnsRegex,
- DeprecationWarning,
- cls_depr_msg.format("NameConstant")
- )
- assertEllipsisDeprecated = partial(
- self.assertWarnsRegex, DeprecationWarning, cls_depr_msg.format("Ellipsis")
- )
-
- for arg in 42, 4.2, 4.2j:
- with self.subTest(arg=arg):
- with assertNumDeprecated():
- n = Num(arg)
- with assertNumDeprecated():
- self.assertIsInstance(n, Num)
-
- with assertStrDeprecated():
- s = Str('42')
- with assertStrDeprecated():
- self.assertIsInstance(s, Str)
-
- with assertBytesDeprecated():
- b = Bytes(b'42')
- with assertBytesDeprecated():
- self.assertIsInstance(b, Bytes)
-
- for arg in True, False, None:
- with self.subTest(arg=arg):
- with assertNameConstantDeprecated():
- n = NameConstant(arg)
- with assertNameConstantDeprecated():
- self.assertIsInstance(n, NameConstant)
-
- with assertEllipsisDeprecated():
- e = Ellipsis()
- with assertEllipsisDeprecated():
- self.assertIsInstance(e, Ellipsis)
-
- for arg in 42, 4.2, 4.2j:
- with self.subTest(arg=arg):
- with assertNumDeprecated():
- self.assertIsInstance(Constant(arg), Num)
-
- with assertStrDeprecated():
- self.assertIsInstance(Constant('42'), Str)
-
- with assertBytesDeprecated():
- self.assertIsInstance(Constant(b'42'), Bytes)
-
- for arg in True, False, None:
- with self.subTest(arg=arg):
- with assertNameConstantDeprecated():
- self.assertIsInstance(Constant(arg), NameConstant)
-
- with assertEllipsisDeprecated():
- self.assertIsInstance(Constant(...), Ellipsis)
-
- with assertStrDeprecated():
- s = Str('42')
- assertNumDeprecated(self.assertNotIsInstance, s, Num)
- assertBytesDeprecated(self.assertNotIsInstance, s, Bytes)
-
- with assertNumDeprecated():
- n = Num(42)
- assertStrDeprecated(self.assertNotIsInstance, n, Str)
- assertNameConstantDeprecated(self.assertNotIsInstance, n, NameConstant)
- assertEllipsisDeprecated(self.assertNotIsInstance, n, Ellipsis)
-
- with assertNameConstantDeprecated():
- n = NameConstant(True)
- with assertNumDeprecated():
- self.assertNotIsInstance(n, Num)
-
- with assertNameConstantDeprecated():
- n = NameConstant(False)
- with assertNumDeprecated():
- self.assertNotIsInstance(n, Num)
-
- for arg in '42', True, False:
- with self.subTest(arg=arg):
- with assertNumDeprecated():
- self.assertNotIsInstance(Constant(arg), Num)
-
- assertStrDeprecated(self.assertNotIsInstance, Constant(42), Str)
- assertBytesDeprecated(self.assertNotIsInstance, Constant('42'), Bytes)
- assertNameConstantDeprecated(self.assertNotIsInstance, Constant(42), NameConstant)
- assertEllipsisDeprecated(self.assertNotIsInstance, Constant(42), Ellipsis)
- assertNumDeprecated(self.assertNotIsInstance, Constant(), Num)
- assertStrDeprecated(self.assertNotIsInstance, Constant(), Str)
- assertBytesDeprecated(self.assertNotIsInstance, Constant(), Bytes)
- assertNameConstantDeprecated(self.assertNotIsInstance, Constant(), NameConstant)
- assertEllipsisDeprecated(self.assertNotIsInstance, Constant(), Ellipsis)
-
- class S(str): pass
- with assertStrDeprecated():
- self.assertIsInstance(Constant(S('42')), Str)
- with assertNumDeprecated():
- self.assertNotIsInstance(Constant(S('42')), Num)
-
- def test_constant_subclasses_deprecated(self):
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', '', DeprecationWarning)
- from ast import Num
-
- with warnings.catch_warnings(record=True) as wlog:
- warnings.filterwarnings('always', '', DeprecationWarning)
- class N(ast.Num):
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.z = 'spam'
- class N2(ast.Num):
- pass
-
- n = N(42)
- self.assertEqual(n.n, 42)
- self.assertEqual(n.z, 'spam')
- self.assertIs(type(n), N)
- self.assertIsInstance(n, N)
- self.assertIsInstance(n, ast.Num)
- self.assertNotIsInstance(n, N2)
- self.assertNotIsInstance(ast.Num(42), N)
- n = N(n=42)
- self.assertEqual(n.n, 42)
- self.assertIs(type(n), N)
-
- self.assertEqual([str(w.message) for w in wlog], [
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- ])
-
- def test_constant_subclasses(self):
- class N(ast.Constant):
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.z = 'spam'
- class N2(ast.Constant):
- pass
-
- n = N(42)
- self.assertEqual(n.value, 42)
- self.assertEqual(n.z, 'spam')
- self.assertEqual(type(n), N)
- self.assertTrue(isinstance(n, N))
- self.assertTrue(isinstance(n, ast.Constant))
- self.assertFalse(isinstance(n, N2))
- self.assertFalse(isinstance(ast.Constant(42), N))
- n = N(value=42)
- self.assertEqual(n.value, 42)
- self.assertEqual(type(n), N)
-
- def test_module(self):
- body = [ast.Constant(42)]
- x = ast.Module(body, [])
- self.assertEqual(x.body, body)
-
- def test_nodeclasses(self):
- # Zero arguments constructor explicitly allowed
- x = ast.BinOp()
- self.assertEqual(x._fields, ('left', 'op', 'right'))
-
- # Random attribute allowed too
- x.foobarbaz = 5
- self.assertEqual(x.foobarbaz, 5)
-
- n1 = ast.Constant(1)
- n3 = ast.Constant(3)
- addop = ast.Add()
- x = ast.BinOp(n1, addop, n3)
- self.assertEqual(x.left, n1)
- self.assertEqual(x.op, addop)
- self.assertEqual(x.right, n3)
-
- x = ast.BinOp(1, 2, 3)
- self.assertEqual(x.left, 1)
- self.assertEqual(x.op, 2)
- self.assertEqual(x.right, 3)
-
- x = ast.BinOp(1, 2, 3, lineno=0)
- self.assertEqual(x.left, 1)
- self.assertEqual(x.op, 2)
- self.assertEqual(x.right, 3)
- self.assertEqual(x.lineno, 0)
-
- # node raises exception when given too many arguments
- self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4)
- # node raises exception when given too many arguments
- self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4, lineno=0)
-
- # can set attributes through kwargs too
- x = ast.BinOp(left=1, op=2, right=3, lineno=0)
- self.assertEqual(x.left, 1)
- self.assertEqual(x.op, 2)
- self.assertEqual(x.right, 3)
- self.assertEqual(x.lineno, 0)
-
- # Random kwargs also allowed
- x = ast.BinOp(1, 2, 3, foobarbaz=42)
- self.assertEqual(x.foobarbaz, 42)
-
- def test_no_fields(self):
- # this used to fail because Sub._fields was None
- x = ast.Sub()
- self.assertEqual(x._fields, ())
-
- # TODO: RUSTPYTHON _ast classes should be HEAPTYPES (except for _ast.AST)
- @unittest.expectedFailure
- def test_pickling(self):
- import pickle
-
- for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
- for ast in (compile(i, "?", "exec", 0x400) for i in exec_tests):
- ast2 = pickle.loads(pickle.dumps(ast, protocol))
- self.assertEqual(to_tuple(ast2), to_tuple(ast))
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_invalid_sum(self):
- pos = dict(lineno=2, col_offset=3)
- m = ast.Module([ast.Expr(ast.expr(**pos), **pos)], [])
- with self.assertRaises(TypeError) as cm:
- compile(m, "", "exec")
- self.assertIn("but got ", "exec")
- self.assertIn("identifier must be of type str", str(cm.exception))
-
- def test_invalid_constant(self):
- for invalid_constant in int, (1, 2, int), frozenset((1, 2, int)):
- e = ast.Expression(body=ast.Constant(invalid_constant))
- ast.fix_missing_locations(e)
- with self.assertRaisesRegex(
- TypeError, "invalid type in Constant: type"
- ):
- compile(e, "", "eval")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_empty_yield_from(self):
- # Issue 16546: yield from value is not optional.
- empty_yield_from = ast.parse("def f():\n yield from g()")
- empty_yield_from.body[0].body[0].value.value = None
- with self.assertRaises(ValueError) as cm:
- compile(empty_yield_from, "", "exec")
- self.assertIn("field 'value' is required", str(cm.exception))
-
- @support.cpython_only
- def test_issue31592(self):
- # There shouldn't be an assertion failure in case of a bad
- # unicodedata.normalize().
- import unicodedata
- def bad_normalize(*args):
- return None
- with support.swap_attr(unicodedata, 'normalize', bad_normalize):
- self.assertRaises(TypeError, ast.parse, '\u03D5')
-
- def test_issue18374_binop_col_offset(self):
- tree = ast.parse('4+5+6+7')
- parent_binop = tree.body[0].value
- child_binop = parent_binop.left
- grandchild_binop = child_binop.left
- self.assertEqual(parent_binop.col_offset, 0)
- self.assertEqual(parent_binop.end_col_offset, 7)
- self.assertEqual(child_binop.col_offset, 0)
- self.assertEqual(child_binop.end_col_offset, 5)
- self.assertEqual(grandchild_binop.col_offset, 0)
- self.assertEqual(grandchild_binop.end_col_offset, 3)
-
- tree = ast.parse('4+5-\\\n 6-7')
- parent_binop = tree.body[0].value
- child_binop = parent_binop.left
- grandchild_binop = child_binop.left
- self.assertEqual(parent_binop.col_offset, 0)
- self.assertEqual(parent_binop.lineno, 1)
- self.assertEqual(parent_binop.end_col_offset, 4)
- self.assertEqual(parent_binop.end_lineno, 2)
-
- self.assertEqual(child_binop.col_offset, 0)
- self.assertEqual(child_binop.lineno, 1)
- self.assertEqual(child_binop.end_col_offset, 2)
- self.assertEqual(child_binop.end_lineno, 2)
-
- self.assertEqual(grandchild_binop.col_offset, 0)
- self.assertEqual(grandchild_binop.lineno, 1)
- self.assertEqual(grandchild_binop.end_col_offset, 3)
- self.assertEqual(grandchild_binop.end_lineno, 1)
-
- def test_issue39579_dotted_name_end_col_offset(self):
- tree = ast.parse('@a.b.c\ndef f(): pass')
- attr_b = tree.body[0].decorator_list[0].value
- self.assertEqual(attr_b.end_col_offset, 4)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_ast_asdl_signature(self):
- self.assertEqual(ast.withitem.__doc__, "withitem(expr context_expr, expr? optional_vars)")
- self.assertEqual(ast.GtE.__doc__, "GtE")
- self.assertEqual(ast.Name.__doc__, "Name(identifier id, expr_context ctx)")
- self.assertEqual(ast.cmpop.__doc__, "cmpop = Eq | NotEq | Lt | LtE | Gt | GtE | Is | IsNot | In | NotIn")
- expressions = [f" | {node.__doc__}" for node in ast.expr.__subclasses__()]
- expressions[0] = f"expr = {ast.expr.__subclasses__()[0].__doc__}"
- self.assertCountEqual(ast.expr.__doc__.split("\n"), expressions)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_positional_only_feature_version(self):
- ast.parse('def foo(x, /): ...', feature_version=(3, 8))
- ast.parse('def bar(x=1, /): ...', feature_version=(3, 8))
- with self.assertRaises(SyntaxError):
- ast.parse('def foo(x, /): ...', feature_version=(3, 7))
- with self.assertRaises(SyntaxError):
- ast.parse('def bar(x=1, /): ...', feature_version=(3, 7))
-
- ast.parse('lambda x, /: ...', feature_version=(3, 8))
- ast.parse('lambda x=1, /: ...', feature_version=(3, 8))
- with self.assertRaises(SyntaxError):
- ast.parse('lambda x, /: ...', feature_version=(3, 7))
- with self.assertRaises(SyntaxError):
- ast.parse('lambda x=1, /: ...', feature_version=(3, 7))
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_parenthesized_with_feature_version(self):
- ast.parse('with (CtxManager() as example): ...', feature_version=(3, 10))
- # While advertised as a feature in Python 3.10, this was allowed starting 3.9
- ast.parse('with (CtxManager() as example): ...', feature_version=(3, 9))
- with self.assertRaises(SyntaxError):
- ast.parse('with (CtxManager() as example): ...', feature_version=(3, 8))
- ast.parse('with CtxManager() as example: ...', feature_version=(3, 8))
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_assignment_expression_feature_version(self):
- ast.parse('(x := 0)', feature_version=(3, 8))
- with self.assertRaises(SyntaxError):
- ast.parse('(x := 0)', feature_version=(3, 7))
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_exception_groups_feature_version(self):
- code = dedent('''
- try: ...
- except* Exception: ...
- ''')
- ast.parse(code)
- with self.assertRaises(SyntaxError):
- ast.parse(code, feature_version=(3, 10))
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_type_params_feature_version(self):
- samples = [
- "type X = int",
- "class X[T]: pass",
- "def f[T](): pass",
- ]
- for sample in samples:
- with self.subTest(sample):
- ast.parse(sample)
- with self.assertRaises(SyntaxError):
- ast.parse(sample, feature_version=(3, 11))
-
- def test_invalid_major_feature_version(self):
- with self.assertRaises(ValueError):
- ast.parse('pass', feature_version=(2, 7))
- with self.assertRaises(ValueError):
- ast.parse('pass', feature_version=(4, 0))
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_constant_as_name(self):
- for constant in "True", "False", "None":
- expr = ast.Expression(ast.Name(constant, ast.Load()))
- ast.fix_missing_locations(expr)
- with self.assertRaisesRegex(ValueError, f"identifier field can't represent '{constant}' constant"):
- compile(expr, "", "eval")
-
- @unittest.skip("TODO: RUSTPYTHON, TypeError: enum mismatch")
- def test_precedence_enum(self):
- class _Precedence(enum.IntEnum):
- """Precedence table that originated from python grammar."""
- NAMED_EXPR = enum.auto() # :=
- TUPLE = enum.auto() # ,
- YIELD = enum.auto() # 'yield', 'yield from'
- TEST = enum.auto() # 'if'-'else', 'lambda'
- OR = enum.auto() # 'or'
- AND = enum.auto() # 'and'
- NOT = enum.auto() # 'not'
- CMP = enum.auto() # '<', '>', '==', '>=', '<=', '!=',
- # 'in', 'not in', 'is', 'is not'
- EXPR = enum.auto()
- BOR = EXPR # '|'
- BXOR = enum.auto() # '^'
- BAND = enum.auto() # '&'
- SHIFT = enum.auto() # '<<', '>>'
- ARITH = enum.auto() # '+', '-'
- TERM = enum.auto() # '*', '@', '/', '%', '//'
- FACTOR = enum.auto() # unary '+', '-', '~'
- POWER = enum.auto() # '**'
- AWAIT = enum.auto() # 'await'
- ATOM = enum.auto()
- def next(self):
- try:
- return self.__class__(self + 1)
- except ValueError:
- return self
- enum._test_simple_enum(_Precedence, ast._Precedence)
-
- @unittest.skipIf(support.is_wasi, "exhausts limited stack on WASI")
- @support.cpython_only
- def test_ast_recursion_limit(self):
- fail_depth = support.EXCEEDS_RECURSION_LIMIT
- crash_depth = 100_000
- success_depth = 1200
-
- def check_limit(prefix, repeated):
- expect_ok = prefix + repeated * success_depth
- ast.parse(expect_ok)
- for depth in (fail_depth, crash_depth):
- broken = prefix + repeated * depth
- details = "Compiling ({!r} + {!r} * {})".format(
- prefix, repeated, depth)
- with self.assertRaises(RecursionError, msg=details):
- with support.infinite_recursion():
- ast.parse(broken)
-
- check_limit("a", "()")
- check_limit("a", ".b")
- check_limit("a", "[0]")
- check_limit("a", "*a")
-
- def test_null_bytes(self):
- with self.assertRaises(SyntaxError,
- msg="source code string cannot contain null bytes"):
- ast.parse("a\0b")
-
- def assert_none_check(self, node: type[ast.AST], attr: str, source: str) -> None:
- with self.subTest(f"{node.__name__}.{attr}"):
- tree = ast.parse(source)
- found = 0
- for child in ast.walk(tree):
- if isinstance(child, node):
- setattr(child, attr, None)
- found += 1
- self.assertEqual(found, 1)
- e = re.escape(f"field '{attr}' is required for {node.__name__}")
- with self.assertRaisesRegex(ValueError, f"^{e}$"):
- compile(tree, "", "exec")
-
- @unittest.skip("TODO: RUSTPYTHON, TypeError: Expected type 'str' but 'NoneType' found")
- def test_none_checks(self) -> None:
- tests = [
- (ast.alias, "name", "import spam as SPAM"),
- (ast.arg, "arg", "def spam(SPAM): spam"),
- (ast.comprehension, "target", "[spam for SPAM in spam]"),
- (ast.comprehension, "iter", "[spam for spam in SPAM]"),
- (ast.keyword, "value", "spam(**SPAM)"),
- (ast.match_case, "pattern", "match spam:\n case SPAM: spam"),
- (ast.withitem, "context_expr", "with SPAM: spam"),
- ]
- for node, attr, source in tests:
- self.assert_none_check(node, attr, source)
-
-class ASTHelpers_Test(unittest.TestCase):
- maxDiff = None
-
- def test_parse(self):
- a = ast.parse('foo(1 + 1)')
- b = compile('foo(1 + 1)', '', 'exec', ast.PyCF_ONLY_AST)
- self.assertEqual(ast.dump(a), ast.dump(b))
-
- def test_parse_in_error(self):
- try:
- 1/0
- except Exception:
- with self.assertRaises(SyntaxError) as e:
- ast.literal_eval(r"'\U'")
- self.assertIsNotNone(e.exception.__context__)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_dump(self):
- node = ast.parse('spam(eggs, "and cheese")')
- self.assertEqual(ast.dump(node),
- "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), "
- "args=[Name(id='eggs', ctx=Load()), Constant(value='and cheese')], "
- "keywords=[]))], type_ignores=[])"
- )
- self.assertEqual(ast.dump(node, annotate_fields=False),
- "Module([Expr(Call(Name('spam', Load()), [Name('eggs', Load()), "
- "Constant('and cheese')], []))], [])"
- )
- self.assertEqual(ast.dump(node, include_attributes=True),
- "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load(), "
- "lineno=1, col_offset=0, end_lineno=1, end_col_offset=4), "
- "args=[Name(id='eggs', ctx=Load(), lineno=1, col_offset=5, "
- "end_lineno=1, end_col_offset=9), Constant(value='and cheese', "
- "lineno=1, col_offset=11, end_lineno=1, end_col_offset=23)], keywords=[], "
- "lineno=1, col_offset=0, end_lineno=1, end_col_offset=24), "
- "lineno=1, col_offset=0, end_lineno=1, end_col_offset=24)], type_ignores=[])"
- )
-
- # TODO: RUSTPYTHON; redundant kind for Contant node
- @unittest.expectedFailure
- def test_dump_indent(self):
- node = ast.parse('spam(eggs, "and cheese")')
- self.assertEqual(ast.dump(node, indent=3), """\
-Module(
- body=[
- Expr(
- value=Call(
- func=Name(id='spam', ctx=Load()),
- args=[
- Name(id='eggs', ctx=Load()),
- Constant(value='and cheese')],
- keywords=[]))],
- type_ignores=[])""")
- self.assertEqual(ast.dump(node, annotate_fields=False, indent='\t'), """\
-Module(
-\t[
-\t\tExpr(
-\t\t\tCall(
-\t\t\t\tName('spam', Load()),
-\t\t\t\t[
-\t\t\t\t\tName('eggs', Load()),
-\t\t\t\t\tConstant('and cheese')],
-\t\t\t\t[]))],
-\t[])""")
- self.assertEqual(ast.dump(node, include_attributes=True, indent=3), """\
-Module(
- body=[
- Expr(
- value=Call(
- func=Name(
- id='spam',
- ctx=Load(),
- lineno=1,
- col_offset=0,
- end_lineno=1,
- end_col_offset=4),
- args=[
- Name(
- id='eggs',
- ctx=Load(),
- lineno=1,
- col_offset=5,
- end_lineno=1,
- end_col_offset=9),
- Constant(
- value='and cheese',
- lineno=1,
- col_offset=11,
- end_lineno=1,
- end_col_offset=23)],
- keywords=[],
- lineno=1,
- col_offset=0,
- end_lineno=1,
- end_col_offset=24),
- lineno=1,
- col_offset=0,
- end_lineno=1,
- end_col_offset=24)],
- type_ignores=[])""")
-
- def test_dump_incomplete(self):
- node = ast.Raise(lineno=3, col_offset=4)
- self.assertEqual(ast.dump(node),
- "Raise()"
- )
- self.assertEqual(ast.dump(node, include_attributes=True),
- "Raise(lineno=3, col_offset=4)"
- )
- node = ast.Raise(exc=ast.Name(id='e', ctx=ast.Load()), lineno=3, col_offset=4)
- self.assertEqual(ast.dump(node),
- "Raise(exc=Name(id='e', ctx=Load()))"
- )
- self.assertEqual(ast.dump(node, annotate_fields=False),
- "Raise(Name('e', Load()))"
- )
- self.assertEqual(ast.dump(node, include_attributes=True),
- "Raise(exc=Name(id='e', ctx=Load()), lineno=3, col_offset=4)"
- )
- self.assertEqual(ast.dump(node, annotate_fields=False, include_attributes=True),
- "Raise(Name('e', Load()), lineno=3, col_offset=4)"
- )
- node = ast.Raise(cause=ast.Name(id='e', ctx=ast.Load()))
- self.assertEqual(ast.dump(node),
- "Raise(cause=Name(id='e', ctx=Load()))"
- )
- self.assertEqual(ast.dump(node, annotate_fields=False),
- "Raise(cause=Name('e', Load()))"
- )
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_copy_location(self):
- src = ast.parse('1 + 1', mode='eval')
- src.body.right = ast.copy_location(ast.Constant(2), src.body.right)
- self.assertEqual(ast.dump(src, include_attributes=True),
- 'Expression(body=BinOp(left=Constant(value=1, lineno=1, col_offset=0, '
- 'end_lineno=1, end_col_offset=1), op=Add(), right=Constant(value=2, '
- 'lineno=1, col_offset=4, end_lineno=1, end_col_offset=5), lineno=1, '
- 'col_offset=0, end_lineno=1, end_col_offset=5))'
- )
- src = ast.Call(col_offset=1, lineno=1, end_lineno=1, end_col_offset=1)
- new = ast.copy_location(src, ast.Call(col_offset=None, lineno=None))
- self.assertIsNone(new.end_lineno)
- self.assertIsNone(new.end_col_offset)
- self.assertEqual(new.lineno, 1)
- self.assertEqual(new.col_offset, 1)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_fix_missing_locations(self):
- src = ast.parse('write("spam")')
- src.body.append(ast.Expr(ast.Call(ast.Name('spam', ast.Load()),
- [ast.Constant('eggs')], [])))
- self.assertEqual(src, ast.fix_missing_locations(src))
- self.maxDiff = None
- self.assertEqual(ast.dump(src, include_attributes=True),
- "Module(body=[Expr(value=Call(func=Name(id='write', ctx=Load(), "
- "lineno=1, col_offset=0, end_lineno=1, end_col_offset=5), "
- "args=[Constant(value='spam', lineno=1, col_offset=6, end_lineno=1, "
- "end_col_offset=12)], keywords=[], lineno=1, col_offset=0, end_lineno=1, "
- "end_col_offset=13), lineno=1, col_offset=0, end_lineno=1, "
- "end_col_offset=13), Expr(value=Call(func=Name(id='spam', ctx=Load(), "
- "lineno=1, col_offset=0, end_lineno=1, end_col_offset=0), "
- "args=[Constant(value='eggs', lineno=1, col_offset=0, end_lineno=1, "
- "end_col_offset=0)], keywords=[], lineno=1, col_offset=0, end_lineno=1, "
- "end_col_offset=0), lineno=1, col_offset=0, end_lineno=1, end_col_offset=0)], "
- "type_ignores=[])"
- )
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_increment_lineno(self):
- src = ast.parse('1 + 1', mode='eval')
- self.assertEqual(ast.increment_lineno(src, n=3), src)
- self.assertEqual(ast.dump(src, include_attributes=True),
- 'Expression(body=BinOp(left=Constant(value=1, lineno=4, col_offset=0, '
- 'end_lineno=4, end_col_offset=1), op=Add(), right=Constant(value=1, '
- 'lineno=4, col_offset=4, end_lineno=4, end_col_offset=5), lineno=4, '
- 'col_offset=0, end_lineno=4, end_col_offset=5))'
- )
- # issue10869: do not increment lineno of root twice
- src = ast.parse('1 + 1', mode='eval')
- self.assertEqual(ast.increment_lineno(src.body, n=3), src.body)
- self.assertEqual(ast.dump(src, include_attributes=True),
- 'Expression(body=BinOp(left=Constant(value=1, lineno=4, col_offset=0, '
- 'end_lineno=4, end_col_offset=1), op=Add(), right=Constant(value=1, '
- 'lineno=4, col_offset=4, end_lineno=4, end_col_offset=5), lineno=4, '
- 'col_offset=0, end_lineno=4, end_col_offset=5))'
- )
- src = ast.Call(
- func=ast.Name("test", ast.Load()), args=[], keywords=[], lineno=1
- )
- self.assertEqual(ast.increment_lineno(src).lineno, 2)
- self.assertIsNone(ast.increment_lineno(src).end_lineno)
-
- @unittest.skip("TODO: RUSTPYTHON, NameError: name 'PyCF_TYPE_COMMENTS' is not defined")
- def test_increment_lineno_on_module(self):
- src = ast.parse(dedent("""\
- a = 1
- b = 2 # type: ignore
- c = 3
- d = 4 # type: ignore@tag
- """), type_comments=True)
- ast.increment_lineno(src, n=5)
- self.assertEqual(src.type_ignores[0].lineno, 7)
- self.assertEqual(src.type_ignores[1].lineno, 9)
- self.assertEqual(src.type_ignores[1].tag, '@tag')
-
- def test_iter_fields(self):
- node = ast.parse('foo()', mode='eval')
- d = dict(ast.iter_fields(node.body))
- self.assertEqual(d.pop('func').id, 'foo')
- self.assertEqual(d, {'keywords': [], 'args': []})
-
- # TODO: RUSTPYTHON; redundant kind for Constant node
- @unittest.expectedFailure
- def test_iter_child_nodes(self):
- node = ast.parse("spam(23, 42, eggs='leek')", mode='eval')
- self.assertEqual(len(list(ast.iter_child_nodes(node.body))), 4)
- iterator = ast.iter_child_nodes(node.body)
- self.assertEqual(next(iterator).id, 'spam')
- self.assertEqual(next(iterator).value, 23)
- self.assertEqual(next(iterator).value, 42)
- self.assertEqual(ast.dump(next(iterator)),
- "keyword(arg='eggs', value=Constant(value='leek'))"
- )
-
- def test_get_docstring(self):
- node = ast.parse('"""line one\n line two"""')
- self.assertEqual(ast.get_docstring(node),
- 'line one\nline two')
-
- node = ast.parse('class foo:\n """line one\n line two"""')
- self.assertEqual(ast.get_docstring(node.body[0]),
- 'line one\nline two')
-
- node = ast.parse('def foo():\n """line one\n line two"""')
- self.assertEqual(ast.get_docstring(node.body[0]),
- 'line one\nline two')
-
- node = ast.parse('async def foo():\n """spam\n ham"""')
- self.assertEqual(ast.get_docstring(node.body[0]), 'spam\nham')
-
- def test_get_docstring_none(self):
- self.assertIsNone(ast.get_docstring(ast.parse('')))
- node = ast.parse('x = "not docstring"')
- self.assertIsNone(ast.get_docstring(node))
- node = ast.parse('def foo():\n pass')
- self.assertIsNone(ast.get_docstring(node))
-
- node = ast.parse('class foo:\n pass')
- self.assertIsNone(ast.get_docstring(node.body[0]))
- node = ast.parse('class foo:\n x = "not docstring"')
- self.assertIsNone(ast.get_docstring(node.body[0]))
- node = ast.parse('class foo:\n def bar(self): pass')
- self.assertIsNone(ast.get_docstring(node.body[0]))
-
- node = ast.parse('def foo():\n pass')
- self.assertIsNone(ast.get_docstring(node.body[0]))
- node = ast.parse('def foo():\n x = "not docstring"')
- self.assertIsNone(ast.get_docstring(node.body[0]))
-
- node = ast.parse('async def foo():\n pass')
- self.assertIsNone(ast.get_docstring(node.body[0]))
- node = ast.parse('async def foo():\n x = "not docstring"')
- self.assertIsNone(ast.get_docstring(node.body[0]))
-
- def test_multi_line_docstring_col_offset_and_lineno_issue16806(self):
- node = ast.parse(
- '"""line one\nline two"""\n\n'
- 'def foo():\n """line one\n line two"""\n\n'
- ' def bar():\n """line one\n line two"""\n'
- ' """line one\n line two"""\n'
- '"""line one\nline two"""\n\n'
- )
- self.assertEqual(node.body[0].col_offset, 0)
- self.assertEqual(node.body[0].lineno, 1)
- self.assertEqual(node.body[1].body[0].col_offset, 2)
- self.assertEqual(node.body[1].body[0].lineno, 5)
- self.assertEqual(node.body[1].body[1].body[0].col_offset, 4)
- self.assertEqual(node.body[1].body[1].body[0].lineno, 9)
- self.assertEqual(node.body[1].body[2].col_offset, 2)
- self.assertEqual(node.body[1].body[2].lineno, 11)
- self.assertEqual(node.body[2].col_offset, 0)
- self.assertEqual(node.body[2].lineno, 13)
-
- def test_elif_stmt_start_position(self):
- node = ast.parse('if a:\n pass\nelif b:\n pass\n')
- elif_stmt = node.body[0].orelse[0]
- self.assertEqual(elif_stmt.lineno, 3)
- self.assertEqual(elif_stmt.col_offset, 0)
-
- def test_elif_stmt_start_position_with_else(self):
- node = ast.parse('if a:\n pass\nelif b:\n pass\nelse:\n pass\n')
- elif_stmt = node.body[0].orelse[0]
- self.assertEqual(elif_stmt.lineno, 3)
- self.assertEqual(elif_stmt.col_offset, 0)
-
- def test_starred_expr_end_position_within_call(self):
- node = ast.parse('f(*[0, 1])')
- starred_expr = node.body[0].value.args[0]
- self.assertEqual(starred_expr.end_lineno, 1)
- self.assertEqual(starred_expr.end_col_offset, 9)
-
- def test_literal_eval(self):
- self.assertEqual(ast.literal_eval('[1, 2, 3]'), [1, 2, 3])
- self.assertEqual(ast.literal_eval('{"foo": 42}'), {"foo": 42})
- self.assertEqual(ast.literal_eval('(True, False, None)'), (True, False, None))
- self.assertEqual(ast.literal_eval('{1, 2, 3}'), {1, 2, 3})
- self.assertEqual(ast.literal_eval('b"hi"'), b"hi")
- self.assertEqual(ast.literal_eval('set()'), set())
- self.assertRaises(ValueError, ast.literal_eval, 'foo()')
- self.assertEqual(ast.literal_eval('6'), 6)
- self.assertEqual(ast.literal_eval('+6'), 6)
- self.assertEqual(ast.literal_eval('-6'), -6)
- self.assertEqual(ast.literal_eval('3.25'), 3.25)
- self.assertEqual(ast.literal_eval('+3.25'), 3.25)
- self.assertEqual(ast.literal_eval('-3.25'), -3.25)
- self.assertEqual(repr(ast.literal_eval('-0.0')), '-0.0')
- self.assertRaises(ValueError, ast.literal_eval, '++6')
- self.assertRaises(ValueError, ast.literal_eval, '+True')
- self.assertRaises(ValueError, ast.literal_eval, '2+3')
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_literal_eval_str_int_limit(self):
- with support.adjust_int_max_str_digits(4000):
- ast.literal_eval('3'*4000) # no error
- with self.assertRaises(SyntaxError) as err_ctx:
- ast.literal_eval('3'*4001)
- self.assertIn('Exceeds the limit ', str(err_ctx.exception))
- self.assertIn(' Consider hexadecimal ', str(err_ctx.exception))
-
- def test_literal_eval_complex(self):
- # Issue #4907
- self.assertEqual(ast.literal_eval('6j'), 6j)
- self.assertEqual(ast.literal_eval('-6j'), -6j)
- self.assertEqual(ast.literal_eval('6.75j'), 6.75j)
- self.assertEqual(ast.literal_eval('-6.75j'), -6.75j)
- self.assertEqual(ast.literal_eval('3+6j'), 3+6j)
- self.assertEqual(ast.literal_eval('-3+6j'), -3+6j)
- self.assertEqual(ast.literal_eval('3-6j'), 3-6j)
- self.assertEqual(ast.literal_eval('-3-6j'), -3-6j)
- self.assertEqual(ast.literal_eval('3.25+6.75j'), 3.25+6.75j)
- self.assertEqual(ast.literal_eval('-3.25+6.75j'), -3.25+6.75j)
- self.assertEqual(ast.literal_eval('3.25-6.75j'), 3.25-6.75j)
- self.assertEqual(ast.literal_eval('-3.25-6.75j'), -3.25-6.75j)
- self.assertEqual(ast.literal_eval('(3+6j)'), 3+6j)
- self.assertRaises(ValueError, ast.literal_eval, '-6j+3')
- self.assertRaises(ValueError, ast.literal_eval, '-6j+3j')
- self.assertRaises(ValueError, ast.literal_eval, '3+-6j')
- self.assertRaises(ValueError, ast.literal_eval, '3+(0+6j)')
- self.assertRaises(ValueError, ast.literal_eval, '-(3+6j)')
-
- def test_literal_eval_malformed_dict_nodes(self):
- malformed = ast.Dict(keys=[ast.Constant(1), ast.Constant(2)], values=[ast.Constant(3)])
- self.assertRaises(ValueError, ast.literal_eval, malformed)
- malformed = ast.Dict(keys=[ast.Constant(1)], values=[ast.Constant(2), ast.Constant(3)])
- self.assertRaises(ValueError, ast.literal_eval, malformed)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_literal_eval_trailing_ws(self):
- self.assertEqual(ast.literal_eval(" -1"), -1)
- self.assertEqual(ast.literal_eval("\t\t-1"), -1)
- self.assertEqual(ast.literal_eval(" \t -1"), -1)
- self.assertRaises(IndentationError, ast.literal_eval, "\n -1")
-
- def test_literal_eval_malformed_lineno(self):
- msg = r'malformed node or string on line 3:'
- with self.assertRaisesRegex(ValueError, msg):
- ast.literal_eval("{'a': 1,\n'b':2,\n'c':++3,\n'd':4}")
-
- node = ast.UnaryOp(
- ast.UAdd(), ast.UnaryOp(ast.UAdd(), ast.Constant(6)))
- self.assertIsNone(getattr(node, 'lineno', None))
- msg = r'malformed node or string:'
- with self.assertRaisesRegex(ValueError, msg):
- ast.literal_eval(node)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_literal_eval_syntax_errors(self):
- with self.assertRaisesRegex(SyntaxError, "unexpected indent"):
- ast.literal_eval(r'''
- \
- (\
- \ ''')
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_bad_integer(self):
- # issue13436: Bad error message with invalid numeric values
- body = [ast.ImportFrom(module='time',
- names=[ast.alias(name='sleep')],
- level=None,
- lineno=None, col_offset=None)]
- mod = ast.Module(body, [])
- with self.assertRaises(ValueError) as cm:
- compile(mod, 'test', 'exec')
- self.assertIn("invalid integer value: None", str(cm.exception))
-
- # XXX RUSTPYTHON: we always require that end ranges be present
- @unittest.expectedFailure
- def test_level_as_none(self):
- body = [ast.ImportFrom(module='time',
- names=[ast.alias(name='sleep',
- lineno=0, col_offset=0)],
- level=None,
- lineno=0, col_offset=0)]
- mod = ast.Module(body, [])
- code = compile(mod, 'test', 'exec')
- ns = {}
- exec(code, ns)
- self.assertIn('sleep', ns)
-
- @unittest.skip("TODO: RUSTPYTHON; crash")
- def test_recursion_direct(self):
- e = ast.UnaryOp(op=ast.Not(), lineno=0, col_offset=0)
- e.operand = e
- with self.assertRaises(RecursionError):
- with support.infinite_recursion():
- compile(ast.Expression(e), "<test>", "eval")
-
- @unittest.skip("TODO: RUSTPYTHON; crash")
- def test_recursion_indirect(self):
- e = ast.UnaryOp(op=ast.Not(), lineno=0, col_offset=0)
- f = ast.UnaryOp(op=ast.Not(), lineno=0, col_offset=0)
- e.operand = f
- f.operand = e
- with self.assertRaises(RecursionError):
- with support.infinite_recursion():
- compile(ast.Expression(e), "<test>", "eval")
-
-
-class ASTValidatorTests(unittest.TestCase):
-
- def mod(self, mod, msg=None, mode="exec", *, exc=ValueError):
- mod.lineno = mod.col_offset = 0
- ast.fix_missing_locations(mod)
- if msg is None:
- compile(mod, "", mode)
- else:
- with self.assertRaises(exc) as cm:
- compile(mod, "", mode)
- self.assertIn(msg, str(cm.exception))
-
- def expr(self, node, msg=None, *, exc=ValueError):
- mod = ast.Module([ast.Expr(node)], [])
- self.mod(mod, msg, exc=exc)
-
- def stmt(self, stmt, msg=None):
- mod = ast.Module([stmt], [])
- self.mod(mod, msg)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_module(self):
- m = ast.Interactive([ast.Expr(ast.Name("x", ast.Store()))])
- self.mod(m, "must have Load context", "single")
- m = ast.Expression(ast.Name("x", ast.Store()))
- self.mod(m, "must have Load context", "eval")
-
- def _check_arguments(self, fac, check):
- def arguments(args=None, posonlyargs=None, vararg=None,
- kwonlyargs=None, kwarg=None,
- defaults=None, kw_defaults=None):
- if args is None:
- args = []
- if posonlyargs is None:
- posonlyargs = []
- if kwonlyargs is None:
- kwonlyargs = []
- if defaults is None:
- defaults = []
- if kw_defaults is None:
- kw_defaults = []
- args = ast.arguments(args, posonlyargs, vararg, kwonlyargs,
- kw_defaults, kwarg, defaults)
- return fac(args)
- args = [ast.arg("x", ast.Name("x", ast.Store()))]
- check(arguments(args=args), "must have Load context")
- check(arguments(posonlyargs=args), "must have Load context")
- check(arguments(kwonlyargs=args), "must have Load context")
- check(arguments(defaults=[ast.Constant(3)]),
- "more positional defaults than args")
- check(arguments(kw_defaults=[ast.Constant(4)]),
- "length of kwonlyargs is not the same as kw_defaults")
- args = [ast.arg("x", ast.Name("x", ast.Load()))]
- check(arguments(args=args, defaults=[ast.Name("x", ast.Store())]),
- "must have Load context")
- args = [ast.arg("a", ast.Name("x", ast.Load())),
- ast.arg("b", ast.Name("y", ast.Load()))]
- check(arguments(kwonlyargs=args,
- kw_defaults=[None, ast.Name("x", ast.Store())]),
- "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_funcdef(self):
- a = ast.arguments([], [], None, [], [], None, [])
- f = ast.FunctionDef("x", a, [], [], None, None, [])
- self.stmt(f, "empty body on FunctionDef")
- f = ast.FunctionDef("x", a, [ast.Pass()], [ast.Name("x", ast.Store())], None, None, [])
- self.stmt(f, "must have Load context")
- f = ast.FunctionDef("x", a, [ast.Pass()], [],
- ast.Name("x", ast.Store()), None, [])
- self.stmt(f, "must have Load context")
- f = ast.FunctionDef("x", ast.arguments(), [ast.Pass()])
- self.stmt(f)
- def fac(args):
- return ast.FunctionDef("x", args, [ast.Pass()], [], None, None, [])
- self._check_arguments(fac, self.stmt)
-
- # TODO: RUSTPYTHON, match expression is not implemented yet
- # def test_funcdef_pattern_matching(self):
- # # gh-104799: New fields on FunctionDef should be added at the end
- # def matcher(node):
- # match node:
- # case ast.FunctionDef("foo", ast.arguments(args=[ast.arg("bar")]),
- # [ast.Pass()],
- # [ast.Name("capybara", ast.Load())],
- # ast.Name("pacarana", ast.Load())):
- # return True
- # case _:
- # return False
-
- # code = """
- # @capybara
- # def foo(bar) -> pacarana:
- # pass
- # """
- # source = ast.parse(textwrap.dedent(code))
- # funcdef = source.body[0]
- # self.assertIsInstance(funcdef, ast.FunctionDef)
- # self.assertTrue(matcher(funcdef))
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_classdef(self):
- def cls(bases=None, keywords=None, body=None, decorator_list=None, type_params=None):
- if bases is None:
- bases = []
- if keywords is None:
- keywords = []
- if body is None:
- body = [ast.Pass()]
- if decorator_list is None:
- decorator_list = []
- if type_params is None:
- type_params = []
- return ast.ClassDef("myclass", bases, keywords,
- body, decorator_list, type_params)
- self.stmt(cls(bases=[ast.Name("x", ast.Store())]),
- "must have Load context")
- self.stmt(cls(keywords=[ast.keyword("x", ast.Name("x", ast.Store()))]),
- "must have Load context")
- self.stmt(cls(body=[]), "empty body on ClassDef")
- self.stmt(cls(body=[None]), "None disallowed")
- self.stmt(cls(decorator_list=[ast.Name("x", ast.Store())]),
- "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_delete(self):
- self.stmt(ast.Delete([]), "empty targets on Delete")
- self.stmt(ast.Delete([None]), "None disallowed")
- self.stmt(ast.Delete([ast.Name("x", ast.Load())]),
- "must have Del context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_assign(self):
- self.stmt(ast.Assign([], ast.Constant(3)), "empty targets on Assign")
- self.stmt(ast.Assign([None], ast.Constant(3)), "None disallowed")
- self.stmt(ast.Assign([ast.Name("x", ast.Load())], ast.Constant(3)),
- "must have Store context")
- self.stmt(ast.Assign([ast.Name("x", ast.Store())],
- ast.Name("y", ast.Store())),
- "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_augassign(self):
- aug = ast.AugAssign(ast.Name("x", ast.Load()), ast.Add(),
- ast.Name("y", ast.Load()))
- self.stmt(aug, "must have Store context")
- aug = ast.AugAssign(ast.Name("x", ast.Store()), ast.Add(),
- ast.Name("y", ast.Store()))
- self.stmt(aug, "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_for(self):
- x = ast.Name("x", ast.Store())
- y = ast.Name("y", ast.Load())
- p = ast.Pass()
- self.stmt(ast.For(x, y, [], []), "empty body on For")
- self.stmt(ast.For(ast.Name("x", ast.Load()), y, [p], []),
- "must have Store context")
- self.stmt(ast.For(x, ast.Name("y", ast.Store()), [p], []),
- "must have Load context")
- e = ast.Expr(ast.Name("x", ast.Store()))
- self.stmt(ast.For(x, y, [e], []), "must have Load context")
- self.stmt(ast.For(x, y, [p], [e]), "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_while(self):
- self.stmt(ast.While(ast.Constant(3), [], []), "empty body on While")
- self.stmt(ast.While(ast.Name("x", ast.Store()), [ast.Pass()], []),
- "must have Load context")
- self.stmt(ast.While(ast.Constant(3), [ast.Pass()],
- [ast.Expr(ast.Name("x", ast.Store()))]),
- "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_if(self):
- self.stmt(ast.If(ast.Constant(3), [], []), "empty body on If")
- i = ast.If(ast.Name("x", ast.Store()), [ast.Pass()], [])
- self.stmt(i, "must have Load context")
- i = ast.If(ast.Constant(3), [ast.Expr(ast.Name("x", ast.Store()))], [])
- self.stmt(i, "must have Load context")
- i = ast.If(ast.Constant(3), [ast.Pass()],
- [ast.Expr(ast.Name("x", ast.Store()))])
- self.stmt(i, "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_with(self):
- p = ast.Pass()
- self.stmt(ast.With([], [p]), "empty items on With")
- i = ast.withitem(ast.Constant(3), None)
- self.stmt(ast.With([i], []), "empty body on With")
- i = ast.withitem(ast.Name("x", ast.Store()), None)
- self.stmt(ast.With([i], [p]), "must have Load context")
- i = ast.withitem(ast.Constant(3), ast.Name("x", ast.Load()))
- self.stmt(ast.With([i], [p]), "must have Store context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_raise(self):
- r = ast.Raise(None, ast.Constant(3))
- self.stmt(r, "Raise with cause but no exception")
- r = ast.Raise(ast.Name("x", ast.Store()), None)
- self.stmt(r, "must have Load context")
- r = ast.Raise(ast.Constant(4), ast.Name("x", ast.Store()))
- self.stmt(r, "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_try(self):
- p = ast.Pass()
- t = ast.Try([], [], [], [p])
- self.stmt(t, "empty body on Try")
- t = ast.Try([ast.Expr(ast.Name("x", ast.Store()))], [], [], [p])
- self.stmt(t, "must have Load context")
- t = ast.Try([p], [], [], [])
- self.stmt(t, "Try has neither except handlers nor finalbody")
- t = ast.Try([p], [], [p], [p])
- self.stmt(t, "Try has orelse but no except handlers")
- t = ast.Try([p], [ast.ExceptHandler(None, "x", [])], [], [])
- self.stmt(t, "empty body on ExceptHandler")
- e = [ast.ExceptHandler(ast.Name("x", ast.Store()), "y", [p])]
- self.stmt(ast.Try([p], e, [], []), "must have Load context")
- e = [ast.ExceptHandler(None, "x", [p])]
- t = ast.Try([p], e, [ast.Expr(ast.Name("x", ast.Store()))], [p])
- self.stmt(t, "must have Load context")
- t = ast.Try([p], e, [p], [ast.Expr(ast.Name("x", ast.Store()))])
- self.stmt(t, "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.skip("TODO: RUSTPYTHON, SyntaxError: RustPython does not implement this feature yet")
- def test_try_star(self):
- p = ast.Pass()
- t = ast.TryStar([], [], [], [p])
- self.stmt(t, "empty body on TryStar")
- t = ast.TryStar([ast.Expr(ast.Name("x", ast.Store()))], [], [], [p])
- self.stmt(t, "must have Load context")
- t = ast.TryStar([p], [], [], [])
- self.stmt(t, "TryStar has neither except handlers nor finalbody")
- t = ast.TryStar([p], [], [p], [p])
- self.stmt(t, "TryStar has orelse but no except handlers")
- t = ast.TryStar([p], [ast.ExceptHandler(None, "x", [])], [], [])
- self.stmt(t, "empty body on ExceptHandler")
- e = [ast.ExceptHandler(ast.Name("x", ast.Store()), "y", [p])]
- self.stmt(ast.TryStar([p], e, [], []), "must have Load context")
- e = [ast.ExceptHandler(None, "x", [p])]
- t = ast.TryStar([p], e, [ast.Expr(ast.Name("x", ast.Store()))], [p])
- self.stmt(t, "must have Load context")
- t = ast.TryStar([p], e, [p], [ast.Expr(ast.Name("x", ast.Store()))])
- self.stmt(t, "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_assert(self):
- self.stmt(ast.Assert(ast.Name("x", ast.Store()), None),
- "must have Load context")
- assrt = ast.Assert(ast.Name("x", ast.Load()),
- ast.Name("y", ast.Store()))
- self.stmt(assrt, "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_import(self):
- self.stmt(ast.Import([]), "empty names on Import")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_importfrom(self):
- imp = ast.ImportFrom(None, [ast.alias("x", None)], -42)
- self.stmt(imp, "Negative ImportFrom level")
- self.stmt(ast.ImportFrom(None, [], 0), "empty names on ImportFrom")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_global(self):
- self.stmt(ast.Global([]), "empty names on Global")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_nonlocal(self):
- self.stmt(ast.Nonlocal([]), "empty names on Nonlocal")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_expr(self):
- e = ast.Expr(ast.Name("x", ast.Store()))
- self.stmt(e, "must have Load context")
-
- @unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'called `Option::unwrap()` on a `None` value'")
- def test_boolop(self):
- b = ast.BoolOp(ast.And(), [])
- self.expr(b, "less than 2 values")
- b = ast.BoolOp(ast.And(), [ast.Constant(3)])
- self.expr(b, "less than 2 values")
- b = ast.BoolOp(ast.And(), [ast.Constant(4), None])
- self.expr(b, "None disallowed")
- b = ast.BoolOp(ast.And(), [ast.Constant(4), ast.Name("x", ast.Store())])
- self.expr(b, "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_unaryop(self):
- u = ast.UnaryOp(ast.Not(), ast.Name("x", ast.Store()))
- self.expr(u, "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_lambda(self):
- a = ast.arguments([], [], None, [], [], None, [])
- self.expr(ast.Lambda(a, ast.Name("x", ast.Store())),
- "must have Load context")
- def fac(args):
- return ast.Lambda(args, ast.Name("x", ast.Load()))
- self._check_arguments(fac, self.expr)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_ifexp(self):
- l = ast.Name("x", ast.Load())
- s = ast.Name("y", ast.Store())
- for args in (s, l, l), (l, s, l), (l, l, s):
- self.expr(ast.IfExp(*args), "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_dict(self):
- d = ast.Dict([], [ast.Name("x", ast.Load())])
- self.expr(d, "same number of keys as values")
- d = ast.Dict([ast.Name("x", ast.Load())], [None])
- self.expr(d, "None disallowed")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_set(self):
- self.expr(ast.Set([None]), "None disallowed")
- s = ast.Set([ast.Name("x", ast.Store())])
- self.expr(s, "must have Load context")
-
- def _check_comprehension(self, fac):
- self.expr(fac([]), "comprehension with no generators")
- g = ast.comprehension(ast.Name("x", ast.Load()),
- ast.Name("x", ast.Load()), [], 0)
- self.expr(fac([g]), "must have Store context")
- g = ast.comprehension(ast.Name("x", ast.Store()),
- ast.Name("x", ast.Store()), [], 0)
- self.expr(fac([g]), "must have Load context")
- x = ast.Name("x", ast.Store())
- y = ast.Name("y", ast.Load())
- g = ast.comprehension(x, y, [None], 0)
- self.expr(fac([g]), "None disallowed")
- g = ast.comprehension(x, y, [ast.Name("x", ast.Store())], 0)
- self.expr(fac([g]), "must have Load context")
-
- def _simple_comp(self, fac):
- g = ast.comprehension(ast.Name("x", ast.Store()),
- ast.Name("x", ast.Load()), [], 0)
- self.expr(fac(ast.Name("x", ast.Store()), [g]),
- "must have Load context")
- def wrap(gens):
- return fac(ast.Name("x", ast.Store()), gens)
- self._check_comprehension(wrap)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_listcomp(self):
- self._simple_comp(ast.ListComp)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_setcomp(self):
- self._simple_comp(ast.SetComp)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_generatorexp(self):
- self._simple_comp(ast.GeneratorExp)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_dictcomp(self):
- g = ast.comprehension(ast.Name("y", ast.Store()),
- ast.Name("p", ast.Load()), [], 0)
- c = ast.DictComp(ast.Name("x", ast.Store()),
- ast.Name("y", ast.Load()), [g])
- self.expr(c, "must have Load context")
- c = ast.DictComp(ast.Name("x", ast.Load()),
- ast.Name("y", ast.Store()), [g])
- self.expr(c, "must have Load context")
- def factory(comps):
- k = ast.Name("x", ast.Load())
- v = ast.Name("y", ast.Load())
- return ast.DictComp(k, v, comps)
- self._check_comprehension(factory)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_yield(self):
- self.expr(ast.Yield(ast.Name("x", ast.Store())), "must have Load")
- self.expr(ast.YieldFrom(ast.Name("x", ast.Store())), "must have Load")
-
- @unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'assertion failed: `(left == right)` left: `0`, right: `1`'")
- def test_compare(self):
- left = ast.Name("x", ast.Load())
- comp = ast.Compare(left, [ast.In()], [])
- self.expr(comp, "no comparators")
- comp = ast.Compare(left, [ast.In()], [ast.Constant(4), ast.Constant(5)])
- self.expr(comp, "different number of comparators and operands")
- comp = ast.Compare(ast.Constant("blah"), [ast.In()], [left])
- self.expr(comp)
- comp = ast.Compare(left, [ast.In()], [ast.Constant("blah")])
- self.expr(comp)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_call(self):
- func = ast.Name("x", ast.Load())
- args = [ast.Name("y", ast.Load())]
- keywords = [ast.keyword("w", ast.Name("z", ast.Load()))]
- call = ast.Call(ast.Name("x", ast.Store()), args, keywords)
- self.expr(call, "must have Load context")
- call = ast.Call(func, [None], keywords)
- self.expr(call, "None disallowed")
- bad_keywords = [ast.keyword("w", ast.Name("z", ast.Store()))]
- call = ast.Call(func, args, bad_keywords)
- self.expr(call, "must have Load context")
-
- def test_num(self):
- with warnings.catch_warnings(record=True) as wlog:
- warnings.filterwarnings('ignore', '', DeprecationWarning)
- from ast import Num
-
- with warnings.catch_warnings(record=True) as wlog:
- warnings.filterwarnings('always', '', DeprecationWarning)
- class subint(int):
- pass
- class subfloat(float):
- pass
- class subcomplex(complex):
- pass
- for obj in "0", "hello":
- self.expr(ast.Num(obj))
- for obj in subint(), subfloat(), subcomplex():
- self.expr(ast.Num(obj), "invalid type", exc=TypeError)
-
- self.assertEqual([str(w.message) for w in wlog], [
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- 'ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- ])
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_attribute(self):
- attr = ast.Attribute(ast.Name("x", ast.Store()), "y", ast.Load())
- self.expr(attr, "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_subscript(self):
- sub = ast.Subscript(ast.Name("x", ast.Store()), ast.Constant(3),
- ast.Load())
- self.expr(sub, "must have Load context")
- x = ast.Name("x", ast.Load())
- sub = ast.Subscript(x, ast.Name("y", ast.Store()),
- ast.Load())
- self.expr(sub, "must have Load context")
- s = ast.Name("x", ast.Store())
- for args in (s, None, None), (None, s, None), (None, None, s):
- sl = ast.Slice(*args)
- self.expr(ast.Subscript(x, sl, ast.Load()),
- "must have Load context")
- sl = ast.Tuple([], ast.Load())
- self.expr(ast.Subscript(x, sl, ast.Load()))
- sl = ast.Tuple([s], ast.Load())
- self.expr(ast.Subscript(x, sl, ast.Load()), "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_starred(self):
- left = ast.List([ast.Starred(ast.Name("x", ast.Load()), ast.Store())],
- ast.Store())
- assign = ast.Assign([left], ast.Constant(4))
- self.stmt(assign, "must have Store context")
-
- def _sequence(self, fac):
- self.expr(fac([None], ast.Load()), "None disallowed")
- self.expr(fac([ast.Name("x", ast.Store())], ast.Load()),
- "must have Load context")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_list(self):
- self._sequence(ast.List)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_tuple(self):
- self._sequence(ast.Tuple)
-
- def test_nameconstant(self):
- with warnings.catch_warnings(record=True) as wlog:
- warnings.filterwarnings('ignore', '', DeprecationWarning)
- from ast import NameConstant
-
- with warnings.catch_warnings(record=True) as wlog:
- warnings.filterwarnings('always', '', DeprecationWarning)
- self.expr(ast.NameConstant(4))
-
- self.assertEqual([str(w.message) for w in wlog], [
- 'ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead',
- ])
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- @support.requires_resource('cpu')
- def test_stdlib_validates(self):
- stdlib = os.path.dirname(ast.__file__)
- tests = [fn for fn in os.listdir(stdlib) if fn.endswith(".py")]
- tests.extend(["test/test_grammar.py", "test/test_unpack_ex.py"])
- for module in tests:
- with self.subTest(module):
- fn = os.path.join(stdlib, module)
- with open(fn, "r", encoding="utf-8") as fp:
- source = fp.read()
- mod = ast.parse(source, fn)
- compile(mod, fn, "exec")
-
- constant_1 = ast.Constant(1)
- pattern_1 = ast.MatchValue(constant_1)
-
- constant_x = ast.Constant('x')
- pattern_x = ast.MatchValue(constant_x)
-
- constant_true = ast.Constant(True)
- pattern_true = ast.MatchSingleton(True)
-
- name_carter = ast.Name('carter', ast.Load())
-
- _MATCH_PATTERNS = [
- ast.MatchValue(
- ast.Attribute(
- ast.Attribute(
- ast.Name('x', ast.Store()),
- 'y', ast.Load()
- ),
- 'z', ast.Load()
- )
- ),
- ast.MatchValue(
- ast.Attribute(
- ast.Attribute(
- ast.Name('x', ast.Load()),
- 'y', ast.Store()
- ),
- 'z', ast.Load()
- )
- ),
- ast.MatchValue(
- ast.Constant(...)
- ),
- ast.MatchValue(
- ast.Constant(True)
- ),
- ast.MatchValue(
- ast.Constant((1,2,3))
- ),
- ast.MatchSingleton('string'),
- ast.MatchSequence([
- ast.MatchSingleton('string')
- ]),
- ast.MatchSequence(
- [
- ast.MatchSequence(
- [
- ast.MatchSingleton('string')
- ]
- )
- ]
- ),
- ast.MatchMapping(
- [constant_1, constant_true],
- [pattern_x]
- ),
- ast.MatchMapping(
- [constant_true, constant_1],
- [pattern_x, pattern_1],
- rest='True'
- ),
- ast.MatchMapping(
- [constant_true, ast.Starred(ast.Name('lol', ast.Load()), ast.Load())],
- [pattern_x, pattern_1],
- rest='legit'
- ),
- ast.MatchClass(
- ast.Attribute(
- ast.Attribute(
- constant_x,
- 'y', ast.Load()),
- 'z', ast.Load()),
- patterns=[], kwd_attrs=[], kwd_patterns=[]
- ),
- ast.MatchClass(
- name_carter,
- patterns=[],
- kwd_attrs=['True'],
- kwd_patterns=[pattern_1]
- ),
- ast.MatchClass(
- name_carter,
- patterns=[],
- kwd_attrs=[],
- kwd_patterns=[pattern_1]
- ),
- ast.MatchClass(
- name_carter,
- patterns=[ast.MatchSingleton('string')],
- kwd_attrs=[],
- kwd_patterns=[]
- ),
- ast.MatchClass(
- name_carter,
- patterns=[ast.MatchStar()],
- kwd_attrs=[],
- kwd_patterns=[]
- ),
- ast.MatchClass(
- name_carter,
- patterns=[],
- kwd_attrs=[],
- kwd_patterns=[ast.MatchStar()]
- ),
- ast.MatchClass(
- constant_true, # invalid name
- patterns=[],
- kwd_attrs=['True'],
- kwd_patterns=[pattern_1]
- ),
- ast.MatchSequence(
- [
- ast.MatchStar("True")
- ]
- ),
- ast.MatchAs(
- name='False'
- ),
- ast.MatchOr(
- []
- ),
- ast.MatchOr(
- [pattern_1]
- ),
- ast.MatchOr(
- [pattern_1, pattern_x, ast.MatchSingleton('xxx')]
- ),
- ast.MatchAs(name="_"),
- ast.MatchStar(name="x"),
- ast.MatchSequence([ast.MatchStar("_")]),
- ast.MatchMapping([], [], rest="_"),
- ]
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_match_validation_pattern(self):
- name_x = ast.Name('x', ast.Load())
- for pattern in self._MATCH_PATTERNS:
- with self.subTest(ast.dump(pattern, indent=4)):
- node = ast.Match(
- subject=name_x,
- cases = [
- ast.match_case(
- pattern=pattern,
- body = [ast.Pass()]
- )
- ]
- )
- node = ast.fix_missing_locations(node)
- module = ast.Module([node], [])
- with self.assertRaises(ValueError):
- compile(module, "", "exec")
-
-
-class ConstantTests(unittest.TestCase):
- """Tests on the ast.Constant node type."""
-
- def compile_constant(self, value):
- tree = ast.parse("x = 123")
-
- node = tree.body[0].value
- new_node = ast.Constant(value=value)
- ast.copy_location(new_node, node)
- tree.body[0].value = new_node
-
- code = compile(tree, "", "exec")
-
- ns = {}
- exec(code, ns)
- return ns['x']
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_validation(self):
- with self.assertRaises(TypeError) as cm:
- self.compile_constant([1, 2, 3])
- self.assertEqual(str(cm.exception),
- "got an invalid type in Constant: list")
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_singletons(self):
- for const in (None, False, True, Ellipsis, b'', frozenset()):
- with self.subTest(const=const):
- value = self.compile_constant(const)
- self.assertIs(value, const)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_values(self):
- nested_tuple = (1,)
- nested_frozenset = frozenset({1})
- for level in range(3):
- nested_tuple = (nested_tuple, 2)
- nested_frozenset = frozenset({nested_frozenset, 2})
- values = (123, 123.0, 123j,
- "unicode", b'bytes',
- tuple("tuple"), frozenset("frozenset"),
- nested_tuple, nested_frozenset)
- for value in values:
- with self.subTest(value=value):
- result = self.compile_constant(value)
- self.assertEqual(result, value)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_assign_to_constant(self):
- tree = ast.parse("x = 1")
-
- target = tree.body[0].targets[0]
- new_target = ast.Constant(value=1)
- ast.copy_location(new_target, target)
- tree.body[0].targets[0] = new_target
-
- with self.assertRaises(ValueError) as cm:
- compile(tree, "string", "exec")
- self.assertEqual(str(cm.exception),
- "expression which can't be assigned "
- "to in Store context")
-
- def test_get_docstring(self):
- tree = ast.parse("'docstring'\nx = 1")
- self.assertEqual(ast.get_docstring(tree), 'docstring')
-
- def get_load_const(self, tree):
- # Compile to bytecode, disassemble and get parameter of LOAD_CONST
- # instructions
- co = compile(tree, '<string>', 'exec')
- consts = []
- for instr in dis.get_instructions(co):
- if instr.opname == 'LOAD_CONST' or instr.opname == 'RETURN_CONST':
- consts.append(instr.argval)
- return consts
-
- @support.cpython_only
- def test_load_const(self):
- consts = [None,
- True, False,
- 124,
- 2.0,
- 3j,
- "unicode",
- b'bytes',
- (1, 2, 3)]
-
- code = '\n'.join(['x={!r}'.format(const) for const in consts])
- code += '\nx = ...'
- consts.extend((Ellipsis, None))
-
- tree = ast.parse(code)
- self.assertEqual(self.get_load_const(tree),
- consts)
-
- # Replace expression nodes with constants
- for assign, const in zip(tree.body, consts):
- assert isinstance(assign, ast.Assign), ast.dump(assign)
- new_node = ast.Constant(value=const)
- ast.copy_location(new_node, assign.value)
- assign.value = new_node
-
- self.assertEqual(self.get_load_const(tree),
- consts)
-
- def test_literal_eval(self):
- tree = ast.parse("1 + 2")
- binop = tree.body[0].value
-
- new_left = ast.Constant(value=10)
- ast.copy_location(new_left, binop.left)
- binop.left = new_left
-
- new_right = ast.Constant(value=20j)
- ast.copy_location(new_right, binop.right)
- binop.right = new_right
-
- self.assertEqual(ast.literal_eval(binop), 10+20j)
-
- def test_string_kind(self):
- c = ast.parse('"x"', mode='eval').body
- self.assertEqual(c.value, "x")
- self.assertEqual(c.kind, None)
-
- c = ast.parse('u"x"', mode='eval').body
- self.assertEqual(c.value, "x")
- self.assertEqual(c.kind, "u")
-
- c = ast.parse('r"x"', mode='eval').body
- self.assertEqual(c.value, "x")
- self.assertEqual(c.kind, None)
-
- c = ast.parse('b"x"', mode='eval').body
- self.assertEqual(c.value, b"x")
- self.assertEqual(c.kind, None)
-
-
-class EndPositionTests(unittest.TestCase):
- """Tests for end position of AST nodes.
-
- Testing end positions of nodes requires a bit of extra care
- because of how LL parsers work.
- """
- def _check_end_pos(self, ast_node, end_lineno, end_col_offset):
- self.assertEqual(ast_node.end_lineno, end_lineno)
- self.assertEqual(ast_node.end_col_offset, end_col_offset)
-
- def _check_content(self, source, ast_node, content):
- self.assertEqual(ast.get_source_segment(source, ast_node), content)
-
- def _parse_value(self, s):
- # Use duck-typing to support both single expression
- # and a right hand side of an assignment statement.
- return ast.parse(s).body[0].value
-
- def test_lambda(self):
- s = 'lambda x, *y: None'
- lam = self._parse_value(s)
- self._check_content(s, lam.body, 'None')
- self._check_content(s, lam.args.args[0], 'x')
- self._check_content(s, lam.args.vararg, 'y')
-
- def test_func_def(self):
- s = dedent('''
- def func(x: int,
- *args: str,
- z: float = 0,
- **kwargs: Any) -> bool:
- return True
- ''').strip()
- fdef = ast.parse(s).body[0]
- self._check_end_pos(fdef, 5, 15)
- self._check_content(s, fdef.body[0], 'return True')
- self._check_content(s, fdef.args.args[0], 'x: int')
- self._check_content(s, fdef.args.args[0].annotation, 'int')
- self._check_content(s, fdef.args.kwarg, 'kwargs: Any')
- self._check_content(s, fdef.args.kwarg.annotation, 'Any')
-
- def test_call(self):
- s = 'func(x, y=2, **kw)'
- call = self._parse_value(s)
- self._check_content(s, call.func, 'func')
- self._check_content(s, call.keywords[0].value, '2')
- self._check_content(s, call.keywords[1].value, 'kw')
-
- def test_call_noargs(self):
- s = 'x[0]()'
- call = self._parse_value(s)
- self._check_content(s, call.func, 'x[0]')
- self._check_end_pos(call, 1, 6)
-
- def test_class_def(self):
- s = dedent('''
- class C(A, B):
- x: int = 0
- ''').strip()
- cdef = ast.parse(s).body[0]
- self._check_end_pos(cdef, 2, 14)
- self._check_content(s, cdef.bases[1], 'B')
- self._check_content(s, cdef.body[0], 'x: int = 0')
-
- def test_class_kw(self):
- s = 'class S(metaclass=abc.ABCMeta): pass'
- cdef = ast.parse(s).body[0]
- self._check_content(s, cdef.keywords[0].value, 'abc.ABCMeta')
-
- def test_multi_line_str(self):
- s = dedent('''
- x = """Some multi-line text.
-
- It goes on starting from same indent."""
- ''').strip()
- assign = ast.parse(s).body[0]
- self._check_end_pos(assign, 3, 40)
- self._check_end_pos(assign.value, 3, 40)
-
- def test_continued_str(self):
- s = dedent('''
- x = "first part" \\
- "second part"
- ''').strip()
- assign = ast.parse(s).body[0]
- self._check_end_pos(assign, 2, 13)
- self._check_end_pos(assign.value, 2, 13)
-
- def test_suites(self):
- # We intentionally put these into the same string to check
- # that empty lines are not part of the suite.
- s = dedent('''
- while True:
- pass
-
- if one():
- x = None
- elif other():
- y = None
- else:
- z = None
-
- for x, y in stuff:
- assert True
-
- try:
- raise RuntimeError
- except TypeError as e:
- pass
-
- pass
- ''').strip()
- mod = ast.parse(s)
- while_loop = mod.body[0]
- if_stmt = mod.body[1]
- for_loop = mod.body[2]
- try_stmt = mod.body[3]
- pass_stmt = mod.body[4]
-
- self._check_end_pos(while_loop, 2, 8)
- self._check_end_pos(if_stmt, 9, 12)
- self._check_end_pos(for_loop, 12, 15)
- self._check_end_pos(try_stmt, 17, 8)
- self._check_end_pos(pass_stmt, 19, 4)
-
- self._check_content(s, while_loop.test, 'True')
- self._check_content(s, if_stmt.body[0], 'x = None')
- self._check_content(s, if_stmt.orelse[0].test, 'other()')
- self._check_content(s, for_loop.target, 'x, y')
- self._check_content(s, try_stmt.body[0], 'raise RuntimeError')
- self._check_content(s, try_stmt.handlers[0].type, 'TypeError')
-
- def test_fstring(self):
- s = 'x = f"abc {x + y} abc"'
- fstr = self._parse_value(s)
- binop = fstr.values[1].value
- self._check_content(s, binop, 'x + y')
-
- def test_fstring_multi_line(self):
- s = dedent('''
- f"""Some multi-line text.
- {
- arg_one
- +
- arg_two
- }
- It goes on..."""
- ''').strip()
- fstr = self._parse_value(s)
- binop = fstr.values[1].value
- self._check_end_pos(binop, 5, 7)
- self._check_content(s, binop.left, 'arg_one')
- self._check_content(s, binop.right, 'arg_two')
-
- def test_import_from_multi_line(self):
- s = dedent('''
- from x.y.z import (
- a, b, c as c
- )
- ''').strip()
- imp = ast.parse(s).body[0]
- self._check_end_pos(imp, 3, 1)
- self._check_end_pos(imp.names[2], 2, 16)
-
- def test_slices(self):
- s1 = 'f()[1, 2] [0]'
- s2 = 'x[ a.b: c.d]'
- sm = dedent('''
- x[ a.b: f () ,
- g () : c.d
- ]
- ''').strip()
- i1, i2, im = map(self._parse_value, (s1, s2, sm))
- self._check_content(s1, i1.value, 'f()[1, 2]')
- self._check_content(s1, i1.value.slice, '1, 2')
- self._check_content(s2, i2.slice.lower, 'a.b')
- self._check_content(s2, i2.slice.upper, 'c.d')
- self._check_content(sm, im.slice.elts[0].upper, 'f ()')
- self._check_content(sm, im.slice.elts[1].lower, 'g ()')
- self._check_end_pos(im, 3, 3)
-
- def test_binop(self):
- s = dedent('''
- (1 * 2 + (3 ) +
- 4
- )
- ''').strip()
- binop = self._parse_value(s)
- self._check_end_pos(binop, 2, 6)
- self._check_content(s, binop.right, '4')
- self._check_content(s, binop.left, '1 * 2 + (3 )')
- self._check_content(s, binop.left.right, '3')
-
- def test_boolop(self):
- s = dedent('''
- if (one_condition and
- (other_condition or yet_another_one)):
- pass
- ''').strip()
- bop = ast.parse(s).body[0].test
- self._check_end_pos(bop, 2, 44)
- self._check_content(s, bop.values[1],
- 'other_condition or yet_another_one')
-
- def test_tuples(self):
- s1 = 'x = () ;'
- s2 = 'x = 1 , ;'
- s3 = 'x = (1 , 2 ) ;'
- sm = dedent('''
- x = (
- a, b,
- )
- ''').strip()
- t1, t2, t3, tm = map(self._parse_value, (s1, s2, s3, sm))
- self._check_content(s1, t1, '()')
- self._check_content(s2, t2, '1 ,')
- self._check_content(s3, t3, '(1 , 2 )')
- self._check_end_pos(tm, 3, 1)
-
- def test_attribute_spaces(self):
- s = 'func(x. y .z)'
- call = self._parse_value(s)
- self._check_content(s, call, s)
- self._check_content(s, call.args[0], 'x. y .z')
-
- def test_redundant_parenthesis(self):
- s = '( ( ( a + b ) ) )'
- v = ast.parse(s).body[0].value
- self.assertEqual(type(v).__name__, 'BinOp')
- self._check_content(s, v, 'a + b')
- s2 = 'await ' + s
- v = ast.parse(s2).body[0].value.value
- self.assertEqual(type(v).__name__, 'BinOp')
- self._check_content(s2, v, 'a + b')
-
- def test_trailers_with_redundant_parenthesis(self):
- tests = (
- ('( ( ( a ) ) ) ( )', 'Call'),
- ('( ( ( a ) ) ) ( b )', 'Call'),
- ('( ( ( a ) ) ) [ b ]', 'Subscript'),
- ('( ( ( a ) ) ) . b', 'Attribute'),
- )
- for s, t in tests:
- with self.subTest(s):
- v = ast.parse(s).body[0].value
- self.assertEqual(type(v).__name__, t)
- self._check_content(s, v, s)
- s2 = 'await ' + s
- v = ast.parse(s2).body[0].value.value
- self.assertEqual(type(v).__name__, t)
- self._check_content(s2, v, s)
-
- def test_displays(self):
- s1 = '[{}, {1, }, {1, 2,} ]'
- s2 = '{a: b, f (): g () ,}'
- c1 = self._parse_value(s1)
- c2 = self._parse_value(s2)
- self._check_content(s1, c1.elts[0], '{}')
- self._check_content(s1, c1.elts[1], '{1, }')
- self._check_content(s1, c1.elts[2], '{1, 2,}')
- self._check_content(s2, c2.keys[1], 'f ()')
- self._check_content(s2, c2.values[1], 'g ()')
-
- def test_comprehensions(self):
- s = dedent('''
- x = [{x for x, y in stuff
- if cond.x} for stuff in things]
- ''').strip()
- cmp = self._parse_value(s)
- self._check_end_pos(cmp, 2, 37)
- self._check_content(s, cmp.generators[0].iter, 'things')
- self._check_content(s, cmp.elt.generators[0].iter, 'stuff')
- self._check_content(s, cmp.elt.generators[0].ifs[0], 'cond.x')
- self._check_content(s, cmp.elt.generators[0].target, 'x, y')
-
- def test_yield_await(self):
- s = dedent('''
- async def f():
- yield x
- await y
- ''').strip()
- fdef = ast.parse(s).body[0]
- self._check_content(s, fdef.body[0].value, 'yield x')
- self._check_content(s, fdef.body[1].value, 'await y')
-
- def test_source_segment_multi(self):
- s_orig = dedent('''
- x = (
- a, b,
- ) + ()
- ''').strip()
- s_tuple = dedent('''
- (
- a, b,
- )
- ''').strip()
- binop = self._parse_value(s_orig)
- self.assertEqual(ast.get_source_segment(s_orig, binop.left), s_tuple)
-
- def test_source_segment_padded(self):
- s_orig = dedent('''
- class C:
- def fun(self) -> None:
- "ЖЖЖЖЖ"
- ''').strip()
- s_method = ' def fun(self) -> None:\n' \
- ' "ЖЖЖЖЖ"'
- cdef = ast.parse(s_orig).body[0]
- self.assertEqual(ast.get_source_segment(s_orig, cdef.body[0], padded=True),
- s_method)
-
- def test_source_segment_endings(self):
- s = 'v = 1\r\nw = 1\nx = 1\n\ry = 1\rz = 1\r\n'
- v, w, x, y, z = ast.parse(s).body
- self._check_content(s, v, 'v = 1')
- self._check_content(s, w, 'w = 1')
- self._check_content(s, x, 'x = 1')
- self._check_content(s, y, 'y = 1')
- self._check_content(s, z, 'z = 1')
-
- def test_source_segment_tabs(self):
- s = dedent('''
- class C:
- \t\f def fun(self) -> None:
- \t\f pass
- ''').strip()
- s_method = ' \t\f def fun(self) -> None:\n' \
- ' \t\f pass'
-
- cdef = ast.parse(s).body[0]
- self.assertEqual(ast.get_source_segment(s, cdef.body[0], padded=True), s_method)
-
- def test_source_segment_newlines(self):
- s = 'def f():\n pass\ndef g():\r pass\r\ndef h():\r\n pass\r\n'
- f, g, h = ast.parse(s).body
- self._check_content(s, f, 'def f():\n pass')
- self._check_content(s, g, 'def g():\r pass')
- self._check_content(s, h, 'def h():\r\n pass')
-
- s = 'def f():\n a = 1\r b = 2\r\n c = 3\n'
- f = ast.parse(s).body[0]
- self._check_content(s, f, s.rstrip())
-
- def test_source_segment_missing_info(self):
- s = 'v = 1\r\nw = 1\nx = 1\n\ry = 1\r\n'
- v, w, x, y = ast.parse(s).body
- del v.lineno
- del w.end_lineno
- del x.col_offset
- del y.end_col_offset
- self.assertIsNone(ast.get_source_segment(s, v))
- self.assertIsNone(ast.get_source_segment(s, w))
- self.assertIsNone(ast.get_source_segment(s, x))
- self.assertIsNone(ast.get_source_segment(s, y))
-
-class BaseNodeVisitorCases:
- # Both `NodeVisitor` and `NodeTransformer` must raise these warnings:
- def test_old_constant_nodes(self):
- class Visitor(self.visitor_class):
- def visit_Num(self, node):
- log.append((node.lineno, 'Num', node.n))
- def visit_Str(self, node):
- log.append((node.lineno, 'Str', node.s))
- def visit_Bytes(self, node):
- log.append((node.lineno, 'Bytes', node.s))
- def visit_NameConstant(self, node):
- log.append((node.lineno, 'NameConstant', node.value))
- def visit_Ellipsis(self, node):
- log.append((node.lineno, 'Ellipsis', ...))
- mod = ast.parse(dedent('''\
- i = 42
- f = 4.25
- c = 4.25j
- s = 'string'
- b = b'bytes'
- t = True
- n = None
- e = ...
- '''))
- visitor = Visitor()
- log = []
- with warnings.catch_warnings(record=True) as wlog:
- warnings.filterwarnings('always', '', DeprecationWarning)
- visitor.visit(mod)
- self.assertEqual(log, [
- (1, 'Num', 42),
- (2, 'Num', 4.25),
- (3, 'Num', 4.25j),
- (4, 'Str', 'string'),
- (5, 'Bytes', b'bytes'),
- (6, 'NameConstant', True),
- (7, 'NameConstant', None),
- (8, 'Ellipsis', ...),
- ])
- self.assertEqual([str(w.message) for w in wlog], [
- 'visit_Num is deprecated; add visit_Constant',
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- 'visit_Num is deprecated; add visit_Constant',
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- 'visit_Num is deprecated; add visit_Constant',
- 'Attribute n is deprecated and will be removed in Python 3.14; use value instead',
- 'visit_Str is deprecated; add visit_Constant',
- 'Attribute s is deprecated and will be removed in Python 3.14; use value instead',
- 'visit_Bytes is deprecated; add visit_Constant',
- 'Attribute s is deprecated and will be removed in Python 3.14; use value instead',
- 'visit_NameConstant is deprecated; add visit_Constant',
- 'visit_NameConstant is deprecated; add visit_Constant',
- 'visit_Ellipsis is deprecated; add visit_Constant',
- ])
-
-
-class NodeVisitorTests(BaseNodeVisitorCases, unittest.TestCase):
- visitor_class = ast.NodeVisitor
-
-
-class NodeTransformerTests(ASTTestMixin, BaseNodeVisitorCases, unittest.TestCase):
- visitor_class = ast.NodeTransformer
-
- def assertASTTransformation(self, transformer_class,
- initial_code, expected_code):
- initial_ast = ast.parse(dedent(initial_code))
- expected_ast = ast.parse(dedent(expected_code))
-
- transformer = transformer_class()
- result_ast = ast.fix_missing_locations(transformer.visit(initial_ast))
-
- self.assertASTEqual(result_ast, expected_ast)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_node_remove_single(self):
- code = 'def func(arg) -> SomeType: ...'
- expected = 'def func(arg): ...'
-
- # Since `FunctionDef.returns` is defined as a single value, we test
- # the `if isinstance(old_value, AST):` branch here.
- class SomeTypeRemover(ast.NodeTransformer):
- def visit_Name(self, node: ast.Name):
- self.generic_visit(node)
- if node.id == 'SomeType':
- return None
- return node
-
- self.assertASTTransformation(SomeTypeRemover, code, expected)
-
- def test_node_remove_from_list(self):
- code = """
- def func(arg):
- print(arg)
- yield arg
- """
- expected = """
- def func(arg):
- print(arg)
- """
-
- # Since `FunctionDef.body` is defined as a list, we test
- # the `if isinstance(old_value, list):` branch here.
- class YieldRemover(ast.NodeTransformer):
- def visit_Expr(self, node: ast.Expr):
- self.generic_visit(node)
- if isinstance(node.value, ast.Yield):
- return None # Remove `yield` from a function
- return node
-
- self.assertASTTransformation(YieldRemover, code, expected)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_node_return_list(self):
- code = """
- class DSL(Base, kw1=True): ...
- """
- expected = """
- class DSL(Base, kw1=True, kw2=True, kw3=False): ...
- """
-
- class ExtendKeywords(ast.NodeTransformer):
- def visit_keyword(self, node: ast.keyword):
- self.generic_visit(node)
- if node.arg == 'kw1':
- return [
- node,
- ast.keyword('kw2', ast.Constant(True)),
- ast.keyword('kw3', ast.Constant(False)),
- ]
- return node
-
- self.assertASTTransformation(ExtendKeywords, code, expected)
-
- def test_node_mutate(self):
- code = """
- def func(arg):
- print(arg)
- """
- expected = """
- def func(arg):
- log(arg)
- """
-
- class PrintToLog(ast.NodeTransformer):
- def visit_Call(self, node: ast.Call):
- self.generic_visit(node)
- if isinstance(node.func, ast.Name) and node.func.id == 'print':
- node.func.id = 'log'
- return node
-
- self.assertASTTransformation(PrintToLog, code, expected)
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_node_replace(self):
- code = """
- def func(arg):
- print(arg)
- """
- expected = """
- def func(arg):
- logger.log(arg, debug=True)
- """
-
- class PrintToLog(ast.NodeTransformer):
- def visit_Call(self, node: ast.Call):
- self.generic_visit(node)
- if isinstance(node.func, ast.Name) and node.func.id == 'print':
- return ast.Call(
- func=ast.Attribute(
- ast.Name('logger', ctx=ast.Load()),
- attr='log',
- ctx=ast.Load(),
- ),
- args=node.args,
- keywords=[ast.keyword('debug', ast.Constant(True))],
- )
- return node
-
- self.assertASTTransformation(PrintToLog, code, expected)
-
-
-@support.cpython_only
-class ModuleStateTests(unittest.TestCase):
- # bpo-41194, bpo-41261, bpo-41631: The _ast module uses a global state.
-
- def check_ast_module(self):
- # Check that the _ast module still works as expected
- code = 'x + 1'
- filename = '<string>'
- mode = 'eval'
-
- # Create _ast.AST subclasses instances
- ast_tree = compile(code, filename, mode, flags=ast.PyCF_ONLY_AST)
-
- # Call PyAST_Check()
- code = compile(ast_tree, filename, mode)
- self.assertIsInstance(code, types.CodeType)
-
- def test_reload_module(self):
- # bpo-41194: Importing the _ast module twice must not crash.
- with support.swap_item(sys.modules, '_ast', None):
- del sys.modules['_ast']
- import _ast as ast1
-
- del sys.modules['_ast']
- import _ast as ast2
-
- self.check_ast_module()
-
- # Unloading the two _ast module instances must not crash.
- del ast1
- del ast2
- support.gc_collect()
-
- self.check_ast_module()
-
- def test_sys_modules(self):
- # bpo-41631: Test reproducing a Mercurial crash when PyAST_Check()
- # imported the _ast module internally.
- lazy_mod = object()
-
- def my_import(name, *args, **kw):
- sys.modules[name] = lazy_mod
- return lazy_mod
-
- with support.swap_item(sys.modules, '_ast', None):
- del sys.modules['_ast']
-
- with support.swap_attr(builtins, '__import__', my_import):
- # Test that compile() does not import the _ast module
- self.check_ast_module()
- self.assertNotIn('_ast', sys.modules)
-
- # Sanity check of the test itself
- import _ast
- self.assertIs(_ast, lazy_mod)
-
- def test_subinterpreter(self):
- # bpo-41631: Importing and using the _ast module in a subinterpreter
- # must not crash.
- code = dedent('''
- import _ast
- import ast
- import gc
- import sys
- import types
-
- # Create _ast.AST subclasses instances and call PyAST_Check()
- ast_tree = compile('x+1', '<string>', 'eval',
- flags=ast.PyCF_ONLY_AST)
- code = compile(ast_tree, 'string', 'eval')
- if not isinstance(code, types.CodeType):
- raise AssertionError
-
- # Unloading the _ast module must not crash.
- del ast, _ast
- del sys.modules['ast'], sys.modules['_ast']
- gc.collect()
- ''')
- res = support.run_in_subinterp(code)
- self.assertEqual(res, 0)
-
-
-class ASTMainTests(unittest.TestCase):
- # Tests `ast.main()` function.
-
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
- def test_cli_file_input(self):
- code = "print(1, 2, 3)"
- expected = ast.dump(ast.parse(code), indent=3)
-
- with os_helper.temp_dir() as tmp_dir:
- filename = os.path.join(tmp_dir, "test_module.py")
- with open(filename, 'w', encoding='utf-8') as f:
- f.write(code)
- res, _ = script_helper.run_python_until_end("-m", "ast", filename)
-
- self.assertEqual(res.err, b"")
- self.assertEqual(expected.splitlines(),
- res.out.decode("utf8").splitlines())
- self.assertEqual(res.rc, 0)
-
-
-def main():
- if __name__ != '__main__':
- return
- if sys.argv[1:] == ['-g']:
- for statements, kind in ((exec_tests, "exec"), (single_tests, "single"),
- (eval_tests, "eval")):
- print(kind+"_results = [")
- for statement in statements:
- tree = ast.parse(statement, "?", kind)
- print("%r," % (to_tuple(tree),))
- print("]")
- print("main()")
- raise SystemExit
- unittest.main()
-
-#### EVERYTHING BELOW IS GENERATED BY python Lib/test/test_ast.py -g #####
-exec_results = [
-('Module', [('Expr', (1, 0, 1, 4), ('Constant', (1, 0, 1, 4), None, None))], []),
-('Module', [('Expr', (1, 0, 1, 18), ('Constant', (1, 0, 1, 18), 'module docstring', None))], []),
-('Module', [('FunctionDef', (1, 0, 1, 13), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 9, 1, 13))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 29), 'f', ('arguments', [], [], None, [], [], None, []), [('Expr', (1, 9, 1, 29), ('Constant', (1, 9, 1, 29), 'function docstring', None))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 14), 'f', ('arguments', [], [('arg', (1, 6, 1, 7), 'a', None, None)], None, [], [], None, []), [('Pass', (1, 10, 1, 14))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 16), 'f', ('arguments', [], [('arg', (1, 6, 1, 7), 'a', None, None)], None, [], [], None, [('Constant', (1, 8, 1, 9), 0, None)]), [('Pass', (1, 12, 1, 16))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 18), 'f', ('arguments', [], [], ('arg', (1, 7, 1, 11), 'args', None, None), [], [], None, []), [('Pass', (1, 14, 1, 18))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 23), 'f', ('arguments', [], [], ('arg', (1, 7, 1, 16), 'args', ('Starred', (1, 13, 1, 16), ('Name', (1, 14, 1, 16), 'Ts', ('Load',)), ('Load',)), None), [], [], None, []), [('Pass', (1, 19, 1, 23))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 36), 'f', ('arguments', [], [], ('arg', (1, 7, 1, 29), 'args', ('Starred', (1, 13, 1, 29), ('Subscript', (1, 14, 1, 29), ('Name', (1, 14, 1, 19), 'tuple', ('Load',)), ('Tuple', (1, 20, 1, 28), [('Name', (1, 20, 1, 23), 'int', ('Load',)), ('Constant', (1, 25, 1, 28), Ellipsis, None)], ('Load',)), ('Load',)), ('Load',)), None), [], [], None, []), [('Pass', (1, 32, 1, 36))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 36), 'f', ('arguments', [], [], ('arg', (1, 7, 1, 29), 'args', ('Starred', (1, 13, 1, 29), ('Subscript', (1, 14, 1, 29), ('Name', (1, 14, 1, 19), 'tuple', ('Load',)), ('Tuple', (1, 20, 1, 28), [('Name', (1, 20, 1, 23), 'int', ('Load',)), ('Starred', (1, 25, 1, 28), ('Name', (1, 26, 1, 28), 'Ts', ('Load',)), ('Load',))], ('Load',)), ('Load',)), ('Load',)), None), [], [], None, []), [('Pass', (1, 32, 1, 36))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 21), 'f', ('arguments', [], [], None, [], [], ('arg', (1, 8, 1, 14), 'kwargs', None, None), []), [('Pass', (1, 17, 1, 21))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 71), 'f', ('arguments', [], [('arg', (1, 6, 1, 7), 'a', None, None), ('arg', (1, 9, 1, 10), 'b', None, None), ('arg', (1, 14, 1, 15), 'c', None, None), ('arg', (1, 22, 1, 23), 'd', None, None), ('arg', (1, 28, 1, 29), 'e', None, None)], ('arg', (1, 35, 1, 39), 'args', None, None), [('arg', (1, 41, 1, 42), 'f', None, None)], [('Constant', (1, 43, 1, 45), 42, None)], ('arg', (1, 49, 1, 55), 'kwargs', None, None), [('Constant', (1, 11, 1, 12), 1, None), ('Constant', (1, 16, 1, 20), None, None), ('List', (1, 24, 1, 26), [], ('Load',)), ('Dict', (1, 30, 1, 32), [], [])]), [('Expr', (1, 58, 1, 71), ('Constant', (1, 58, 1, 71), 'doc for f()', None))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 27), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 23, 1, 27))], [], ('Subscript', (1, 11, 1, 21), ('Name', (1, 11, 1, 16), 'tuple', ('Load',)), ('Tuple', (1, 17, 1, 20), [('Starred', (1, 17, 1, 20), ('Name', (1, 18, 1, 20), 'Ts', ('Load',)), ('Load',))], ('Load',)), ('Load',)), None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 32), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 28, 1, 32))], [], ('Subscript', (1, 11, 1, 26), ('Name', (1, 11, 1, 16), 'tuple', ('Load',)), ('Tuple', (1, 17, 1, 25), [('Name', (1, 17, 1, 20), 'int', ('Load',)), ('Starred', (1, 22, 1, 25), ('Name', (1, 23, 1, 25), 'Ts', ('Load',)), ('Load',))], ('Load',)), ('Load',)), None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 45), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 41, 1, 45))], [], ('Subscript', (1, 11, 1, 39), ('Name', (1, 11, 1, 16), 'tuple', ('Load',)), ('Tuple', (1, 17, 1, 38), [('Name', (1, 17, 1, 20), 'int', ('Load',)), ('Starred', (1, 22, 1, 38), ('Subscript', (1, 23, 1, 38), ('Name', (1, 23, 1, 28), 'tuple', ('Load',)), ('Tuple', (1, 29, 1, 37), [('Name', (1, 29, 1, 32), 'int', ('Load',)), ('Constant', (1, 34, 1, 37), Ellipsis, None)], ('Load',)), ('Load',)), ('Load',))], ('Load',)), ('Load',)), None, [])], []),
-('Module', [('ClassDef', (1, 0, 1, 12), 'C', [], [], [('Pass', (1, 8, 1, 12))], [], [])], []),
-('Module', [('ClassDef', (1, 0, 1, 32), 'C', [], [], [('Expr', (1, 9, 1, 32), ('Constant', (1, 9, 1, 32), 'docstring for class C', None))], [], [])], []),
-('Module', [('ClassDef', (1, 0, 1, 21), 'C', [('Name', (1, 8, 1, 14), 'object', ('Load',))], [], [('Pass', (1, 17, 1, 21))], [], [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 16), 'f', ('arguments', [], [], None, [], [], None, []), [('Return', (1, 8, 1, 16), ('Constant', (1, 15, 1, 16), 1, None))], [], None, None, [])], []),
-('Module', [('Delete', (1, 0, 1, 5), [('Name', (1, 4, 1, 5), 'v', ('Del',))])], []),
-('Module', [('Assign', (1, 0, 1, 5), [('Name', (1, 0, 1, 1), 'v', ('Store',))], ('Constant', (1, 4, 1, 5), 1, None), None)], []),
-('Module', [('Assign', (1, 0, 1, 7), [('Tuple', (1, 0, 1, 3), [('Name', (1, 0, 1, 1), 'a', ('Store',)), ('Name', (1, 2, 1, 3), 'b', ('Store',))], ('Store',))], ('Name', (1, 6, 1, 7), 'c', ('Load',)), None)], []),
-('Module', [('Assign', (1, 0, 1, 9), [('Tuple', (1, 0, 1, 5), [('Name', (1, 1, 1, 2), 'a', ('Store',)), ('Name', (1, 3, 1, 4), 'b', ('Store',))], ('Store',))], ('Name', (1, 8, 1, 9), 'c', ('Load',)), None)], []),
-('Module', [('Assign', (1, 0, 1, 9), [('List', (1, 0, 1, 5), [('Name', (1, 1, 1, 2), 'a', ('Store',)), ('Name', (1, 3, 1, 4), 'b', ('Store',))], ('Store',))], ('Name', (1, 8, 1, 9), 'c', ('Load',)), None)], []),
-('Module', [('AnnAssign', (1, 0, 1, 13), ('Name', (1, 0, 1, 1), 'x', ('Store',)), ('Subscript', (1, 3, 1, 13), ('Name', (1, 3, 1, 8), 'tuple', ('Load',)), ('Tuple', (1, 9, 1, 12), [('Starred', (1, 9, 1, 12), ('Name', (1, 10, 1, 12), 'Ts', ('Load',)), ('Load',))], ('Load',)), ('Load',)), None, 1)], []),
-('Module', [('AnnAssign', (1, 0, 1, 18), ('Name', (1, 0, 1, 1), 'x', ('Store',)), ('Subscript', (1, 3, 1, 18), ('Name', (1, 3, 1, 8), 'tuple', ('Load',)), ('Tuple', (1, 9, 1, 17), [('Name', (1, 9, 1, 12), 'int', ('Load',)), ('Starred', (1, 14, 1, 17), ('Name', (1, 15, 1, 17), 'Ts', ('Load',)), ('Load',))], ('Load',)), ('Load',)), None, 1)], []),
-('Module', [('AnnAssign', (1, 0, 1, 31), ('Name', (1, 0, 1, 1), 'x', ('Store',)), ('Subscript', (1, 3, 1, 31), ('Name', (1, 3, 1, 8), 'tuple', ('Load',)), ('Tuple', (1, 9, 1, 30), [('Name', (1, 9, 1, 12), 'int', ('Load',)), ('Starred', (1, 14, 1, 30), ('Subscript', (1, 15, 1, 30), ('Name', (1, 15, 1, 20), 'tuple', ('Load',)), ('Tuple', (1, 21, 1, 29), [('Name', (1, 21, 1, 24), 'str', ('Load',)), ('Constant', (1, 26, 1, 29), Ellipsis, None)], ('Load',)), ('Load',)), ('Load',))], ('Load',)), ('Load',)), None, 1)], []),
-('Module', [('AugAssign', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('Add',), ('Constant', (1, 5, 1, 6), 1, None))], []),
-('Module', [('For', (1, 0, 1, 15), ('Name', (1, 4, 1, 5), 'v', ('Store',)), ('Name', (1, 9, 1, 10), 'v', ('Load',)), [('Pass', (1, 11, 1, 15))], [], None)], []),
-('Module', [('While', (1, 0, 1, 12), ('Name', (1, 6, 1, 7), 'v', ('Load',)), [('Pass', (1, 8, 1, 12))], [])], []),
-('Module', [('If', (1, 0, 1, 9), ('Name', (1, 3, 1, 4), 'v', ('Load',)), [('Pass', (1, 5, 1, 9))], [])], []),
-('Module', [('If', (1, 0, 4, 6), ('Name', (1, 3, 1, 4), 'a', ('Load',)), [('Pass', (2, 2, 2, 6))], [('If', (3, 0, 4, 6), ('Name', (3, 5, 3, 6), 'b', ('Load',)), [('Pass', (4, 2, 4, 6))], [])])], []),
-('Module', [('If', (1, 0, 6, 6), ('Name', (1, 3, 1, 4), 'a', ('Load',)), [('Pass', (2, 2, 2, 6))], [('If', (3, 0, 6, 6), ('Name', (3, 5, 3, 6), 'b', ('Load',)), [('Pass', (4, 2, 4, 6))], [('Pass', (6, 2, 6, 6))])])], []),
-('Module', [('With', (1, 0, 1, 17), [('withitem', ('Name', (1, 5, 1, 6), 'x', ('Load',)), ('Name', (1, 10, 1, 11), 'y', ('Store',)))], [('Pass', (1, 13, 1, 17))], None)], []),
-('Module', [('With', (1, 0, 1, 25), [('withitem', ('Name', (1, 5, 1, 6), 'x', ('Load',)), ('Name', (1, 10, 1, 11), 'y', ('Store',))), ('withitem', ('Name', (1, 13, 1, 14), 'z', ('Load',)), ('Name', (1, 18, 1, 19), 'q', ('Store',)))], [('Pass', (1, 21, 1, 25))], None)], []),
-('Module', [('Raise', (1, 0, 1, 25), ('Call', (1, 6, 1, 25), ('Name', (1, 6, 1, 15), 'Exception', ('Load',)), [('Constant', (1, 16, 1, 24), 'string', None)], []), None)], []),
-('Module', [('Try', (1, 0, 4, 6), [('Pass', (2, 2, 2, 6))], [('ExceptHandler', (3, 0, 4, 6), ('Name', (3, 7, 3, 16), 'Exception', ('Load',)), None, [('Pass', (4, 2, 4, 6))])], [], [])], []),
-('Module', [('Try', (1, 0, 4, 6), [('Pass', (2, 2, 2, 6))], [], [], [('Pass', (4, 2, 4, 6))])], []),
-('Module', [('TryStar', (1, 0, 4, 6), [('Pass', (2, 2, 2, 6))], [('ExceptHandler', (3, 0, 4, 6), ('Name', (3, 8, 3, 17), 'Exception', ('Load',)), None, [('Pass', (4, 2, 4, 6))])], [], [])], []),
-('Module', [('Assert', (1, 0, 1, 8), ('Name', (1, 7, 1, 8), 'v', ('Load',)), None)], []),
-('Module', [('Import', (1, 0, 1, 10), [('alias', (1, 7, 1, 10), 'sys', None)])], []),
-('Module', [('ImportFrom', (1, 0, 1, 17), 'sys', [('alias', (1, 16, 1, 17), 'v', None)], 0)], []),
-('Module', [('Global', (1, 0, 1, 8), ['v'])], []),
-('Module', [('Expr', (1, 0, 1, 1), ('Constant', (1, 0, 1, 1), 1, None))], []),
-('Module', [('Pass', (1, 0, 1, 4))], []),
-('Module', [('For', (1, 0, 1, 16), ('Name', (1, 4, 1, 5), 'v', ('Store',)), ('Name', (1, 9, 1, 10), 'v', ('Load',)), [('Break', (1, 11, 1, 16))], [], None)], []),
-('Module', [('For', (1, 0, 1, 19), ('Name', (1, 4, 1, 5), 'v', ('Store',)), ('Name', (1, 9, 1, 10), 'v', ('Load',)), [('Continue', (1, 11, 1, 19))], [], None)], []),
-('Module', [('For', (1, 0, 1, 18), ('Tuple', (1, 4, 1, 7), [('Name', (1, 4, 1, 5), 'a', ('Store',)), ('Name', (1, 6, 1, 7), 'b', ('Store',))], ('Store',)), ('Name', (1, 11, 1, 12), 'c', ('Load',)), [('Pass', (1, 14, 1, 18))], [], None)], []),
-('Module', [('For', (1, 0, 1, 20), ('Tuple', (1, 4, 1, 9), [('Name', (1, 5, 1, 6), 'a', ('Store',)), ('Name', (1, 7, 1, 8), 'b', ('Store',))], ('Store',)), ('Name', (1, 13, 1, 14), 'c', ('Load',)), [('Pass', (1, 16, 1, 20))], [], None)], []),
-('Module', [('For', (1, 0, 1, 20), ('List', (1, 4, 1, 9), [('Name', (1, 5, 1, 6), 'a', ('Store',)), ('Name', (1, 7, 1, 8), 'b', ('Store',))], ('Store',)), ('Name', (1, 13, 1, 14), 'c', ('Load',)), [('Pass', (1, 16, 1, 20))], [], None)], []),
-('Module', [('Expr', (1, 0, 11, 5), ('GeneratorExp', (1, 0, 11, 5), ('Tuple', (2, 4, 6, 5), [('Name', (3, 4, 3, 6), 'Aa', ('Load',)), ('Name', (5, 7, 5, 9), 'Bb', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (8, 4, 10, 6), [('Name', (8, 4, 8, 6), 'Aa', ('Store',)), ('Name', (10, 4, 10, 6), 'Bb', ('Store',))], ('Store',)), ('Name', (10, 10, 10, 12), 'Cc', ('Load',)), [], 0)]))], []),
-('Module', [('Expr', (1, 0, 1, 34), ('DictComp', (1, 0, 1, 34), ('Name', (1, 1, 1, 2), 'a', ('Load',)), ('Name', (1, 5, 1, 6), 'b', ('Load',)), [('comprehension', ('Name', (1, 11, 1, 12), 'w', ('Store',)), ('Name', (1, 16, 1, 17), 'x', ('Load',)), [], 0), ('comprehension', ('Name', (1, 22, 1, 23), 'm', ('Store',)), ('Name', (1, 27, 1, 28), 'p', ('Load',)), [('Name', (1, 32, 1, 33), 'g', ('Load',))], 0)]))], []),
-('Module', [('Expr', (1, 0, 1, 20), ('DictComp', (1, 0, 1, 20), ('Name', (1, 1, 1, 2), 'a', ('Load',)), ('Name', (1, 5, 1, 6), 'b', ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 14), [('Name', (1, 11, 1, 12), 'v', ('Store',)), ('Name', (1, 13, 1, 14), 'w', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'x', ('Load',)), [], 0)]))], []),
-('Module', [('Expr', (1, 0, 1, 19), ('SetComp', (1, 0, 1, 19), ('Name', (1, 1, 1, 2), 'r', ('Load',)), [('comprehension', ('Name', (1, 7, 1, 8), 'l', ('Store',)), ('Name', (1, 12, 1, 13), 'x', ('Load',)), [('Name', (1, 17, 1, 18), 'g', ('Load',))], 0)]))], []),
-('Module', [('Expr', (1, 0, 1, 16), ('SetComp', (1, 0, 1, 16), ('Name', (1, 1, 1, 2), 'r', ('Load',)), [('comprehension', ('Tuple', (1, 7, 1, 10), [('Name', (1, 7, 1, 8), 'l', ('Store',)), ('Name', (1, 9, 1, 10), 'm', ('Store',))], ('Store',)), ('Name', (1, 14, 1, 15), 'x', ('Load',)), [], 0)]))], []),
-('Module', [('AsyncFunctionDef', (1, 0, 3, 18), 'f', ('arguments', [], [], None, [], [], None, []), [('Expr', (2, 1, 2, 17), ('Constant', (2, 1, 2, 17), 'async function', None)), ('Expr', (3, 1, 3, 18), ('Await', (3, 1, 3, 18), ('Call', (3, 7, 3, 18), ('Name', (3, 7, 3, 16), 'something', ('Load',)), [], [])))], [], None, None, [])], []),
-('Module', [('AsyncFunctionDef', (1, 0, 3, 8), 'f', ('arguments', [], [], None, [], [], None, []), [('AsyncFor', (2, 1, 3, 8), ('Name', (2, 11, 2, 12), 'e', ('Store',)), ('Name', (2, 16, 2, 17), 'i', ('Load',)), [('Expr', (2, 19, 2, 20), ('Constant', (2, 19, 2, 20), 1, None))], [('Expr', (3, 7, 3, 8), ('Constant', (3, 7, 3, 8), 2, None))], None)], [], None, None, [])], []),
-('Module', [('AsyncFunctionDef', (1, 0, 2, 21), 'f', ('arguments', [], [], None, [], [], None, []), [('AsyncWith', (2, 1, 2, 21), [('withitem', ('Name', (2, 12, 2, 13), 'a', ('Load',)), ('Name', (2, 17, 2, 18), 'b', ('Store',)))], [('Expr', (2, 20, 2, 21), ('Constant', (2, 20, 2, 21), 1, None))], None)], [], None, None, [])], []),
-('Module', [('Expr', (1, 0, 1, 14), ('Dict', (1, 0, 1, 14), [None, ('Constant', (1, 10, 1, 11), 2, None)], [('Dict', (1, 3, 1, 8), [('Constant', (1, 4, 1, 5), 1, None)], [('Constant', (1, 6, 1, 7), 2, None)]), ('Constant', (1, 12, 1, 13), 3, None)]))], []),
-('Module', [('Expr', (1, 0, 1, 12), ('Set', (1, 0, 1, 12), [('Starred', (1, 1, 1, 8), ('Set', (1, 2, 1, 8), [('Constant', (1, 3, 1, 4), 1, None), ('Constant', (1, 6, 1, 7), 2, None)]), ('Load',)), ('Constant', (1, 10, 1, 11), 3, None)]))], []),
-('Module', [('AsyncFunctionDef', (1, 0, 2, 21), 'f', ('arguments', [], [], None, [], [], None, []), [('Expr', (2, 1, 2, 21), ('ListComp', (2, 1, 2, 21), ('Name', (2, 2, 2, 3), 'i', ('Load',)), [('comprehension', ('Name', (2, 14, 2, 15), 'b', ('Store',)), ('Name', (2, 19, 2, 20), 'c', ('Load',)), [], 1)]))], [], None, None, [])], []),
-('Module', [('FunctionDef', (4, 0, 4, 13), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (4, 9, 4, 13))], [('Name', (1, 1, 1, 6), 'deco1', ('Load',)), ('Call', (2, 1, 2, 8), ('Name', (2, 1, 2, 6), 'deco2', ('Load',)), [], []), ('Call', (3, 1, 3, 9), ('Name', (3, 1, 3, 6), 'deco3', ('Load',)), [('Constant', (3, 7, 3, 8), 1, None)], [])], None, None, [])], []),
-('Module', [('AsyncFunctionDef', (4, 0, 4, 19), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (4, 15, 4, 19))], [('Name', (1, 1, 1, 6), 'deco1', ('Load',)), ('Call', (2, 1, 2, 8), ('Name', (2, 1, 2, 6), 'deco2', ('Load',)), [], []), ('Call', (3, 1, 3, 9), ('Name', (3, 1, 3, 6), 'deco3', ('Load',)), [('Constant', (3, 7, 3, 8), 1, None)], [])], None, None, [])], []),
-('Module', [('ClassDef', (4, 0, 4, 13), 'C', [], [], [('Pass', (4, 9, 4, 13))], [('Name', (1, 1, 1, 6), 'deco1', ('Load',)), ('Call', (2, 1, 2, 8), ('Name', (2, 1, 2, 6), 'deco2', ('Load',)), [], []), ('Call', (3, 1, 3, 9), ('Name', (3, 1, 3, 6), 'deco3', ('Load',)), [('Constant', (3, 7, 3, 8), 1, None)], [])], [])], []),
-('Module', [('FunctionDef', (2, 0, 2, 13), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (2, 9, 2, 13))], [('Call', (1, 1, 1, 19), ('Name', (1, 1, 1, 5), 'deco', ('Load',)), [('GeneratorExp', (1, 5, 1, 19), ('Name', (1, 6, 1, 7), 'a', ('Load',)), [('comprehension', ('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 17, 1, 18), 'b', ('Load',)), [], 0)])], [])], None, None, [])], []),
-('Module', [('FunctionDef', (2, 0, 2, 13), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (2, 9, 2, 13))], [('Attribute', (1, 1, 1, 6), ('Attribute', (1, 1, 1, 4), ('Name', (1, 1, 1, 2), 'a', ('Load',)), 'b', ('Load',)), 'c', ('Load',))], None, None, [])], []),
-('Module', [('Expr', (1, 0, 1, 8), ('NamedExpr', (1, 1, 1, 7), ('Name', (1, 1, 1, 2), 'a', ('Store',)), ('Constant', (1, 6, 1, 7), 1, None)))], []),
-('Module', [('FunctionDef', (1, 0, 1, 18), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [], None, [], [], None, []), [('Pass', (1, 14, 1, 18))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 26), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 12, 1, 13), 'c', None, None), ('arg', (1, 15, 1, 16), 'd', None, None), ('arg', (1, 18, 1, 19), 'e', None, None)], None, [], [], None, []), [('Pass', (1, 22, 1, 26))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 29), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 12, 1, 13), 'c', None, None)], None, [('arg', (1, 18, 1, 19), 'd', None, None), ('arg', (1, 21, 1, 22), 'e', None, None)], [None, None], None, []), [('Pass', (1, 25, 1, 29))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 39), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 12, 1, 13), 'c', None, None)], None, [('arg', (1, 18, 1, 19), 'd', None, None), ('arg', (1, 21, 1, 22), 'e', None, None)], [None, None], ('arg', (1, 26, 1, 32), 'kwargs', None, None), []), [('Pass', (1, 35, 1, 39))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 20), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [], None, [], [], None, [('Constant', (1, 8, 1, 9), 1, None)]), [('Pass', (1, 16, 1, 20))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 29), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None), ('arg', (1, 19, 1, 20), 'c', None, None)], None, [], [], None, [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None), ('Constant', (1, 21, 1, 22), 4, None)]), [('Pass', (1, 25, 1, 29))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 32), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None)], None, [('arg', (1, 22, 1, 23), 'c', None, None)], [('Constant', (1, 24, 1, 25), 4, None)], None, [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None)]), [('Pass', (1, 28, 1, 32))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 30), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None)], None, [('arg', (1, 22, 1, 23), 'c', None, None)], [None], None, [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None)]), [('Pass', (1, 26, 1, 30))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 42), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None)], None, [('arg', (1, 22, 1, 23), 'c', None, None)], [('Constant', (1, 24, 1, 25), 4, None)], ('arg', (1, 29, 1, 35), 'kwargs', None, None), [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None)]), [('Pass', (1, 38, 1, 42))], [], None, None, [])], []),
-('Module', [('FunctionDef', (1, 0, 1, 40), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None)], None, [('arg', (1, 22, 1, 23), 'c', None, None)], [None], ('arg', (1, 27, 1, 33), 'kwargs', None, None), [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None)]), [('Pass', (1, 36, 1, 40))], [], None, None, [])], []),
-('Module', [('TypeAlias', (1, 0, 1, 12), ('Name', (1, 5, 1, 6), 'X', ('Store',)), [], ('Name', (1, 9, 1, 12), 'int', ('Load',)))], []),
-('Module', [('TypeAlias', (1, 0, 1, 15), ('Name', (1, 5, 1, 6), 'X', ('Store',)), [('TypeVar', (1, 7, 1, 8), 'T', None)], ('Name', (1, 12, 1, 15), 'int', ('Load',)))], []),
-('Module', [('TypeAlias', (1, 0, 1, 32), ('Name', (1, 5, 1, 6), 'X', ('Store',)), [('TypeVar', (1, 7, 1, 8), 'T', None), ('TypeVarTuple', (1, 10, 1, 13), 'Ts'), ('ParamSpec', (1, 15, 1, 18), 'P')], ('Tuple', (1, 22, 1, 32), [('Name', (1, 23, 1, 24), 'T', ('Load',)), ('Name', (1, 26, 1, 28), 'Ts', ('Load',)), ('Name', (1, 30, 1, 31), 'P', ('Load',))], ('Load',)))], []),
-('Module', [('TypeAlias', (1, 0, 1, 37), ('Name', (1, 5, 1, 6), 'X', ('Store',)), [('TypeVar', (1, 7, 1, 13), 'T', ('Name', (1, 10, 1, 13), 'int', ('Load',))), ('TypeVarTuple', (1, 15, 1, 18), 'Ts'), ('ParamSpec', (1, 20, 1, 23), 'P')], ('Tuple', (1, 27, 1, 37), [('Name', (1, 28, 1, 29), 'T', ('Load',)), ('Name', (1, 31, 1, 33), 'Ts', ('Load',)), ('Name', (1, 35, 1, 36), 'P', ('Load',))], ('Load',)))], []),
-('Module', [('TypeAlias', (1, 0, 1, 44), ('Name', (1, 5, 1, 6), 'X', ('Store',)), [('TypeVar', (1, 7, 1, 20), 'T', ('Tuple', (1, 10, 1, 20), [('Name', (1, 11, 1, 14), 'int', ('Load',)), ('Name', (1, 16, 1, 19), 'str', ('Load',))], ('Load',))), ('TypeVarTuple', (1, 22, 1, 25), 'Ts'), ('ParamSpec', (1, 27, 1, 30), 'P')], ('Tuple', (1, 34, 1, 44), [('Name', (1, 35, 1, 36), 'T', ('Load',)), ('Name', (1, 38, 1, 40), 'Ts', ('Load',)), ('Name', (1, 42, 1, 43), 'P', ('Load',))], ('Load',)))], []),
-('Module', [('ClassDef', (1, 0, 1, 16), 'X', [], [], [('Pass', (1, 12, 1, 16))], [], [('TypeVar', (1, 8, 1, 9), 'T', None)])], []),
-('Module', [('ClassDef', (1, 0, 1, 26), 'X', [], [], [('Pass', (1, 22, 1, 26))], [], [('TypeVar', (1, 8, 1, 9), 'T', None), ('TypeVarTuple', (1, 11, 1, 14), 'Ts'), ('ParamSpec', (1, 16, 1, 19), 'P')])], []),
-('Module', [('ClassDef', (1, 0, 1, 31), 'X', [], [], [('Pass', (1, 27, 1, 31))], [], [('TypeVar', (1, 8, 1, 14), 'T', ('Name', (1, 11, 1, 14), 'int', ('Load',))), ('TypeVarTuple', (1, 16, 1, 19), 'Ts'), ('ParamSpec', (1, 21, 1, 24), 'P')])], []),
-('Module', [('ClassDef', (1, 0, 1, 38), 'X', [], [], [('Pass', (1, 34, 1, 38))], [], [('TypeVar', (1, 8, 1, 21), 'T', ('Tuple', (1, 11, 1, 21), [('Name', (1, 12, 1, 15), 'int', ('Load',)), ('Name', (1, 17, 1, 20), 'str', ('Load',))], ('Load',))), ('TypeVarTuple', (1, 23, 1, 26), 'Ts'), ('ParamSpec', (1, 28, 1, 31), 'P')])], []),
-('Module', [('FunctionDef', (1, 0, 1, 16), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 12, 1, 16))], [], None, None, [('TypeVar', (1, 6, 1, 7), 'T', None)])], []),
-('Module', [('FunctionDef', (1, 0, 1, 26), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 22, 1, 26))], [], None, None, [('TypeVar', (1, 6, 1, 7), 'T', None), ('TypeVarTuple', (1, 9, 1, 12), 'Ts'), ('ParamSpec', (1, 14, 1, 17), 'P')])], []),
-('Module', [('FunctionDef', (1, 0, 1, 31), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 27, 1, 31))], [], None, None, [('TypeVar', (1, 6, 1, 12), 'T', ('Name', (1, 9, 1, 12), 'int', ('Load',))), ('TypeVarTuple', (1, 14, 1, 17), 'Ts'), ('ParamSpec', (1, 19, 1, 22), 'P')])], []),
-('Module', [('FunctionDef', (1, 0, 1, 38), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 34, 1, 38))], [], None, None, [('TypeVar', (1, 6, 1, 19), 'T', ('Tuple', (1, 9, 1, 19), [('Name', (1, 10, 1, 13), 'int', ('Load',)), ('Name', (1, 15, 1, 18), 'str', ('Load',))], ('Load',))), ('TypeVarTuple', (1, 21, 1, 24), 'Ts'), ('ParamSpec', (1, 26, 1, 29), 'P')])], []),
-]
-single_results = [
-('Interactive', [('Expr', (1, 0, 1, 3), ('BinOp', (1, 0, 1, 3), ('Constant', (1, 0, 1, 1), 1, None), ('Add',), ('Constant', (1, 2, 1, 3), 2, None)))]),
-]
-eval_results = [
-('Expression', ('Constant', (1, 0, 1, 4), None, None)),
-('Expression', ('BoolOp', (1, 0, 1, 7), ('And',), [('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Name', (1, 6, 1, 7), 'b', ('Load',))])),
-('Expression', ('BinOp', (1, 0, 1, 5), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Add',), ('Name', (1, 4, 1, 5), 'b', ('Load',)))),
-('Expression', ('UnaryOp', (1, 0, 1, 5), ('Not',), ('Name', (1, 4, 1, 5), 'v', ('Load',)))),
-('Expression', ('Lambda', (1, 0, 1, 11), ('arguments', [], [], None, [], [], None, []), ('Constant', (1, 7, 1, 11), None, None))),
-('Expression', ('Dict', (1, 0, 1, 7), [('Constant', (1, 2, 1, 3), 1, None)], [('Constant', (1, 4, 1, 5), 2, None)])),
-('Expression', ('Dict', (1, 0, 1, 2), [], [])),
-('Expression', ('Set', (1, 0, 1, 7), [('Constant', (1, 1, 1, 5), None, None)])),
-('Expression', ('Dict', (1, 0, 5, 6), [('Constant', (2, 6, 2, 7), 1, None)], [('Constant', (4, 10, 4, 11), 2, None)])),
-('Expression', ('ListComp', (1, 0, 1, 19), ('Name', (1, 1, 1, 2), 'a', ('Load',)), [('comprehension', ('Name', (1, 7, 1, 8), 'b', ('Store',)), ('Name', (1, 12, 1, 13), 'c', ('Load',)), [('Name', (1, 17, 1, 18), 'd', ('Load',))], 0)])),
-('Expression', ('GeneratorExp', (1, 0, 1, 19), ('Name', (1, 1, 1, 2), 'a', ('Load',)), [('comprehension', ('Name', (1, 7, 1, 8), 'b', ('Store',)), ('Name', (1, 12, 1, 13), 'c', ('Load',)), [('Name', (1, 17, 1, 18), 'd', ('Load',))], 0)])),
-('Expression', ('ListComp', (1, 0, 1, 20), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 14), [('Name', (1, 11, 1, 12), 'a', ('Store',)), ('Name', (1, 13, 1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'c', ('Load',)), [], 0)])),
-('Expression', ('ListComp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
-('Expression', ('ListComp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('List', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
-('Expression', ('SetComp', (1, 0, 1, 20), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 14), [('Name', (1, 11, 1, 12), 'a', ('Store',)), ('Name', (1, 13, 1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'c', ('Load',)), [], 0)])),
-('Expression', ('SetComp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
-('Expression', ('SetComp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('List', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
-('Expression', ('GeneratorExp', (1, 0, 1, 20), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 14), [('Name', (1, 11, 1, 12), 'a', ('Store',)), ('Name', (1, 13, 1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'c', ('Load',)), [], 0)])),
-('Expression', ('GeneratorExp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
-('Expression', ('GeneratorExp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('List', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
-('Expression', ('Compare', (1, 0, 1, 9), ('Constant', (1, 0, 1, 1), 1, None), [('Lt',), ('Lt',)], [('Constant', (1, 4, 1, 5), 2, None), ('Constant', (1, 8, 1, 9), 3, None)])),
-('Expression', ('Call', (1, 0, 1, 17), ('Name', (1, 0, 1, 1), 'f', ('Load',)), [('Constant', (1, 2, 1, 3), 1, None), ('Constant', (1, 4, 1, 5), 2, None), ('Starred', (1, 10, 1, 12), ('Name', (1, 11, 1, 12), 'd', ('Load',)), ('Load',))], [('keyword', (1, 6, 1, 9), 'c', ('Constant', (1, 8, 1, 9), 3, None)), ('keyword', (1, 13, 1, 16), None, ('Name', (1, 15, 1, 16), 'e', ('Load',)))])),
-('Expression', ('Call', (1, 0, 1, 10), ('Name', (1, 0, 1, 1), 'f', ('Load',)), [('Starred', (1, 2, 1, 9), ('List', (1, 3, 1, 9), [('Constant', (1, 4, 1, 5), 0, None), ('Constant', (1, 7, 1, 8), 1, None)], ('Load',)), ('Load',))], [])),
-('Expression', ('Call', (1, 0, 1, 15), ('Name', (1, 0, 1, 1), 'f', ('Load',)), [('GeneratorExp', (1, 1, 1, 15), ('Name', (1, 2, 1, 3), 'a', ('Load',)), [('comprehension', ('Name', (1, 8, 1, 9), 'a', ('Store',)), ('Name', (1, 13, 1, 14), 'b', ('Load',)), [], 0)])], [])),
-('Expression', ('Constant', (1, 0, 1, 2), 10, None)),
-('Expression', ('Constant', (1, 0, 1, 8), 'string', None)),
-('Expression', ('Attribute', (1, 0, 1, 3), ('Name', (1, 0, 1, 1), 'a', ('Load',)), 'b', ('Load',))),
-('Expression', ('Subscript', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Slice', (1, 2, 1, 5), ('Name', (1, 2, 1, 3), 'b', ('Load',)), ('Name', (1, 4, 1, 5), 'c', ('Load',)), None), ('Load',))),
-('Expression', ('Name', (1, 0, 1, 1), 'v', ('Load',))),
-('Expression', ('List', (1, 0, 1, 7), [('Constant', (1, 1, 1, 2), 1, None), ('Constant', (1, 3, 1, 4), 2, None), ('Constant', (1, 5, 1, 6), 3, None)], ('Load',))),
-('Expression', ('List', (1, 0, 1, 2), [], ('Load',))),
-('Expression', ('Tuple', (1, 0, 1, 5), [('Constant', (1, 0, 1, 1), 1, None), ('Constant', (1, 2, 1, 3), 2, None), ('Constant', (1, 4, 1, 5), 3, None)], ('Load',))),
-('Expression', ('Tuple', (1, 0, 1, 7), [('Constant', (1, 1, 1, 2), 1, None), ('Constant', (1, 3, 1, 4), 2, None), ('Constant', (1, 5, 1, 6), 3, None)], ('Load',))),
-('Expression', ('Tuple', (1, 0, 1, 2), [], ('Load',))),
-('Expression', ('Call', (1, 0, 1, 17), ('Attribute', (1, 0, 1, 7), ('Attribute', (1, 0, 1, 5), ('Attribute', (1, 0, 1, 3), ('Name', (1, 0, 1, 1), 'a', ('Load',)), 'b', ('Load',)), 'c', ('Load',)), 'd', ('Load',)), [('Subscript', (1, 8, 1, 16), ('Attribute', (1, 8, 1, 11), ('Name', (1, 8, 1, 9), 'a', ('Load',)), 'b', ('Load',)), ('Slice', (1, 12, 1, 15), ('Constant', (1, 12, 1, 13), 1, None), ('Constant', (1, 14, 1, 15), 2, None), None), ('Load',))], [])),
-]
-main()
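
Note on the generated blocks: the exec_results/single_results/eval_results tuples removed above (and re-added in the new Lib/test/test_ast/snippets.py below) are not maintained by hand. Running the module with -g re-parses every snippet and flattens each AST into nested tuples with to_tuple from test.test_ast.utils, and the printed output replaces everything below the "EVERYTHING BELOW IS GENERATED" marker. As a rough illustration of that flattening (the real helper lives in Lib/test/test_ast/utils.py; this sketch only approximates it and is not the file's exact contents):

    import ast

    def to_tuple(node):
        # Leaves (None, numbers, strings, bytes, Ellipsis) pass through as-is.
        if node is None or node is Ellipsis or isinstance(node, (str, bytes, int, float, complex)):
            return node
        # Lists of child nodes are flattened element by element.
        if isinstance(node, list):
            return [to_tuple(child) for child in node]
        # AST nodes become (ClassName, (lineno, col, end_lineno, end_col), *fields);
        # nodes without source locations (Module, Load, Store, ...) omit the position tuple.
        parts = [type(node).__name__]
        if hasattr(node, 'lineno') and hasattr(node, 'col_offset'):
            parts.append((node.lineno, node.col_offset, node.end_lineno, node.end_col_offset))
        parts.extend(to_tuple(getattr(node, field)) for field in node._fields)
        return tuple(parts)

    # Matches the "v = 1" entry in exec_results:
    # ('Module', [('Assign', (1, 0, 1, 5), [('Name', (1, 0, 1, 1), 'v', ('Store',))], ('Constant', (1, 4, 1, 5), 1, None), None)], [])
    print(to_tuple(ast.parse('v = 1')))
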
diff --git a/Lib/test/test_ast/__init__.py b/Lib/test/test_ast/__init__.py
new file mode 100644
index 0000000000..9a89d27ba9
--- /dev/null
+++ b/Lib/test/test_ast/__init__.py
@@ -0,0 +1,7 @@
+import os
+
+from test import support
+
+
+def load_tests(*args):
+ return support.load_package_tests(os.path.dirname(__file__), *args)
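
The new package's __init__.py hooks the directory into unittest discovery through the standard load_tests protocol: test.support.load_package_tests discovers every test module under the package directory and appends it to the suite, so `python -m test test_ast` keeps collecting all the split-out files. A minimal sketch of what that hook amounts to with plain unittest (illustrative only, assuming the default `test*` pattern; the actual logic stays in test.support):

    import os
    import unittest

    def load_tests(loader, standard_tests, pattern):
        # unittest calls this hook when the test_ast package is imported during
        # discovery; return a suite covering every test module in the package.
        pkg_dir = os.path.dirname(__file__)
        package_tests = loader.discover(start_dir=pkg_dir, pattern=pattern or 'test*.py')
        standard_tests.addTests(package_tests)
        return standard_tests
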
diff --git a/Lib/test/test_ast/snippets.py b/Lib/test/test_ast/snippets.py
new file mode 100644
index 0000000000..28d32b2941
--- /dev/null
+++ b/Lib/test/test_ast/snippets.py
@@ -0,0 +1,601 @@
+import ast
+import sys
+
+from test.test_ast.utils import to_tuple
+
+
+# These tests are compiled through "exec"
+# There should be at least one test per statement
+exec_tests = [
+ # Module docstring
+ "'module docstring'",
+ # FunctionDef
+ "def f(): pass",
+ # FunctionDef with docstring
+ "def f(): 'function docstring'",
+ # FunctionDef with arg
+ "def f(a): pass",
+ # FunctionDef with arg and default value
+ "def f(a=0): pass",
+ # FunctionDef with varargs
+ "def f(*args): pass",
+ # FunctionDef with varargs as TypeVarTuple
+ "def f(*args: *Ts): pass",
+ # FunctionDef with varargs as unpacked Tuple
+ "def f(*args: *tuple[int, ...]): pass",
+ # FunctionDef with varargs as unpacked Tuple *and* TypeVarTuple
+ "def f(*args: *tuple[int, *Ts]): pass",
+ # FunctionDef with kwargs
+ "def f(**kwargs): pass",
+ # FunctionDef with all kind of args and docstring
+ "def f(a, b=1, c=None, d=[], e={}, *args, f=42, **kwargs): 'doc for f()'",
+ # FunctionDef with type annotation on return involving unpacking
+ "def f() -> tuple[*Ts]: pass",
+ "def f() -> tuple[int, *Ts]: pass",
+ "def f() -> tuple[int, *tuple[int, ...]]: pass",
+ # ClassDef
+ "class C:pass",
+ # ClassDef with docstring
+ "class C: 'docstring for class C'",
+ # ClassDef, new style class
+ "class C(object): pass",
+ # Classdef with multiple bases
+ "class C(A, B): pass",
+ # Return
+ "def f():return 1",
+ "def f():return",
+ # Delete
+ "del v",
+ # Assign
+ "v = 1",
+ "a,b = c",
+ "(a,b) = c",
+ "[a,b] = c",
+ "a[b] = c",
+ # AnnAssign with unpacked types
+ "x: tuple[*Ts]",
+ "x: tuple[int, *Ts]",
+ "x: tuple[int, *tuple[str, ...]]",
+ # AugAssign
+ "v += 1",
+ "v -= 1",
+ "v *= 1",
+ "v @= 1",
+ "v /= 1",
+ "v %= 1",
+ "v **= 1",
+ "v <<= 1",
+ "v >>= 1",
+ "v |= 1",
+ "v ^= 1",
+ "v &= 1",
+ "v //= 1",
+ # For
+ "for v in v:pass",
+ # For-Else
+ "for v in v:\n pass\nelse:\n pass",
+ # While
+ "while v:pass",
+ # While-Else
+ "while v:\n pass\nelse:\n pass",
+ # If-Elif-Else
+ "if v:pass",
+ "if a:\n pass\nelif b:\n pass",
+ "if a:\n pass\nelse:\n pass",
+ "if a:\n pass\nelif b:\n pass\nelse:\n pass",
+ "if a:\n pass\nelif b:\n pass\nelif b:\n pass\nelif b:\n pass\nelse:\n pass",
+ # With
+ "with x: pass",
+ "with x, y: pass",
+ "with x as y: pass",
+ "with x as y, z as q: pass",
+ "with (x as y): pass",
+ "with (x, y): pass",
+ # Raise
+ "raise",
+ "raise Exception('string')",
+ "raise Exception",
+ "raise Exception('string') from None",
+ # TryExcept
+ "try:\n pass\nexcept Exception:\n pass",
+ "try:\n pass\nexcept Exception as exc:\n pass",
+ # TryFinally
+ "try:\n pass\nfinally:\n pass",
+ # TryStarExcept
+ "try:\n pass\nexcept* Exception:\n pass",
+ "try:\n pass\nexcept* Exception as exc:\n pass",
+ # TryExceptFinallyElse
+ "try:\n pass\nexcept Exception:\n pass\nelse: pass\nfinally:\n pass",
+ "try:\n pass\nexcept Exception as exc:\n pass\nelse: pass\nfinally:\n pass",
+ "try:\n pass\nexcept* Exception as exc:\n pass\nelse: pass\nfinally:\n pass",
+ # Assert
+ "assert v",
+ # Assert with message
+ "assert v, 'message'",
+ # Import
+ "import sys",
+ "import foo as bar",
+ # ImportFrom
+ "from sys import x as y",
+ "from sys import v",
+ # Global
+ "global v",
+ # Expr
+ "1",
+ # Pass,
+ "pass",
+ # Break
+ "for v in v:break",
+ # Continue
+ "for v in v:continue",
+ # for statements with naked tuples (see http://bugs.python.org/issue6704)
+ "for a,b in c: pass",
+ "for (a,b) in c: pass",
+ "for [a,b] in c: pass",
+ # Multiline generator expression (test for .lineno & .col_offset)
+ """(
+ (
+ Aa
+ ,
+ Bb
+ )
+ for
+ Aa
+ ,
+ Bb in Cc
+ )""",
+ # dictcomp
+ "{a : b for w in x for m in p if g}",
+ # dictcomp with naked tuple
+ "{a : b for v,w in x}",
+ # setcomp
+ "{r for l in x if g}",
+ # setcomp with naked tuple
+ "{r for l,m in x}",
+ # AsyncFunctionDef
+ "async def f():\n 'async function'\n await something()",
+ # AsyncFor
+ "async def f():\n async for e in i: 1\n else: 2",
+ # AsyncWith
+ "async def f():\n async with a as b: 1",
+ # PEP 448: Additional Unpacking Generalizations
+ "{**{1:2}, 2:3}",
+ "{*{1, 2}, 3}",
+ # Function with yield (from)
+ "def f(): yield 1",
+ "def f(): yield from []",
+ # Asynchronous comprehensions
+ "async def f():\n [i async for b in c]",
+ # Decorated FunctionDef
+ "@deco1\n@deco2()\n@deco3(1)\ndef f(): pass",
+ # Decorated AsyncFunctionDef
+ "@deco1\n@deco2()\n@deco3(1)\nasync def f(): pass",
+ # Decorated ClassDef
+ "@deco1\n@deco2()\n@deco3(1)\nclass C: pass",
+ # Decorator with generator argument
+ "@deco(a for a in b)\ndef f(): pass",
+ # Decorator with attribute
+ "@a.b.c\ndef f(): pass",
+ # Simple assignment expression
+ "(a := 1)",
+ # Assignment expression in if statement
+ "if a := foo(): pass",
+ # Assignment expression in while
+ "while a := foo(): pass",
+ # Positional-only arguments
+ "def f(a, /,): pass",
+ "def f(a, /, c, d, e): pass",
+ "def f(a, /, c, *, d, e): pass",
+ "def f(a, /, c, *, d, e, **kwargs): pass",
+ # Positional-only arguments with defaults
+ "def f(a=1, /,): pass",
+ "def f(a=1, /, b=2, c=4): pass",
+ "def f(a=1, /, b=2, *, c=4): pass",
+ "def f(a=1, /, b=2, *, c): pass",
+ "def f(a=1, /, b=2, *, c=4, **kwargs): pass",
+ "def f(a=1, /, b=2, *, c, **kwargs): pass",
+ # Type aliases
+ "type X = int",
+ "type X[T] = int",
+ "type X[T, *Ts, **P] = (T, Ts, P)",
+ "type X[T: int, *Ts, **P] = (T, Ts, P)",
+ "type X[T: (int, str), *Ts, **P] = (T, Ts, P)",
+ "type X[T: int = 1, *Ts = 2, **P =3] = (T, Ts, P)",
+ # Generic classes
+ "class X[T]: pass",
+ "class X[T, *Ts, **P]: pass",
+ "class X[T: int, *Ts, **P]: pass",
+ "class X[T: (int, str), *Ts, **P]: pass",
+ "class X[T: int = 1, *Ts = 2, **P = 3]: pass",
+ # Generic functions
+ "def f[T](): pass",
+ "def f[T, *Ts, **P](): pass",
+ "def f[T: int, *Ts, **P](): pass",
+ "def f[T: (int, str), *Ts, **P](): pass",
+ "def f[T: int = 1, *Ts = 2, **P = 3](): pass",
+ # Match
+ "match x:\n\tcase 1:\n\t\tpass",
+ # Match with _
+ "match x:\n\tcase 1:\n\t\tpass\n\tcase _:\n\t\tpass",
+]
+
+# These are compiled through "single"
+# because of overlap with "eval", it just tests what
+# can't be tested with "eval"
+single_tests = [
+ "1+2"
+]
+
+# These are compiled through "eval"
+# It should test all expressions
+eval_tests = [
+ # Constant(value=None)
+ "None",
+ # True
+ "True",
+ # False
+ "False",
+ # BoolOp
+ "a and b",
+ "a or b",
+ # BinOp
+ "a + b",
+ "a - b",
+ "a * b",
+ "a / b",
+ "a @ b",
+ "a // b",
+ "a ** b",
+ "a % b",
+ "a >> b",
+ "a << b",
+ "a ^ b",
+ "a | b",
+ "a & b",
+ # UnaryOp
+ "not v",
+ "+v",
+ "-v",
+ "~v",
+ # Lambda
+ "lambda:None",
+ # Dict
+ "{ 1:2 }",
+ # Empty dict
+ "{}",
+ # Set
+ "{None,}",
+ # Multiline dict (test for .lineno & .col_offset)
+ """{
+ 1
+ :
+ 2
+ }""",
+ # Multiline list
+ """[
+ 1
+ ,
+ 1
+ ]""",
+ # Multiline tuple
+ """(
+ 1
+ ,
+ )""",
+ # Multiline set
+ """{
+ 1
+ ,
+ 1
+ }""",
+ # ListComp
+ "[a for b in c if d]",
+ # GeneratorExp
+ "(a for b in c if d)",
+ # SetComp
+ "{a for b in c if d}",
+ # DictComp
+ "{k: v for k, v in c if d}",
+ # Comprehensions with multiple for targets
+ "[(a,b) for a,b in c]",
+ "[(a,b) for (a,b) in c]",
+ "[(a,b) for [a,b] in c]",
+ "{(a,b) for a,b in c}",
+ "{(a,b) for (a,b) in c}",
+ "{(a,b) for [a,b] in c}",
+ "((a,b) for a,b in c)",
+ "((a,b) for (a,b) in c)",
+ "((a,b) for [a,b] in c)",
+ # Async comprehensions - async comprehensions can't work outside an asynchronous function
+ #
+ # Yield - yield expressions can't work outside a function
+ #
+ # Compare
+ "1 < 2 < 3",
+ "a == b",
+ "a <= b",
+ "a >= b",
+ "a != b",
+ "a is b",
+ "a is not b",
+ "a in b",
+ "a not in b",
+ # Call without argument
+ "f()",
+ # Call
+ "f(1,2,c=3,*d,**e)",
+ # Call with multi-character starred
+ "f(*[0, 1])",
+ # Call with a generator argument
+ "f(a for a in b)",
+ # Constant(value=int())
+ "10",
+ # Complex num
+ "1j",
+ # Constant(value=str())
+ "'string'",
+ # Attribute
+ "a.b",
+ # Subscript
+ "a[b:c]",
+ # Name
+ "v",
+ # List
+ "[1,2,3]",
+ # Empty list
+ "[]",
+ # Tuple
+ "1,2,3",
+ # Tuple
+ "(1,2,3)",
+ # Empty tuple
+ "()",
+ # Combination
+ "a.b.c.d(a.b[1:2])",
+ # Slice
+ "[5][1:]",
+ "[5][:1]",
+ "[5][::1]",
+ "[5][1:1:1]",
+ # IfExp
+ "foo() if x else bar()",
+ # JoinedStr and FormattedValue
+ "f'{a}'",
+ "f'{a:.2f}'",
+ "f'{a!r}'",
+ "f'foo({a})'",
+]
+
+
+def main():
+ if __name__ != '__main__':
+ return
+ if sys.argv[1:] == ['-g']:
+ for statements, kind in ((exec_tests, "exec"), (single_tests, "single"),
+ (eval_tests, "eval")):
+ print(kind+"_results = [")
+ for statement in statements:
+ tree = ast.parse(statement, "?", kind)
+ print("%r," % (to_tuple(tree),))
+ print("]")
+ print("main()")
+ raise SystemExit
+
+#### EVERYTHING BELOW IS GENERATED BY python Lib/test/test_ast/snippets.py -g #####
+exec_results = [
+('Module', [('Expr', (1, 0, 1, 18), ('Constant', (1, 0, 1, 18), 'module docstring', None))], []),
+('Module', [('FunctionDef', (1, 0, 1, 13), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 9, 1, 13))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 29), 'f', ('arguments', [], [], None, [], [], None, []), [('Expr', (1, 9, 1, 29), ('Constant', (1, 9, 1, 29), 'function docstring', None))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 14), 'f', ('arguments', [], [('arg', (1, 6, 1, 7), 'a', None, None)], None, [], [], None, []), [('Pass', (1, 10, 1, 14))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 16), 'f', ('arguments', [], [('arg', (1, 6, 1, 7), 'a', None, None)], None, [], [], None, [('Constant', (1, 8, 1, 9), 0, None)]), [('Pass', (1, 12, 1, 16))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 18), 'f', ('arguments', [], [], ('arg', (1, 7, 1, 11), 'args', None, None), [], [], None, []), [('Pass', (1, 14, 1, 18))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 23), 'f', ('arguments', [], [], ('arg', (1, 7, 1, 16), 'args', ('Starred', (1, 13, 1, 16), ('Name', (1, 14, 1, 16), 'Ts', ('Load',)), ('Load',)), None), [], [], None, []), [('Pass', (1, 19, 1, 23))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 36), 'f', ('arguments', [], [], ('arg', (1, 7, 1, 29), 'args', ('Starred', (1, 13, 1, 29), ('Subscript', (1, 14, 1, 29), ('Name', (1, 14, 1, 19), 'tuple', ('Load',)), ('Tuple', (1, 20, 1, 28), [('Name', (1, 20, 1, 23), 'int', ('Load',)), ('Constant', (1, 25, 1, 28), Ellipsis, None)], ('Load',)), ('Load',)), ('Load',)), None), [], [], None, []), [('Pass', (1, 32, 1, 36))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 36), 'f', ('arguments', [], [], ('arg', (1, 7, 1, 29), 'args', ('Starred', (1, 13, 1, 29), ('Subscript', (1, 14, 1, 29), ('Name', (1, 14, 1, 19), 'tuple', ('Load',)), ('Tuple', (1, 20, 1, 28), [('Name', (1, 20, 1, 23), 'int', ('Load',)), ('Starred', (1, 25, 1, 28), ('Name', (1, 26, 1, 28), 'Ts', ('Load',)), ('Load',))], ('Load',)), ('Load',)), ('Load',)), None), [], [], None, []), [('Pass', (1, 32, 1, 36))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 21), 'f', ('arguments', [], [], None, [], [], ('arg', (1, 8, 1, 14), 'kwargs', None, None), []), [('Pass', (1, 17, 1, 21))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 71), 'f', ('arguments', [], [('arg', (1, 6, 1, 7), 'a', None, None), ('arg', (1, 9, 1, 10), 'b', None, None), ('arg', (1, 14, 1, 15), 'c', None, None), ('arg', (1, 22, 1, 23), 'd', None, None), ('arg', (1, 28, 1, 29), 'e', None, None)], ('arg', (1, 35, 1, 39), 'args', None, None), [('arg', (1, 41, 1, 42), 'f', None, None)], [('Constant', (1, 43, 1, 45), 42, None)], ('arg', (1, 49, 1, 55), 'kwargs', None, None), [('Constant', (1, 11, 1, 12), 1, None), ('Constant', (1, 16, 1, 20), None, None), ('List', (1, 24, 1, 26), [], ('Load',)), ('Dict', (1, 30, 1, 32), [], [])]), [('Expr', (1, 58, 1, 71), ('Constant', (1, 58, 1, 71), 'doc for f()', None))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 27), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 23, 1, 27))], [], ('Subscript', (1, 11, 1, 21), ('Name', (1, 11, 1, 16), 'tuple', ('Load',)), ('Tuple', (1, 17, 1, 20), [('Starred', (1, 17, 1, 20), ('Name', (1, 18, 1, 20), 'Ts', ('Load',)), ('Load',))], ('Load',)), ('Load',)), None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 32), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 28, 1, 32))], [], ('Subscript', (1, 11, 1, 26), ('Name', (1, 11, 1, 16), 'tuple', ('Load',)), ('Tuple', (1, 17, 1, 25), [('Name', (1, 17, 1, 20), 'int', ('Load',)), ('Starred', (1, 22, 1, 25), ('Name', (1, 23, 1, 25), 'Ts', ('Load',)), ('Load',))], ('Load',)), ('Load',)), None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 45), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 41, 1, 45))], [], ('Subscript', (1, 11, 1, 39), ('Name', (1, 11, 1, 16), 'tuple', ('Load',)), ('Tuple', (1, 17, 1, 38), [('Name', (1, 17, 1, 20), 'int', ('Load',)), ('Starred', (1, 22, 1, 38), ('Subscript', (1, 23, 1, 38), ('Name', (1, 23, 1, 28), 'tuple', ('Load',)), ('Tuple', (1, 29, 1, 37), [('Name', (1, 29, 1, 32), 'int', ('Load',)), ('Constant', (1, 34, 1, 37), Ellipsis, None)], ('Load',)), ('Load',)), ('Load',))], ('Load',)), ('Load',)), None, [])], []),
+('Module', [('ClassDef', (1, 0, 1, 12), 'C', [], [], [('Pass', (1, 8, 1, 12))], [], [])], []),
+('Module', [('ClassDef', (1, 0, 1, 32), 'C', [], [], [('Expr', (1, 9, 1, 32), ('Constant', (1, 9, 1, 32), 'docstring for class C', None))], [], [])], []),
+('Module', [('ClassDef', (1, 0, 1, 21), 'C', [('Name', (1, 8, 1, 14), 'object', ('Load',))], [], [('Pass', (1, 17, 1, 21))], [], [])], []),
+('Module', [('ClassDef', (1, 0, 1, 19), 'C', [('Name', (1, 8, 1, 9), 'A', ('Load',)), ('Name', (1, 11, 1, 12), 'B', ('Load',))], [], [('Pass', (1, 15, 1, 19))], [], [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 16), 'f', ('arguments', [], [], None, [], [], None, []), [('Return', (1, 8, 1, 16), ('Constant', (1, 15, 1, 16), 1, None))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 14), 'f', ('arguments', [], [], None, [], [], None, []), [('Return', (1, 8, 1, 14), None)], [], None, None, [])], []),
+('Module', [('Delete', (1, 0, 1, 5), [('Name', (1, 4, 1, 5), 'v', ('Del',))])], []),
+('Module', [('Assign', (1, 0, 1, 5), [('Name', (1, 0, 1, 1), 'v', ('Store',))], ('Constant', (1, 4, 1, 5), 1, None), None)], []),
+('Module', [('Assign', (1, 0, 1, 7), [('Tuple', (1, 0, 1, 3), [('Name', (1, 0, 1, 1), 'a', ('Store',)), ('Name', (1, 2, 1, 3), 'b', ('Store',))], ('Store',))], ('Name', (1, 6, 1, 7), 'c', ('Load',)), None)], []),
+('Module', [('Assign', (1, 0, 1, 9), [('Tuple', (1, 0, 1, 5), [('Name', (1, 1, 1, 2), 'a', ('Store',)), ('Name', (1, 3, 1, 4), 'b', ('Store',))], ('Store',))], ('Name', (1, 8, 1, 9), 'c', ('Load',)), None)], []),
+('Module', [('Assign', (1, 0, 1, 9), [('List', (1, 0, 1, 5), [('Name', (1, 1, 1, 2), 'a', ('Store',)), ('Name', (1, 3, 1, 4), 'b', ('Store',))], ('Store',))], ('Name', (1, 8, 1, 9), 'c', ('Load',)), None)], []),
+('Module', [('Assign', (1, 0, 1, 8), [('Subscript', (1, 0, 1, 4), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Name', (1, 2, 1, 3), 'b', ('Load',)), ('Store',))], ('Name', (1, 7, 1, 8), 'c', ('Load',)), None)], []),
+('Module', [('AnnAssign', (1, 0, 1, 13), ('Name', (1, 0, 1, 1), 'x', ('Store',)), ('Subscript', (1, 3, 1, 13), ('Name', (1, 3, 1, 8), 'tuple', ('Load',)), ('Tuple', (1, 9, 1, 12), [('Starred', (1, 9, 1, 12), ('Name', (1, 10, 1, 12), 'Ts', ('Load',)), ('Load',))], ('Load',)), ('Load',)), None, 1)], []),
+('Module', [('AnnAssign', (1, 0, 1, 18), ('Name', (1, 0, 1, 1), 'x', ('Store',)), ('Subscript', (1, 3, 1, 18), ('Name', (1, 3, 1, 8), 'tuple', ('Load',)), ('Tuple', (1, 9, 1, 17), [('Name', (1, 9, 1, 12), 'int', ('Load',)), ('Starred', (1, 14, 1, 17), ('Name', (1, 15, 1, 17), 'Ts', ('Load',)), ('Load',))], ('Load',)), ('Load',)), None, 1)], []),
+('Module', [('AnnAssign', (1, 0, 1, 31), ('Name', (1, 0, 1, 1), 'x', ('Store',)), ('Subscript', (1, 3, 1, 31), ('Name', (1, 3, 1, 8), 'tuple', ('Load',)), ('Tuple', (1, 9, 1, 30), [('Name', (1, 9, 1, 12), 'int', ('Load',)), ('Starred', (1, 14, 1, 30), ('Subscript', (1, 15, 1, 30), ('Name', (1, 15, 1, 20), 'tuple', ('Load',)), ('Tuple', (1, 21, 1, 29), [('Name', (1, 21, 1, 24), 'str', ('Load',)), ('Constant', (1, 26, 1, 29), Ellipsis, None)], ('Load',)), ('Load',)), ('Load',))], ('Load',)), ('Load',)), None, 1)], []),
+('Module', [('AugAssign', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('Add',), ('Constant', (1, 5, 1, 6), 1, None))], []),
+('Module', [('AugAssign', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('Sub',), ('Constant', (1, 5, 1, 6), 1, None))], []),
+('Module', [('AugAssign', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('Mult',), ('Constant', (1, 5, 1, 6), 1, None))], []),
+('Module', [('AugAssign', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('MatMult',), ('Constant', (1, 5, 1, 6), 1, None))], []),
+('Module', [('AugAssign', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('Div',), ('Constant', (1, 5, 1, 6), 1, None))], []),
+('Module', [('AugAssign', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('Mod',), ('Constant', (1, 5, 1, 6), 1, None))], []),
+('Module', [('AugAssign', (1, 0, 1, 7), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('Pow',), ('Constant', (1, 6, 1, 7), 1, None))], []),
+('Module', [('AugAssign', (1, 0, 1, 7), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('LShift',), ('Constant', (1, 6, 1, 7), 1, None))], []),
+('Module', [('AugAssign', (1, 0, 1, 7), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('RShift',), ('Constant', (1, 6, 1, 7), 1, None))], []),
+('Module', [('AugAssign', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('BitOr',), ('Constant', (1, 5, 1, 6), 1, None))], []),
+('Module', [('AugAssign', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('BitXor',), ('Constant', (1, 5, 1, 6), 1, None))], []),
+('Module', [('AugAssign', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('BitAnd',), ('Constant', (1, 5, 1, 6), 1, None))], []),
+('Module', [('AugAssign', (1, 0, 1, 7), ('Name', (1, 0, 1, 1), 'v', ('Store',)), ('FloorDiv',), ('Constant', (1, 6, 1, 7), 1, None))], []),
+('Module', [('For', (1, 0, 1, 15), ('Name', (1, 4, 1, 5), 'v', ('Store',)), ('Name', (1, 9, 1, 10), 'v', ('Load',)), [('Pass', (1, 11, 1, 15))], [], None)], []),
+('Module', [('For', (1, 0, 4, 6), ('Name', (1, 4, 1, 5), 'v', ('Store',)), ('Name', (1, 9, 1, 10), 'v', ('Load',)), [('Pass', (2, 2, 2, 6))], [('Pass', (4, 2, 4, 6))], None)], []),
+('Module', [('While', (1, 0, 1, 12), ('Name', (1, 6, 1, 7), 'v', ('Load',)), [('Pass', (1, 8, 1, 12))], [])], []),
+('Module', [('While', (1, 0, 4, 6), ('Name', (1, 6, 1, 7), 'v', ('Load',)), [('Pass', (2, 2, 2, 6))], [('Pass', (4, 2, 4, 6))])], []),
+('Module', [('If', (1, 0, 1, 9), ('Name', (1, 3, 1, 4), 'v', ('Load',)), [('Pass', (1, 5, 1, 9))], [])], []),
+('Module', [('If', (1, 0, 4, 6), ('Name', (1, 3, 1, 4), 'a', ('Load',)), [('Pass', (2, 2, 2, 6))], [('If', (3, 0, 4, 6), ('Name', (3, 5, 3, 6), 'b', ('Load',)), [('Pass', (4, 2, 4, 6))], [])])], []),
+('Module', [('If', (1, 0, 4, 6), ('Name', (1, 3, 1, 4), 'a', ('Load',)), [('Pass', (2, 2, 2, 6))], [('Pass', (4, 2, 4, 6))])], []),
+('Module', [('If', (1, 0, 6, 6), ('Name', (1, 3, 1, 4), 'a', ('Load',)), [('Pass', (2, 2, 2, 6))], [('If', (3, 0, 6, 6), ('Name', (3, 5, 3, 6), 'b', ('Load',)), [('Pass', (4, 2, 4, 6))], [('Pass', (6, 2, 6, 6))])])], []),
+('Module', [('If', (1, 0, 10, 6), ('Name', (1, 3, 1, 4), 'a', ('Load',)), [('Pass', (2, 2, 2, 6))], [('If', (3, 0, 10, 6), ('Name', (3, 5, 3, 6), 'b', ('Load',)), [('Pass', (4, 2, 4, 6))], [('If', (5, 0, 10, 6), ('Name', (5, 5, 5, 6), 'b', ('Load',)), [('Pass', (6, 2, 6, 6))], [('If', (7, 0, 10, 6), ('Name', (7, 5, 7, 6), 'b', ('Load',)), [('Pass', (8, 2, 8, 6))], [('Pass', (10, 2, 10, 6))])])])])], []),
+('Module', [('With', (1, 0, 1, 12), [('withitem', ('Name', (1, 5, 1, 6), 'x', ('Load',)), None)], [('Pass', (1, 8, 1, 12))], None)], []),
+('Module', [('With', (1, 0, 1, 15), [('withitem', ('Name', (1, 5, 1, 6), 'x', ('Load',)), None), ('withitem', ('Name', (1, 8, 1, 9), 'y', ('Load',)), None)], [('Pass', (1, 11, 1, 15))], None)], []),
+('Module', [('With', (1, 0, 1, 17), [('withitem', ('Name', (1, 5, 1, 6), 'x', ('Load',)), ('Name', (1, 10, 1, 11), 'y', ('Store',)))], [('Pass', (1, 13, 1, 17))], None)], []),
+('Module', [('With', (1, 0, 1, 25), [('withitem', ('Name', (1, 5, 1, 6), 'x', ('Load',)), ('Name', (1, 10, 1, 11), 'y', ('Store',))), ('withitem', ('Name', (1, 13, 1, 14), 'z', ('Load',)), ('Name', (1, 18, 1, 19), 'q', ('Store',)))], [('Pass', (1, 21, 1, 25))], None)], []),
+('Module', [('With', (1, 0, 1, 19), [('withitem', ('Name', (1, 6, 1, 7), 'x', ('Load',)), ('Name', (1, 11, 1, 12), 'y', ('Store',)))], [('Pass', (1, 15, 1, 19))], None)], []),
+('Module', [('With', (1, 0, 1, 17), [('withitem', ('Name', (1, 6, 1, 7), 'x', ('Load',)), None), ('withitem', ('Name', (1, 9, 1, 10), 'y', ('Load',)), None)], [('Pass', (1, 13, 1, 17))], None)], []),
+('Module', [('Raise', (1, 0, 1, 5), None, None)], []),
+('Module', [('Raise', (1, 0, 1, 25), ('Call', (1, 6, 1, 25), ('Name', (1, 6, 1, 15), 'Exception', ('Load',)), [('Constant', (1, 16, 1, 24), 'string', None)], []), None)], []),
+('Module', [('Raise', (1, 0, 1, 15), ('Name', (1, 6, 1, 15), 'Exception', ('Load',)), None)], []),
+('Module', [('Raise', (1, 0, 1, 35), ('Call', (1, 6, 1, 25), ('Name', (1, 6, 1, 15), 'Exception', ('Load',)), [('Constant', (1, 16, 1, 24), 'string', None)], []), ('Constant', (1, 31, 1, 35), None, None))], []),
+('Module', [('Try', (1, 0, 4, 6), [('Pass', (2, 2, 2, 6))], [('ExceptHandler', (3, 0, 4, 6), ('Name', (3, 7, 3, 16), 'Exception', ('Load',)), None, [('Pass', (4, 2, 4, 6))])], [], [])], []),
+('Module', [('Try', (1, 0, 4, 6), [('Pass', (2, 2, 2, 6))], [('ExceptHandler', (3, 0, 4, 6), ('Name', (3, 7, 3, 16), 'Exception', ('Load',)), 'exc', [('Pass', (4, 2, 4, 6))])], [], [])], []),
+('Module', [('Try', (1, 0, 4, 6), [('Pass', (2, 2, 2, 6))], [], [], [('Pass', (4, 2, 4, 6))])], []),
+('Module', [('TryStar', (1, 0, 4, 6), [('Pass', (2, 2, 2, 6))], [('ExceptHandler', (3, 0, 4, 6), ('Name', (3, 8, 3, 17), 'Exception', ('Load',)), None, [('Pass', (4, 2, 4, 6))])], [], [])], []),
+('Module', [('TryStar', (1, 0, 4, 6), [('Pass', (2, 2, 2, 6))], [('ExceptHandler', (3, 0, 4, 6), ('Name', (3, 8, 3, 17), 'Exception', ('Load',)), 'exc', [('Pass', (4, 2, 4, 6))])], [], [])], []),
+('Module', [('Try', (1, 0, 7, 6), [('Pass', (2, 2, 2, 6))], [('ExceptHandler', (3, 0, 4, 6), ('Name', (3, 7, 3, 16), 'Exception', ('Load',)), None, [('Pass', (4, 2, 4, 6))])], [('Pass', (5, 7, 5, 11))], [('Pass', (7, 2, 7, 6))])], []),
+('Module', [('Try', (1, 0, 7, 6), [('Pass', (2, 2, 2, 6))], [('ExceptHandler', (3, 0, 4, 6), ('Name', (3, 7, 3, 16), 'Exception', ('Load',)), 'exc', [('Pass', (4, 2, 4, 6))])], [('Pass', (5, 7, 5, 11))], [('Pass', (7, 2, 7, 6))])], []),
+('Module', [('TryStar', (1, 0, 7, 6), [('Pass', (2, 2, 2, 6))], [('ExceptHandler', (3, 0, 4, 6), ('Name', (3, 8, 3, 17), 'Exception', ('Load',)), 'exc', [('Pass', (4, 2, 4, 6))])], [('Pass', (5, 7, 5, 11))], [('Pass', (7, 2, 7, 6))])], []),
+('Module', [('Assert', (1, 0, 1, 8), ('Name', (1, 7, 1, 8), 'v', ('Load',)), None)], []),
+('Module', [('Assert', (1, 0, 1, 19), ('Name', (1, 7, 1, 8), 'v', ('Load',)), ('Constant', (1, 10, 1, 19), 'message', None))], []),
+('Module', [('Import', (1, 0, 1, 10), [('alias', (1, 7, 1, 10), 'sys', None)])], []),
+('Module', [('Import', (1, 0, 1, 17), [('alias', (1, 7, 1, 17), 'foo', 'bar')])], []),
+('Module', [('ImportFrom', (1, 0, 1, 22), 'sys', [('alias', (1, 16, 1, 22), 'x', 'y')], 0)], []),
+('Module', [('ImportFrom', (1, 0, 1, 17), 'sys', [('alias', (1, 16, 1, 17), 'v', None)], 0)], []),
+('Module', [('Global', (1, 0, 1, 8), ['v'])], []),
+('Module', [('Expr', (1, 0, 1, 1), ('Constant', (1, 0, 1, 1), 1, None))], []),
+('Module', [('Pass', (1, 0, 1, 4))], []),
+('Module', [('For', (1, 0, 1, 16), ('Name', (1, 4, 1, 5), 'v', ('Store',)), ('Name', (1, 9, 1, 10), 'v', ('Load',)), [('Break', (1, 11, 1, 16))], [], None)], []),
+('Module', [('For', (1, 0, 1, 19), ('Name', (1, 4, 1, 5), 'v', ('Store',)), ('Name', (1, 9, 1, 10), 'v', ('Load',)), [('Continue', (1, 11, 1, 19))], [], None)], []),
+('Module', [('For', (1, 0, 1, 18), ('Tuple', (1, 4, 1, 7), [('Name', (1, 4, 1, 5), 'a', ('Store',)), ('Name', (1, 6, 1, 7), 'b', ('Store',))], ('Store',)), ('Name', (1, 11, 1, 12), 'c', ('Load',)), [('Pass', (1, 14, 1, 18))], [], None)], []),
+('Module', [('For', (1, 0, 1, 20), ('Tuple', (1, 4, 1, 9), [('Name', (1, 5, 1, 6), 'a', ('Store',)), ('Name', (1, 7, 1, 8), 'b', ('Store',))], ('Store',)), ('Name', (1, 13, 1, 14), 'c', ('Load',)), [('Pass', (1, 16, 1, 20))], [], None)], []),
+('Module', [('For', (1, 0, 1, 20), ('List', (1, 4, 1, 9), [('Name', (1, 5, 1, 6), 'a', ('Store',)), ('Name', (1, 7, 1, 8), 'b', ('Store',))], ('Store',)), ('Name', (1, 13, 1, 14), 'c', ('Load',)), [('Pass', (1, 16, 1, 20))], [], None)], []),
+('Module', [('Expr', (1, 0, 11, 5), ('GeneratorExp', (1, 0, 11, 5), ('Tuple', (2, 4, 6, 5), [('Name', (3, 4, 3, 6), 'Aa', ('Load',)), ('Name', (5, 7, 5, 9), 'Bb', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (8, 4, 10, 6), [('Name', (8, 4, 8, 6), 'Aa', ('Store',)), ('Name', (10, 4, 10, 6), 'Bb', ('Store',))], ('Store',)), ('Name', (10, 10, 10, 12), 'Cc', ('Load',)), [], 0)]))], []),
+('Module', [('Expr', (1, 0, 1, 34), ('DictComp', (1, 0, 1, 34), ('Name', (1, 1, 1, 2), 'a', ('Load',)), ('Name', (1, 5, 1, 6), 'b', ('Load',)), [('comprehension', ('Name', (1, 11, 1, 12), 'w', ('Store',)), ('Name', (1, 16, 1, 17), 'x', ('Load',)), [], 0), ('comprehension', ('Name', (1, 22, 1, 23), 'm', ('Store',)), ('Name', (1, 27, 1, 28), 'p', ('Load',)), [('Name', (1, 32, 1, 33), 'g', ('Load',))], 0)]))], []),
+('Module', [('Expr', (1, 0, 1, 20), ('DictComp', (1, 0, 1, 20), ('Name', (1, 1, 1, 2), 'a', ('Load',)), ('Name', (1, 5, 1, 6), 'b', ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 14), [('Name', (1, 11, 1, 12), 'v', ('Store',)), ('Name', (1, 13, 1, 14), 'w', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'x', ('Load',)), [], 0)]))], []),
+('Module', [('Expr', (1, 0, 1, 19), ('SetComp', (1, 0, 1, 19), ('Name', (1, 1, 1, 2), 'r', ('Load',)), [('comprehension', ('Name', (1, 7, 1, 8), 'l', ('Store',)), ('Name', (1, 12, 1, 13), 'x', ('Load',)), [('Name', (1, 17, 1, 18), 'g', ('Load',))], 0)]))], []),
+('Module', [('Expr', (1, 0, 1, 16), ('SetComp', (1, 0, 1, 16), ('Name', (1, 1, 1, 2), 'r', ('Load',)), [('comprehension', ('Tuple', (1, 7, 1, 10), [('Name', (1, 7, 1, 8), 'l', ('Store',)), ('Name', (1, 9, 1, 10), 'm', ('Store',))], ('Store',)), ('Name', (1, 14, 1, 15), 'x', ('Load',)), [], 0)]))], []),
+('Module', [('AsyncFunctionDef', (1, 0, 3, 18), 'f', ('arguments', [], [], None, [], [], None, []), [('Expr', (2, 1, 2, 17), ('Constant', (2, 1, 2, 17), 'async function', None)), ('Expr', (3, 1, 3, 18), ('Await', (3, 1, 3, 18), ('Call', (3, 7, 3, 18), ('Name', (3, 7, 3, 16), 'something', ('Load',)), [], [])))], [], None, None, [])], []),
+('Module', [('AsyncFunctionDef', (1, 0, 3, 8), 'f', ('arguments', [], [], None, [], [], None, []), [('AsyncFor', (2, 1, 3, 8), ('Name', (2, 11, 2, 12), 'e', ('Store',)), ('Name', (2, 16, 2, 17), 'i', ('Load',)), [('Expr', (2, 19, 2, 20), ('Constant', (2, 19, 2, 20), 1, None))], [('Expr', (3, 7, 3, 8), ('Constant', (3, 7, 3, 8), 2, None))], None)], [], None, None, [])], []),
+('Module', [('AsyncFunctionDef', (1, 0, 2, 21), 'f', ('arguments', [], [], None, [], [], None, []), [('AsyncWith', (2, 1, 2, 21), [('withitem', ('Name', (2, 12, 2, 13), 'a', ('Load',)), ('Name', (2, 17, 2, 18), 'b', ('Store',)))], [('Expr', (2, 20, 2, 21), ('Constant', (2, 20, 2, 21), 1, None))], None)], [], None, None, [])], []),
+('Module', [('Expr', (1, 0, 1, 14), ('Dict', (1, 0, 1, 14), [None, ('Constant', (1, 10, 1, 11), 2, None)], [('Dict', (1, 3, 1, 8), [('Constant', (1, 4, 1, 5), 1, None)], [('Constant', (1, 6, 1, 7), 2, None)]), ('Constant', (1, 12, 1, 13), 3, None)]))], []),
+('Module', [('Expr', (1, 0, 1, 12), ('Set', (1, 0, 1, 12), [('Starred', (1, 1, 1, 8), ('Set', (1, 2, 1, 8), [('Constant', (1, 3, 1, 4), 1, None), ('Constant', (1, 6, 1, 7), 2, None)]), ('Load',)), ('Constant', (1, 10, 1, 11), 3, None)]))], []),
+('Module', [('FunctionDef', (1, 0, 1, 16), 'f', ('arguments', [], [], None, [], [], None, []), [('Expr', (1, 9, 1, 16), ('Yield', (1, 9, 1, 16), ('Constant', (1, 15, 1, 16), 1, None)))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 22), 'f', ('arguments', [], [], None, [], [], None, []), [('Expr', (1, 9, 1, 22), ('YieldFrom', (1, 9, 1, 22), ('List', (1, 20, 1, 22), [], ('Load',))))], [], None, None, [])], []),
+('Module', [('AsyncFunctionDef', (1, 0, 2, 21), 'f', ('arguments', [], [], None, [], [], None, []), [('Expr', (2, 1, 2, 21), ('ListComp', (2, 1, 2, 21), ('Name', (2, 2, 2, 3), 'i', ('Load',)), [('comprehension', ('Name', (2, 14, 2, 15), 'b', ('Store',)), ('Name', (2, 19, 2, 20), 'c', ('Load',)), [], 1)]))], [], None, None, [])], []),
+('Module', [('FunctionDef', (4, 0, 4, 13), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (4, 9, 4, 13))], [('Name', (1, 1, 1, 6), 'deco1', ('Load',)), ('Call', (2, 1, 2, 8), ('Name', (2, 1, 2, 6), 'deco2', ('Load',)), [], []), ('Call', (3, 1, 3, 9), ('Name', (3, 1, 3, 6), 'deco3', ('Load',)), [('Constant', (3, 7, 3, 8), 1, None)], [])], None, None, [])], []),
+('Module', [('AsyncFunctionDef', (4, 0, 4, 19), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (4, 15, 4, 19))], [('Name', (1, 1, 1, 6), 'deco1', ('Load',)), ('Call', (2, 1, 2, 8), ('Name', (2, 1, 2, 6), 'deco2', ('Load',)), [], []), ('Call', (3, 1, 3, 9), ('Name', (3, 1, 3, 6), 'deco3', ('Load',)), [('Constant', (3, 7, 3, 8), 1, None)], [])], None, None, [])], []),
+('Module', [('ClassDef', (4, 0, 4, 13), 'C', [], [], [('Pass', (4, 9, 4, 13))], [('Name', (1, 1, 1, 6), 'deco1', ('Load',)), ('Call', (2, 1, 2, 8), ('Name', (2, 1, 2, 6), 'deco2', ('Load',)), [], []), ('Call', (3, 1, 3, 9), ('Name', (3, 1, 3, 6), 'deco3', ('Load',)), [('Constant', (3, 7, 3, 8), 1, None)], [])], [])], []),
+('Module', [('FunctionDef', (2, 0, 2, 13), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (2, 9, 2, 13))], [('Call', (1, 1, 1, 19), ('Name', (1, 1, 1, 5), 'deco', ('Load',)), [('GeneratorExp', (1, 5, 1, 19), ('Name', (1, 6, 1, 7), 'a', ('Load',)), [('comprehension', ('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 17, 1, 18), 'b', ('Load',)), [], 0)])], [])], None, None, [])], []),
+('Module', [('FunctionDef', (2, 0, 2, 13), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (2, 9, 2, 13))], [('Attribute', (1, 1, 1, 6), ('Attribute', (1, 1, 1, 4), ('Name', (1, 1, 1, 2), 'a', ('Load',)), 'b', ('Load',)), 'c', ('Load',))], None, None, [])], []),
+('Module', [('Expr', (1, 0, 1, 8), ('NamedExpr', (1, 1, 1, 7), ('Name', (1, 1, 1, 2), 'a', ('Store',)), ('Constant', (1, 6, 1, 7), 1, None)))], []),
+('Module', [('If', (1, 0, 1, 19), ('NamedExpr', (1, 3, 1, 13), ('Name', (1, 3, 1, 4), 'a', ('Store',)), ('Call', (1, 8, 1, 13), ('Name', (1, 8, 1, 11), 'foo', ('Load',)), [], [])), [('Pass', (1, 15, 1, 19))], [])], []),
+('Module', [('While', (1, 0, 1, 22), ('NamedExpr', (1, 6, 1, 16), ('Name', (1, 6, 1, 7), 'a', ('Store',)), ('Call', (1, 11, 1, 16), ('Name', (1, 11, 1, 14), 'foo', ('Load',)), [], [])), [('Pass', (1, 18, 1, 22))], [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 18), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [], None, [], [], None, []), [('Pass', (1, 14, 1, 18))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 26), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 12, 1, 13), 'c', None, None), ('arg', (1, 15, 1, 16), 'd', None, None), ('arg', (1, 18, 1, 19), 'e', None, None)], None, [], [], None, []), [('Pass', (1, 22, 1, 26))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 29), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 12, 1, 13), 'c', None, None)], None, [('arg', (1, 18, 1, 19), 'd', None, None), ('arg', (1, 21, 1, 22), 'e', None, None)], [None, None], None, []), [('Pass', (1, 25, 1, 29))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 39), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 12, 1, 13), 'c', None, None)], None, [('arg', (1, 18, 1, 19), 'd', None, None), ('arg', (1, 21, 1, 22), 'e', None, None)], [None, None], ('arg', (1, 26, 1, 32), 'kwargs', None, None), []), [('Pass', (1, 35, 1, 39))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 20), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [], None, [], [], None, [('Constant', (1, 8, 1, 9), 1, None)]), [('Pass', (1, 16, 1, 20))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 29), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None), ('arg', (1, 19, 1, 20), 'c', None, None)], None, [], [], None, [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None), ('Constant', (1, 21, 1, 22), 4, None)]), [('Pass', (1, 25, 1, 29))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 32), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None)], None, [('arg', (1, 22, 1, 23), 'c', None, None)], [('Constant', (1, 24, 1, 25), 4, None)], None, [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None)]), [('Pass', (1, 28, 1, 32))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 30), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None)], None, [('arg', (1, 22, 1, 23), 'c', None, None)], [None], None, [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None)]), [('Pass', (1, 26, 1, 30))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 42), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None)], None, [('arg', (1, 22, 1, 23), 'c', None, None)], [('Constant', (1, 24, 1, 25), 4, None)], ('arg', (1, 29, 1, 35), 'kwargs', None, None), [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None)]), [('Pass', (1, 38, 1, 42))], [], None, None, [])], []),
+('Module', [('FunctionDef', (1, 0, 1, 40), 'f', ('arguments', [('arg', (1, 6, 1, 7), 'a', None, None)], [('arg', (1, 14, 1, 15), 'b', None, None)], None, [('arg', (1, 22, 1, 23), 'c', None, None)], [None], ('arg', (1, 27, 1, 33), 'kwargs', None, None), [('Constant', (1, 8, 1, 9), 1, None), ('Constant', (1, 16, 1, 17), 2, None)]), [('Pass', (1, 36, 1, 40))], [], None, None, [])], []),
+('Module', [('TypeAlias', (1, 0, 1, 12), ('Name', (1, 5, 1, 6), 'X', ('Store',)), [], ('Name', (1, 9, 1, 12), 'int', ('Load',)))], []),
+('Module', [('TypeAlias', (1, 0, 1, 15), ('Name', (1, 5, 1, 6), 'X', ('Store',)), [('TypeVar', (1, 7, 1, 8), 'T', None, None)], ('Name', (1, 12, 1, 15), 'int', ('Load',)))], []),
+('Module', [('TypeAlias', (1, 0, 1, 32), ('Name', (1, 5, 1, 6), 'X', ('Store',)), [('TypeVar', (1, 7, 1, 8), 'T', None, None), ('TypeVarTuple', (1, 10, 1, 13), 'Ts', None), ('ParamSpec', (1, 15, 1, 18), 'P', None)], ('Tuple', (1, 22, 1, 32), [('Name', (1, 23, 1, 24), 'T', ('Load',)), ('Name', (1, 26, 1, 28), 'Ts', ('Load',)), ('Name', (1, 30, 1, 31), 'P', ('Load',))], ('Load',)))], []),
+('Module', [('TypeAlias', (1, 0, 1, 37), ('Name', (1, 5, 1, 6), 'X', ('Store',)), [('TypeVar', (1, 7, 1, 13), 'T', ('Name', (1, 10, 1, 13), 'int', ('Load',)), None), ('TypeVarTuple', (1, 15, 1, 18), 'Ts', None), ('ParamSpec', (1, 20, 1, 23), 'P', None)], ('Tuple', (1, 27, 1, 37), [('Name', (1, 28, 1, 29), 'T', ('Load',)), ('Name', (1, 31, 1, 33), 'Ts', ('Load',)), ('Name', (1, 35, 1, 36), 'P', ('Load',))], ('Load',)))], []),
+('Module', [('TypeAlias', (1, 0, 1, 44), ('Name', (1, 5, 1, 6), 'X', ('Store',)), [('TypeVar', (1, 7, 1, 20), 'T', ('Tuple', (1, 10, 1, 20), [('Name', (1, 11, 1, 14), 'int', ('Load',)), ('Name', (1, 16, 1, 19), 'str', ('Load',))], ('Load',)), None), ('TypeVarTuple', (1, 22, 1, 25), 'Ts', None), ('ParamSpec', (1, 27, 1, 30), 'P', None)], ('Tuple', (1, 34, 1, 44), [('Name', (1, 35, 1, 36), 'T', ('Load',)), ('Name', (1, 38, 1, 40), 'Ts', ('Load',)), ('Name', (1, 42, 1, 43), 'P', ('Load',))], ('Load',)))], []),
+('Module', [('TypeAlias', (1, 0, 1, 48), ('Name', (1, 5, 1, 6), 'X', ('Store',)), [('TypeVar', (1, 7, 1, 17), 'T', ('Name', (1, 10, 1, 13), 'int', ('Load',)), ('Constant', (1, 16, 1, 17), 1, None)), ('TypeVarTuple', (1, 19, 1, 26), 'Ts', ('Constant', (1, 25, 1, 26), 2, None)), ('ParamSpec', (1, 28, 1, 34), 'P', ('Constant', (1, 33, 1, 34), 3, None))], ('Tuple', (1, 38, 1, 48), [('Name', (1, 39, 1, 40), 'T', ('Load',)), ('Name', (1, 42, 1, 44), 'Ts', ('Load',)), ('Name', (1, 46, 1, 47), 'P', ('Load',))], ('Load',)))], []),
+('Module', [('ClassDef', (1, 0, 1, 16), 'X', [], [], [('Pass', (1, 12, 1, 16))], [], [('TypeVar', (1, 8, 1, 9), 'T', None, None)])], []),
+('Module', [('ClassDef', (1, 0, 1, 26), 'X', [], [], [('Pass', (1, 22, 1, 26))], [], [('TypeVar', (1, 8, 1, 9), 'T', None, None), ('TypeVarTuple', (1, 11, 1, 14), 'Ts', None), ('ParamSpec', (1, 16, 1, 19), 'P', None)])], []),
+('Module', [('ClassDef', (1, 0, 1, 31), 'X', [], [], [('Pass', (1, 27, 1, 31))], [], [('TypeVar', (1, 8, 1, 14), 'T', ('Name', (1, 11, 1, 14), 'int', ('Load',)), None), ('TypeVarTuple', (1, 16, 1, 19), 'Ts', None), ('ParamSpec', (1, 21, 1, 24), 'P', None)])], []),
+('Module', [('ClassDef', (1, 0, 1, 38), 'X', [], [], [('Pass', (1, 34, 1, 38))], [], [('TypeVar', (1, 8, 1, 21), 'T', ('Tuple', (1, 11, 1, 21), [('Name', (1, 12, 1, 15), 'int', ('Load',)), ('Name', (1, 17, 1, 20), 'str', ('Load',))], ('Load',)), None), ('TypeVarTuple', (1, 23, 1, 26), 'Ts', None), ('ParamSpec', (1, 28, 1, 31), 'P', None)])], []),
+('Module', [('ClassDef', (1, 0, 1, 43), 'X', [], [], [('Pass', (1, 39, 1, 43))], [], [('TypeVar', (1, 8, 1, 18), 'T', ('Name', (1, 11, 1, 14), 'int', ('Load',)), ('Constant', (1, 17, 1, 18), 1, None)), ('TypeVarTuple', (1, 20, 1, 27), 'Ts', ('Constant', (1, 26, 1, 27), 2, None)), ('ParamSpec', (1, 29, 1, 36), 'P', ('Constant', (1, 35, 1, 36), 3, None))])], []),
+('Module', [('FunctionDef', (1, 0, 1, 16), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 12, 1, 16))], [], None, None, [('TypeVar', (1, 6, 1, 7), 'T', None, None)])], []),
+('Module', [('FunctionDef', (1, 0, 1, 26), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 22, 1, 26))], [], None, None, [('TypeVar', (1, 6, 1, 7), 'T', None, None), ('TypeVarTuple', (1, 9, 1, 12), 'Ts', None), ('ParamSpec', (1, 14, 1, 17), 'P', None)])], []),
+('Module', [('FunctionDef', (1, 0, 1, 31), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 27, 1, 31))], [], None, None, [('TypeVar', (1, 6, 1, 12), 'T', ('Name', (1, 9, 1, 12), 'int', ('Load',)), None), ('TypeVarTuple', (1, 14, 1, 17), 'Ts', None), ('ParamSpec', (1, 19, 1, 22), 'P', None)])], []),
+('Module', [('FunctionDef', (1, 0, 1, 38), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 34, 1, 38))], [], None, None, [('TypeVar', (1, 6, 1, 19), 'T', ('Tuple', (1, 9, 1, 19), [('Name', (1, 10, 1, 13), 'int', ('Load',)), ('Name', (1, 15, 1, 18), 'str', ('Load',))], ('Load',)), None), ('TypeVarTuple', (1, 21, 1, 24), 'Ts', None), ('ParamSpec', (1, 26, 1, 29), 'P', None)])], []),
+('Module', [('FunctionDef', (1, 0, 1, 43), 'f', ('arguments', [], [], None, [], [], None, []), [('Pass', (1, 39, 1, 43))], [], None, None, [('TypeVar', (1, 6, 1, 16), 'T', ('Name', (1, 9, 1, 12), 'int', ('Load',)), ('Constant', (1, 15, 1, 16), 1, None)), ('TypeVarTuple', (1, 18, 1, 25), 'Ts', ('Constant', (1, 24, 1, 25), 2, None)), ('ParamSpec', (1, 27, 1, 34), 'P', ('Constant', (1, 33, 1, 34), 3, None))])], []),
+('Module', [('Match', (1, 0, 3, 6), ('Name', (1, 6, 1, 7), 'x', ('Load',)), [('match_case', ('MatchValue', (2, 6, 2, 7), ('Constant', (2, 6, 2, 7), 1, None)), None, [('Pass', (3, 2, 3, 6))])])], []),
+('Module', [('Match', (1, 0, 5, 6), ('Name', (1, 6, 1, 7), 'x', ('Load',)), [('match_case', ('MatchValue', (2, 6, 2, 7), ('Constant', (2, 6, 2, 7), 1, None)), None, [('Pass', (3, 2, 3, 6))]), ('match_case', ('MatchAs', (4, 6, 4, 7), None, None), None, [('Pass', (5, 2, 5, 6))])])], []),
+]
+single_results = [
+('Interactive', [('Expr', (1, 0, 1, 3), ('BinOp', (1, 0, 1, 3), ('Constant', (1, 0, 1, 1), 1, None), ('Add',), ('Constant', (1, 2, 1, 3), 2, None)))]),
+]
+eval_results = [
+('Expression', ('Constant', (1, 0, 1, 4), None, None)),
+('Expression', ('Constant', (1, 0, 1, 4), True, None)),
+('Expression', ('Constant', (1, 0, 1, 5), False, None)),
+('Expression', ('BoolOp', (1, 0, 1, 7), ('And',), [('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Name', (1, 6, 1, 7), 'b', ('Load',))])),
+('Expression', ('BoolOp', (1, 0, 1, 6), ('Or',), [('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Name', (1, 5, 1, 6), 'b', ('Load',))])),
+('Expression', ('BinOp', (1, 0, 1, 5), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Add',), ('Name', (1, 4, 1, 5), 'b', ('Load',)))),
+('Expression', ('BinOp', (1, 0, 1, 5), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Sub',), ('Name', (1, 4, 1, 5), 'b', ('Load',)))),
+('Expression', ('BinOp', (1, 0, 1, 5), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Mult',), ('Name', (1, 4, 1, 5), 'b', ('Load',)))),
+('Expression', ('BinOp', (1, 0, 1, 5), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Div',), ('Name', (1, 4, 1, 5), 'b', ('Load',)))),
+('Expression', ('BinOp', (1, 0, 1, 5), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('MatMult',), ('Name', (1, 4, 1, 5), 'b', ('Load',)))),
+('Expression', ('BinOp', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('FloorDiv',), ('Name', (1, 5, 1, 6), 'b', ('Load',)))),
+('Expression', ('BinOp', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Pow',), ('Name', (1, 5, 1, 6), 'b', ('Load',)))),
+('Expression', ('BinOp', (1, 0, 1, 5), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Mod',), ('Name', (1, 4, 1, 5), 'b', ('Load',)))),
+('Expression', ('BinOp', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('RShift',), ('Name', (1, 5, 1, 6), 'b', ('Load',)))),
+('Expression', ('BinOp', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('LShift',), ('Name', (1, 5, 1, 6), 'b', ('Load',)))),
+('Expression', ('BinOp', (1, 0, 1, 5), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('BitXor',), ('Name', (1, 4, 1, 5), 'b', ('Load',)))),
+('Expression', ('BinOp', (1, 0, 1, 5), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('BitOr',), ('Name', (1, 4, 1, 5), 'b', ('Load',)))),
+('Expression', ('BinOp', (1, 0, 1, 5), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('BitAnd',), ('Name', (1, 4, 1, 5), 'b', ('Load',)))),
+('Expression', ('UnaryOp', (1, 0, 1, 5), ('Not',), ('Name', (1, 4, 1, 5), 'v', ('Load',)))),
+('Expression', ('UnaryOp', (1, 0, 1, 2), ('UAdd',), ('Name', (1, 1, 1, 2), 'v', ('Load',)))),
+('Expression', ('UnaryOp', (1, 0, 1, 2), ('USub',), ('Name', (1, 1, 1, 2), 'v', ('Load',)))),
+('Expression', ('UnaryOp', (1, 0, 1, 2), ('Invert',), ('Name', (1, 1, 1, 2), 'v', ('Load',)))),
+('Expression', ('Lambda', (1, 0, 1, 11), ('arguments', [], [], None, [], [], None, []), ('Constant', (1, 7, 1, 11), None, None))),
+('Expression', ('Dict', (1, 0, 1, 7), [('Constant', (1, 2, 1, 3), 1, None)], [('Constant', (1, 4, 1, 5), 2, None)])),
+('Expression', ('Dict', (1, 0, 1, 2), [], [])),
+('Expression', ('Set', (1, 0, 1, 7), [('Constant', (1, 1, 1, 5), None, None)])),
+('Expression', ('Dict', (1, 0, 5, 6), [('Constant', (2, 6, 2, 7), 1, None)], [('Constant', (4, 10, 4, 11), 2, None)])),
+('Expression', ('List', (1, 0, 5, 6), [('Constant', (2, 6, 2, 7), 1, None), ('Constant', (4, 8, 4, 9), 1, None)], ('Load',))),
+('Expression', ('Tuple', (1, 0, 4, 6), [('Constant', (2, 6, 2, 7), 1, None)], ('Load',))),
+('Expression', ('Set', (1, 0, 5, 6), [('Constant', (2, 6, 2, 7), 1, None), ('Constant', (4, 8, 4, 9), 1, None)])),
+('Expression', ('ListComp', (1, 0, 1, 19), ('Name', (1, 1, 1, 2), 'a', ('Load',)), [('comprehension', ('Name', (1, 7, 1, 8), 'b', ('Store',)), ('Name', (1, 12, 1, 13), 'c', ('Load',)), [('Name', (1, 17, 1, 18), 'd', ('Load',))], 0)])),
+('Expression', ('GeneratorExp', (1, 0, 1, 19), ('Name', (1, 1, 1, 2), 'a', ('Load',)), [('comprehension', ('Name', (1, 7, 1, 8), 'b', ('Store',)), ('Name', (1, 12, 1, 13), 'c', ('Load',)), [('Name', (1, 17, 1, 18), 'd', ('Load',))], 0)])),
+('Expression', ('SetComp', (1, 0, 1, 19), ('Name', (1, 1, 1, 2), 'a', ('Load',)), [('comprehension', ('Name', (1, 7, 1, 8), 'b', ('Store',)), ('Name', (1, 12, 1, 13), 'c', ('Load',)), [('Name', (1, 17, 1, 18), 'd', ('Load',))], 0)])),
+('Expression', ('DictComp', (1, 0, 1, 25), ('Name', (1, 1, 1, 2), 'k', ('Load',)), ('Name', (1, 4, 1, 5), 'v', ('Load',)), [('comprehension', ('Tuple', (1, 10, 1, 14), [('Name', (1, 10, 1, 11), 'k', ('Store',)), ('Name', (1, 13, 1, 14), 'v', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'c', ('Load',)), [('Name', (1, 23, 1, 24), 'd', ('Load',))], 0)])),
+('Expression', ('ListComp', (1, 0, 1, 20), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 14), [('Name', (1, 11, 1, 12), 'a', ('Store',)), ('Name', (1, 13, 1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'c', ('Load',)), [], 0)])),
+('Expression', ('ListComp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
+('Expression', ('ListComp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('List', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
+('Expression', ('SetComp', (1, 0, 1, 20), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 14), [('Name', (1, 11, 1, 12), 'a', ('Store',)), ('Name', (1, 13, 1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'c', ('Load',)), [], 0)])),
+('Expression', ('SetComp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
+('Expression', ('SetComp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('List', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
+('Expression', ('GeneratorExp', (1, 0, 1, 20), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 14), [('Name', (1, 11, 1, 12), 'a', ('Store',)), ('Name', (1, 13, 1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 18, 1, 19), 'c', ('Load',)), [], 0)])),
+('Expression', ('GeneratorExp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
+('Expression', ('GeneratorExp', (1, 0, 1, 22), ('Tuple', (1, 1, 1, 6), [('Name', (1, 2, 1, 3), 'a', ('Load',)), ('Name', (1, 4, 1, 5), 'b', ('Load',))], ('Load',)), [('comprehension', ('List', (1, 11, 1, 16), [('Name', (1, 12, 1, 13), 'a', ('Store',)), ('Name', (1, 14, 1, 15), 'b', ('Store',))], ('Store',)), ('Name', (1, 20, 1, 21), 'c', ('Load',)), [], 0)])),
+('Expression', ('Compare', (1, 0, 1, 9), ('Constant', (1, 0, 1, 1), 1, None), [('Lt',), ('Lt',)], [('Constant', (1, 4, 1, 5), 2, None), ('Constant', (1, 8, 1, 9), 3, None)])),
+('Expression', ('Compare', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), [('Eq',)], [('Name', (1, 5, 1, 6), 'b', ('Load',))])),
+('Expression', ('Compare', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), [('LtE',)], [('Name', (1, 5, 1, 6), 'b', ('Load',))])),
+('Expression', ('Compare', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), [('GtE',)], [('Name', (1, 5, 1, 6), 'b', ('Load',))])),
+('Expression', ('Compare', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), [('NotEq',)], [('Name', (1, 5, 1, 6), 'b', ('Load',))])),
+('Expression', ('Compare', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), [('Is',)], [('Name', (1, 5, 1, 6), 'b', ('Load',))])),
+('Expression', ('Compare', (1, 0, 1, 10), ('Name', (1, 0, 1, 1), 'a', ('Load',)), [('IsNot',)], [('Name', (1, 9, 1, 10), 'b', ('Load',))])),
+('Expression', ('Compare', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), [('In',)], [('Name', (1, 5, 1, 6), 'b', ('Load',))])),
+('Expression', ('Compare', (1, 0, 1, 10), ('Name', (1, 0, 1, 1), 'a', ('Load',)), [('NotIn',)], [('Name', (1, 9, 1, 10), 'b', ('Load',))])),
+('Expression', ('Call', (1, 0, 1, 3), ('Name', (1, 0, 1, 1), 'f', ('Load',)), [], [])),
+('Expression', ('Call', (1, 0, 1, 17), ('Name', (1, 0, 1, 1), 'f', ('Load',)), [('Constant', (1, 2, 1, 3), 1, None), ('Constant', (1, 4, 1, 5), 2, None), ('Starred', (1, 10, 1, 12), ('Name', (1, 11, 1, 12), 'd', ('Load',)), ('Load',))], [('keyword', (1, 6, 1, 9), 'c', ('Constant', (1, 8, 1, 9), 3, None)), ('keyword', (1, 13, 1, 16), None, ('Name', (1, 15, 1, 16), 'e', ('Load',)))])),
+('Expression', ('Call', (1, 0, 1, 10), ('Name', (1, 0, 1, 1), 'f', ('Load',)), [('Starred', (1, 2, 1, 9), ('List', (1, 3, 1, 9), [('Constant', (1, 4, 1, 5), 0, None), ('Constant', (1, 7, 1, 8), 1, None)], ('Load',)), ('Load',))], [])),
+('Expression', ('Call', (1, 0, 1, 15), ('Name', (1, 0, 1, 1), 'f', ('Load',)), [('GeneratorExp', (1, 1, 1, 15), ('Name', (1, 2, 1, 3), 'a', ('Load',)), [('comprehension', ('Name', (1, 8, 1, 9), 'a', ('Store',)), ('Name', (1, 13, 1, 14), 'b', ('Load',)), [], 0)])], [])),
+('Expression', ('Constant', (1, 0, 1, 2), 10, None)),
+('Expression', ('Constant', (1, 0, 1, 2), 1j, None)),
+('Expression', ('Constant', (1, 0, 1, 8), 'string', None)),
+('Expression', ('Attribute', (1, 0, 1, 3), ('Name', (1, 0, 1, 1), 'a', ('Load',)), 'b', ('Load',))),
+('Expression', ('Subscript', (1, 0, 1, 6), ('Name', (1, 0, 1, 1), 'a', ('Load',)), ('Slice', (1, 2, 1, 5), ('Name', (1, 2, 1, 3), 'b', ('Load',)), ('Name', (1, 4, 1, 5), 'c', ('Load',)), None), ('Load',))),
+('Expression', ('Name', (1, 0, 1, 1), 'v', ('Load',))),
+('Expression', ('List', (1, 0, 1, 7), [('Constant', (1, 1, 1, 2), 1, None), ('Constant', (1, 3, 1, 4), 2, None), ('Constant', (1, 5, 1, 6), 3, None)], ('Load',))),
+('Expression', ('List', (1, 0, 1, 2), [], ('Load',))),
+('Expression', ('Tuple', (1, 0, 1, 5), [('Constant', (1, 0, 1, 1), 1, None), ('Constant', (1, 2, 1, 3), 2, None), ('Constant', (1, 4, 1, 5), 3, None)], ('Load',))),
+('Expression', ('Tuple', (1, 0, 1, 7), [('Constant', (1, 1, 1, 2), 1, None), ('Constant', (1, 3, 1, 4), 2, None), ('Constant', (1, 5, 1, 6), 3, None)], ('Load',))),
+('Expression', ('Tuple', (1, 0, 1, 2), [], ('Load',))),
+('Expression', ('Call', (1, 0, 1, 17), ('Attribute', (1, 0, 1, 7), ('Attribute', (1, 0, 1, 5), ('Attribute', (1, 0, 1, 3), ('Name', (1, 0, 1, 1), 'a', ('Load',)), 'b', ('Load',)), 'c', ('Load',)), 'd', ('Load',)), [('Subscript', (1, 8, 1, 16), ('Attribute', (1, 8, 1, 11), ('Name', (1, 8, 1, 9), 'a', ('Load',)), 'b', ('Load',)), ('Slice', (1, 12, 1, 15), ('Constant', (1, 12, 1, 13), 1, None), ('Constant', (1, 14, 1, 15), 2, None), None), ('Load',))], [])),
+('Expression', ('Subscript', (1, 0, 1, 7), ('List', (1, 0, 1, 3), [('Constant', (1, 1, 1, 2), 5, None)], ('Load',)), ('Slice', (1, 4, 1, 6), ('Constant', (1, 4, 1, 5), 1, None), None, None), ('Load',))),
+('Expression', ('Subscript', (1, 0, 1, 7), ('List', (1, 0, 1, 3), [('Constant', (1, 1, 1, 2), 5, None)], ('Load',)), ('Slice', (1, 4, 1, 6), None, ('Constant', (1, 5, 1, 6), 1, None), None), ('Load',))),
+('Expression', ('Subscript', (1, 0, 1, 8), ('List', (1, 0, 1, 3), [('Constant', (1, 1, 1, 2), 5, None)], ('Load',)), ('Slice', (1, 4, 1, 7), None, None, ('Constant', (1, 6, 1, 7), 1, None)), ('Load',))),
+('Expression', ('Subscript', (1, 0, 1, 10), ('List', (1, 0, 1, 3), [('Constant', (1, 1, 1, 2), 5, None)], ('Load',)), ('Slice', (1, 4, 1, 9), ('Constant', (1, 4, 1, 5), 1, None), ('Constant', (1, 6, 1, 7), 1, None), ('Constant', (1, 8, 1, 9), 1, None)), ('Load',))),
+('Expression', ('IfExp', (1, 0, 1, 21), ('Name', (1, 9, 1, 10), 'x', ('Load',)), ('Call', (1, 0, 1, 5), ('Name', (1, 0, 1, 3), 'foo', ('Load',)), [], []), ('Call', (1, 16, 1, 21), ('Name', (1, 16, 1, 19), 'bar', ('Load',)), [], []))),
+('Expression', ('JoinedStr', (1, 0, 1, 6), [('FormattedValue', (1, 2, 1, 5), ('Name', (1, 3, 1, 4), 'a', ('Load',)), -1, None)])),
+('Expression', ('JoinedStr', (1, 0, 1, 10), [('FormattedValue', (1, 2, 1, 9), ('Name', (1, 3, 1, 4), 'a', ('Load',)), -1, ('JoinedStr', (1, 4, 1, 8), [('Constant', (1, 5, 1, 8), '.2f', None)]))])),
+('Expression', ('JoinedStr', (1, 0, 1, 8), [('FormattedValue', (1, 2, 1, 7), ('Name', (1, 3, 1, 4), 'a', ('Load',)), 114, None)])),
+('Expression', ('JoinedStr', (1, 0, 1, 11), [('Constant', (1, 2, 1, 6), 'foo(', None), ('FormattedValue', (1, 6, 1, 9), ('Name', (1, 7, 1, 8), 'a', ('Load',)), -1, None), ('Constant', (1, 9, 1, 10), ')', None)])),
+]
+main()
diff --git a/Lib/test/test_ast/test_ast.py b/Lib/test/test_ast/test_ast.py
new file mode 100644
index 0000000000..10319e36fa
--- /dev/null
+++ b/Lib/test/test_ast/test_ast.py
@@ -0,0 +1,3757 @@
+import ast
+import builtins
+import copy
+import dis
+import enum
+import os
+import re
+import sys
+import textwrap
+import types
+import unittest
+import warnings
+import weakref
+from functools import partial
+from textwrap import dedent
+
+try:
+ import _testinternalcapi
+except ImportError:
+ _testinternalcapi = None
+
+from test import support
+from test.support.import_helper import import_fresh_module
+from test.support import os_helper, script_helper
+from test.support.ast_helper import ASTTestMixin
+from test.test_ast.utils import to_tuple
+from test.test_ast.snippets import (
+ eval_tests, eval_results, exec_tests, exec_results, single_tests, single_results
+)
+
+
+class AST_Tests(unittest.TestCase):
+ maxDiff = None
+
+ def _is_ast_node(self, name, node):
+ if not isinstance(node, type):
+ return False
+ if "ast" not in node.__module__:
+ return False
+ return name != "AST" and name[0].isupper()
+
+ def _assertTrueorder(self, ast_node, parent_pos):
+ if not isinstance(ast_node, ast.AST) or ast_node._fields is None:
+ return
+ if isinstance(ast_node, (ast.expr, ast.stmt, ast.excepthandler)):
+ node_pos = (ast_node.lineno, ast_node.col_offset)
+ self.assertGreaterEqual(node_pos, parent_pos)
+ parent_pos = (ast_node.lineno, ast_node.col_offset)
+ for name in ast_node._fields:
+ value = getattr(ast_node, name)
+ if isinstance(value, list):
+ first_pos = parent_pos
+ if value and name == "decorator_list":
+ first_pos = (value[0].lineno, value[0].col_offset)
+ for child in value:
+ self._assertTrueorder(child, first_pos)
+ elif value is not None:
+ self._assertTrueorder(value, parent_pos)
+ self.assertEqual(ast_node._fields, ast_node.__match_args__)
+
+ def test_AST_objects(self):
+ x = ast.AST()
+ self.assertEqual(x._fields, ())
+ x.foobar = 42
+ self.assertEqual(x.foobar, 42)
+ self.assertEqual(x.__dict__["foobar"], 42)
+
+ with self.assertRaises(AttributeError):
+ x.vararg
+
+ with self.assertRaises(TypeError):
+ # "ast.AST constructor takes 0 positional arguments"
+ ast.AST(2)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_AST_fields_NULL_check(self):
+ # See: https://github.com/python/cpython/issues/126105
+ old_value = ast.AST._fields
+
+ def cleanup():
+ ast.AST._fields = old_value
+ self.addCleanup(cleanup)
+
+ del ast.AST._fields
+
+ msg = "type object 'ast.AST' has no attribute '_fields'"
+ # Both examples used to crash:
+ with self.assertRaisesRegex(AttributeError, msg):
+ ast.AST(arg1=123)
+ with self.assertRaisesRegex(AttributeError, msg):
+ ast.AST()
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_AST_garbage_collection(self):
+ class X:
+ pass
+
+ a = ast.AST()
+ a.x = X()
+ a.x.a = a
+ ref = weakref.ref(a.x)
+ del a
+ support.gc_collect()
+ self.assertIsNone(ref())
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_snippets(self):
+ for input, output, kind in (
+ (exec_tests, exec_results, "exec"),
+ (single_tests, single_results, "single"),
+ (eval_tests, eval_results, "eval"),
+ ):
+ for i, o in zip(input, output):
+ with self.subTest(action="parsing", input=i):
+ ast_tree = compile(i, "?", kind, ast.PyCF_ONLY_AST)
+ self.assertEqual(to_tuple(ast_tree), o)
+ self._assertTrueorder(ast_tree, (0, 0))
+ with self.subTest(action="compiling", input=i, kind=kind):
+ compile(ast_tree, "?", kind)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_ast_validation(self):
+ # compile() is the only function that calls PyAST_Validate
+ snippets_to_validate = exec_tests + single_tests + eval_tests
+ for snippet in snippets_to_validate:
+ tree = ast.parse(snippet)
+ compile(tree, "", "exec")
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_optimization_levels__debug__(self):
+ cases = [(-1, "__debug__"), (0, "__debug__"), (1, False), (2, False)]
+ for optval, expected in cases:
+ with self.subTest(optval=optval, expected=expected):
+ res1 = ast.parse("__debug__", optimize=optval)
+ res2 = ast.parse(ast.parse("__debug__"), optimize=optval)
+ for res in [res1, res2]:
+ self.assertIsInstance(res.body[0], ast.Expr)
+ if isinstance(expected, bool):
+ self.assertIsInstance(res.body[0].value, ast.Constant)
+ self.assertEqual(res.body[0].value.value, expected)
+ else:
+ self.assertIsInstance(res.body[0].value, ast.Name)
+ self.assertEqual(res.body[0].value.id, expected)
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_optimization_levels_const_folding(self):
+ folded = ("Expr", (1, 0, 1, 5), ("Constant", (1, 0, 1, 5), 3, None))
+ not_folded = (
+ "Expr",
+ (1, 0, 1, 5),
+ (
+ "BinOp",
+ (1, 0, 1, 5),
+ ("Constant", (1, 0, 1, 1), 1, None),
+ ("Add",),
+ ("Constant", (1, 4, 1, 5), 2, None),
+ ),
+ )
+
+ cases = [(-1, not_folded), (0, not_folded), (1, folded), (2, folded)]
+ for optval, expected in cases:
+ with self.subTest(optval=optval):
+ tree1 = ast.parse("1 + 2", optimize=optval)
+ tree2 = ast.parse(ast.parse("1 + 2"), optimize=optval)
+ for tree in [tree1, tree2]:
+ res = to_tuple(tree.body[0])
+ self.assertEqual(res, expected)
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_invalid_position_information(self):
+ invalid_linenos = [(10, 1), (-10, -11), (10, -11), (-5, -2), (-5, 1)]
+
+ for lineno, end_lineno in invalid_linenos:
+ with self.subTest(f"Check invalid linenos {lineno}:{end_lineno}"):
+ snippet = "a = 1"
+ tree = ast.parse(snippet)
+ tree.body[0].lineno = lineno
+ tree.body[0].end_lineno = end_lineno
+ with self.assertRaises(ValueError):
+ compile(tree, "", "exec")
+
+ invalid_col_offsets = [(10, 1), (-10, -11), (10, -11), (-5, -2), (-5, 1)]
+ for col_offset, end_col_offset in invalid_col_offsets:
+ with self.subTest(
+ f"Check invalid col_offset {col_offset}:{end_col_offset}"
+ ):
+ snippet = "a = 1"
+ tree = ast.parse(snippet)
+ tree.body[0].col_offset = col_offset
+ tree.body[0].end_col_offset = end_col_offset
+ with self.assertRaises(ValueError):
+ compile(tree, "", "exec")
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_compilation_of_ast_nodes_with_default_end_position_values(self):
+ tree = ast.Module(
+ body=[
+ ast.Import(
+ names=[ast.alias(name="builtins", lineno=1, col_offset=0)],
+ lineno=1,
+ col_offset=0,
+ ),
+ ast.Import(
+ names=[ast.alias(name="traceback", lineno=0, col_offset=0)],
+ lineno=0,
+ col_offset=1,
+ ),
+ ],
+ type_ignores=[],
+ )
+
+ # Check that compilation doesn't crash. Note: this may crash explicitly only on debug mode.
+ compile(tree, "", "exec")
+
+ # TODO: RUSTPYTHON; TypeError: required field "end_lineno" missing from alias
+ @unittest.expectedFailure
+ def test_negative_locations_for_compile(self):
+ # See https://github.com/python/cpython/issues/130775
+ alias = ast.alias(name='traceback', lineno=0, col_offset=0)
+ for attrs in (
+ {'lineno': -2, 'col_offset': 0},
+ {'lineno': 0, 'col_offset': -2},
+ {'lineno': 0, 'col_offset': -2, 'end_col_offset': -2},
+ {'lineno': -2, 'end_lineno': -2, 'col_offset': 0},
+ ):
+ with self.subTest(attrs=attrs):
+ tree = ast.Module(body=[
+ ast.Import(names=[alias], **attrs)
+ ], type_ignores=[])
+
+ # It used to crash on this step:
+ compile(tree, "", "exec")
+
+ # This also must not crash:
+ ast.parse(tree, optimize=2)
+
+ def test_slice(self):
+ slc = ast.parse("x[::]").body[0].value.slice
+ self.assertIsNone(slc.upper)
+ self.assertIsNone(slc.lower)
+ self.assertIsNone(slc.step)
+
+ def test_from_import(self):
+ im = ast.parse("from . import y").body[0]
+ self.assertIsNone(im.module)
+
+ def test_non_interned_future_from_ast(self):
+ mod = ast.parse("from __future__ import division")
+ self.assertIsInstance(mod.body[0], ast.ImportFrom)
+ mod.body[0].module = " __future__ ".strip()
+ compile(mod, "", "exec")
+
+ def test_alias(self):
+ im = ast.parse("from bar import y").body[0]
+ self.assertEqual(len(im.names), 1)
+ alias = im.names[0]
+ self.assertEqual(alias.name, "y")
+ self.assertIsNone(alias.asname)
+ self.assertEqual(alias.lineno, 1)
+ self.assertEqual(alias.end_lineno, 1)
+ self.assertEqual(alias.col_offset, 16)
+ self.assertEqual(alias.end_col_offset, 17)
+
+ im = ast.parse("from bar import *").body[0]
+ alias = im.names[0]
+ self.assertEqual(alias.name, "*")
+ self.assertIsNone(alias.asname)
+ self.assertEqual(alias.lineno, 1)
+ self.assertEqual(alias.end_lineno, 1)
+ self.assertEqual(alias.col_offset, 16)
+ self.assertEqual(alias.end_col_offset, 17)
+
+ im = ast.parse("from bar import y as z").body[0]
+ alias = im.names[0]
+ self.assertEqual(alias.name, "y")
+ self.assertEqual(alias.asname, "z")
+ self.assertEqual(alias.lineno, 1)
+ self.assertEqual(alias.end_lineno, 1)
+ self.assertEqual(alias.col_offset, 16)
+ self.assertEqual(alias.end_col_offset, 22)
+
+ im = ast.parse("import bar as foo").body[0]
+ alias = im.names[0]
+ self.assertEqual(alias.name, "bar")
+ self.assertEqual(alias.asname, "foo")
+ self.assertEqual(alias.lineno, 1)
+ self.assertEqual(alias.end_lineno, 1)
+ self.assertEqual(alias.col_offset, 7)
+ self.assertEqual(alias.end_col_offset, 17)
+
+ def test_base_classes(self):
+ self.assertTrue(issubclass(ast.For, ast.stmt))
+ self.assertTrue(issubclass(ast.Name, ast.expr))
+ self.assertTrue(issubclass(ast.stmt, ast.AST))
+ self.assertTrue(issubclass(ast.expr, ast.AST))
+ self.assertTrue(issubclass(ast.comprehension, ast.AST))
+ self.assertTrue(issubclass(ast.Gt, ast.AST))
+
+ def test_import_deprecated(self):
+ ast = import_fresh_module("ast")
+ depr_regex = (
+ r"ast\.{} is deprecated and will be removed in Python 3.14; "
+ r"use ast\.Constant instead"
+ )
+ for name in "Num", "Str", "Bytes", "NameConstant", "Ellipsis":
+ with self.assertWarnsRegex(DeprecationWarning, depr_regex.format(name)):
+ getattr(ast, name)
+
+ def test_field_attr_existence_deprecated(self):
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "", DeprecationWarning)
+ from ast import Num, Str, Bytes, NameConstant, Ellipsis
+
+ for name in ("Num", "Str", "Bytes", "NameConstant", "Ellipsis"):
+ item = getattr(ast, name)
+ if self._is_ast_node(name, item):
+ with self.subTest(item):
+ with self.assertWarns(DeprecationWarning):
+ x = item()
+ if isinstance(x, ast.AST):
+ self.assertIs(type(x._fields), tuple)
+
+ # TODO: RUSTPYTHON; type object 'Module' has no attribute '__annotations__'
+ @unittest.expectedFailure
+ def test_field_attr_existence(self):
+ for name, item in ast.__dict__.items():
+ # These emit DeprecationWarnings
+ if name in {"Num", "Str", "Bytes", "NameConstant", "Ellipsis"}:
+ continue
+ # constructor has a different signature
+ if name == "Index":
+ continue
+ if self._is_ast_node(name, item):
+ x = self._construct_ast_class(item)
+ if isinstance(x, ast.AST):
+ self.assertIs(type(x._fields), tuple)
+
+ def _construct_ast_class(self, cls):
+ kwargs = {}
+ for name, typ in cls.__annotations__.items():
+ if typ is str:
+ kwargs[name] = "capybara"
+ elif typ is int:
+ kwargs[name] = 42
+ elif typ is object:
+ kwargs[name] = b"capybara"
+ elif isinstance(typ, type) and issubclass(typ, ast.AST):
+ kwargs[name] = self._construct_ast_class(typ)
+ return cls(**kwargs)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_arguments(self):
+ x = ast.arguments()
+ self.assertEqual(
+ x._fields,
+ (
+ "posonlyargs",
+ "args",
+ "vararg",
+ "kwonlyargs",
+ "kw_defaults",
+ "kwarg",
+ "defaults",
+ ),
+ )
+ self.assertEqual(
+ x.__annotations__,
+ {
+ "posonlyargs": list[ast.arg],
+ "args": list[ast.arg],
+ "vararg": ast.arg | None,
+ "kwonlyargs": list[ast.arg],
+ "kw_defaults": list[ast.expr],
+ "kwarg": ast.arg | None,
+ "defaults": list[ast.expr],
+ },
+ )
+
+ self.assertEqual(x.args, [])
+ self.assertIsNone(x.vararg)
+
+ x = ast.arguments(*range(1, 8))
+ self.assertEqual(x.args, 2)
+ self.assertEqual(x.vararg, 3)
+
+ def test_field_attr_writable_deprecated(self):
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "", DeprecationWarning)
+ x = ast.Num()
+ # We can assign to _fields
+ x._fields = 666
+ self.assertEqual(x._fields, 666)
+
+ def test_field_attr_writable(self):
+ x = ast.Constant(1)
+ # We can assign to _fields
+ x._fields = 666
+ self.assertEqual(x._fields, 666)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_classattrs_deprecated(self):
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "", DeprecationWarning)
+ from ast import Num, Str, Bytes, NameConstant, Ellipsis
+
+ with warnings.catch_warnings(record=True) as wlog:
+ warnings.filterwarnings("always", "", DeprecationWarning)
+ x = ast.Num()
+ self.assertEqual(x._fields, ("value", "kind"))
+
+ with self.assertRaises(AttributeError):
+ x.value
+
+ with self.assertRaises(AttributeError):
+ x.n
+
+ x = ast.Num(42)
+ self.assertEqual(x.value, 42)
+ self.assertEqual(x.n, 42)
+
+ with self.assertRaises(AttributeError):
+ x.lineno
+
+ with self.assertRaises(AttributeError):
+ x.foobar
+
+ x = ast.Num(lineno=2)
+ self.assertEqual(x.lineno, 2)
+
+ x = ast.Num(42, lineno=0)
+ self.assertEqual(x.lineno, 0)
+ self.assertEqual(x._fields, ("value", "kind"))
+ self.assertEqual(x.value, 42)
+ self.assertEqual(x.n, 42)
+
+ self.assertRaises(TypeError, ast.Num, 1, None, 2)
+ self.assertRaises(TypeError, ast.Num, 1, None, 2, lineno=0)
+
+ # Arbitrary keyword arguments are supported
+ self.assertEqual(ast.Num(1, foo="bar").foo, "bar")
+
+ with self.assertRaisesRegex(
+ TypeError, "Num got multiple values for argument 'n'"
+ ):
+ ast.Num(1, n=2)
+
+ self.assertEqual(ast.Num(42).n, 42)
+ self.assertEqual(ast.Num(4.25).n, 4.25)
+ self.assertEqual(ast.Num(4.25j).n, 4.25j)
+ self.assertEqual(ast.Str("42").s, "42")
+ self.assertEqual(ast.Bytes(b"42").s, b"42")
+ self.assertIs(ast.NameConstant(True).value, True)
+ self.assertIs(ast.NameConstant(False).value, False)
+ self.assertIs(ast.NameConstant(None).value, None)
+
+ self.assertEqual(
+ [str(w.message) for w in wlog],
+ [
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "Constant.__init__ missing 1 required positional argument: 'value'. This will become "
+ "an error in Python 3.15.",
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "Constant.__init__ missing 1 required positional argument: 'value'. This will become "
+ "an error in Python 3.15.",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "Constant.__init__ got an unexpected keyword argument 'foo'. Support for "
+ "arbitrary keyword arguments is deprecated and will be removed in Python "
+ "3.15.",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ "ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "Attribute s is deprecated and will be removed in Python 3.14; use value instead",
+ "ast.Bytes is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "Attribute s is deprecated and will be removed in Python 3.14; use value instead",
+ "ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ ],
+ )
+
+ # TODO: RUSTPYTHON; DeprecationWarning not triggered
+ @unittest.expectedFailure
+ def test_classattrs(self):
+ with self.assertWarns(DeprecationWarning):
+ x = ast.Constant()
+ self.assertEqual(x._fields, ("value", "kind"))
+
+ with self.assertRaises(AttributeError):
+ x.value
+
+ x = ast.Constant(42)
+ self.assertEqual(x.value, 42)
+
+ with self.assertRaises(AttributeError):
+ x.lineno
+
+ with self.assertRaises(AttributeError):
+ x.foobar
+
+ x = ast.Constant(lineno=2, value=3)
+ self.assertEqual(x.lineno, 2)
+
+ x = ast.Constant(42, lineno=0)
+ self.assertEqual(x.lineno, 0)
+ self.assertEqual(x._fields, ("value", "kind"))
+ self.assertEqual(x.value, 42)
+
+ self.assertRaises(TypeError, ast.Constant, 1, None, 2)
+ self.assertRaises(TypeError, ast.Constant, 1, None, 2, lineno=0)
+
+ # Arbitrary keyword arguments are supported (but deprecated)
+ with self.assertWarns(DeprecationWarning):
+ self.assertEqual(ast.Constant(1, foo="bar").foo, "bar")
+
+ with self.assertRaisesRegex(
+ TypeError, "Constant got multiple values for argument 'value'"
+ ):
+ ast.Constant(1, value=2)
+
+ self.assertEqual(ast.Constant(42).value, 42)
+ self.assertEqual(ast.Constant(4.25).value, 4.25)
+ self.assertEqual(ast.Constant(4.25j).value, 4.25j)
+ self.assertEqual(ast.Constant("42").value, "42")
+ self.assertEqual(ast.Constant(b"42").value, b"42")
+ self.assertIs(ast.Constant(True).value, True)
+ self.assertIs(ast.Constant(False).value, False)
+ self.assertIs(ast.Constant(None).value, None)
+ self.assertIs(ast.Constant(...).value, ...)
+
+ def test_realtype(self):
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "", DeprecationWarning)
+ from ast import Num, Str, Bytes, NameConstant, Ellipsis
+
+ with warnings.catch_warnings(record=True) as wlog:
+ warnings.filterwarnings("always", "", DeprecationWarning)
+ self.assertIs(type(ast.Num(42)), ast.Constant)
+ self.assertIs(type(ast.Num(4.25)), ast.Constant)
+ self.assertIs(type(ast.Num(4.25j)), ast.Constant)
+ self.assertIs(type(ast.Str("42")), ast.Constant)
+ self.assertIs(type(ast.Bytes(b"42")), ast.Constant)
+ self.assertIs(type(ast.NameConstant(True)), ast.Constant)
+ self.assertIs(type(ast.NameConstant(False)), ast.Constant)
+ self.assertIs(type(ast.NameConstant(None)), ast.Constant)
+ self.assertIs(type(ast.Ellipsis()), ast.Constant)
+
+ self.assertEqual(
+ [str(w.message) for w in wlog],
+ [
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.Bytes is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.Ellipsis is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ ],
+ )
+
+ def test_isinstance(self):
+ from ast import Constant
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "", DeprecationWarning)
+ from ast import Num, Str, Bytes, NameConstant, Ellipsis
+
+ cls_depr_msg = (
+ "ast.{} is deprecated and will be removed in Python 3.14; "
+ "use ast.Constant instead"
+ )
+
+ assertNumDeprecated = partial(
+ self.assertWarnsRegex, DeprecationWarning, cls_depr_msg.format("Num")
+ )
+ assertStrDeprecated = partial(
+ self.assertWarnsRegex, DeprecationWarning, cls_depr_msg.format("Str")
+ )
+ assertBytesDeprecated = partial(
+ self.assertWarnsRegex, DeprecationWarning, cls_depr_msg.format("Bytes")
+ )
+ assertNameConstantDeprecated = partial(
+ self.assertWarnsRegex,
+ DeprecationWarning,
+ cls_depr_msg.format("NameConstant"),
+ )
+ assertEllipsisDeprecated = partial(
+ self.assertWarnsRegex, DeprecationWarning, cls_depr_msg.format("Ellipsis")
+ )
+
+ for arg in 42, 4.2, 4.2j:
+ with self.subTest(arg=arg):
+ with assertNumDeprecated():
+ n = Num(arg)
+ with assertNumDeprecated():
+ self.assertIsInstance(n, Num)
+
+ with assertStrDeprecated():
+ s = Str("42")
+ with assertStrDeprecated():
+ self.assertIsInstance(s, Str)
+
+ with assertBytesDeprecated():
+ b = Bytes(b"42")
+ with assertBytesDeprecated():
+ self.assertIsInstance(b, Bytes)
+
+ for arg in True, False, None:
+ with self.subTest(arg=arg):
+ with assertNameConstantDeprecated():
+ n = NameConstant(arg)
+ with assertNameConstantDeprecated():
+ self.assertIsInstance(n, NameConstant)
+
+ with assertEllipsisDeprecated():
+ e = Ellipsis()
+ with assertEllipsisDeprecated():
+ self.assertIsInstance(e, Ellipsis)
+
+ for arg in 42, 4.2, 4.2j:
+ with self.subTest(arg=arg):
+ with assertNumDeprecated():
+ self.assertIsInstance(Constant(arg), Num)
+
+ with assertStrDeprecated():
+ self.assertIsInstance(Constant("42"), Str)
+
+ with assertBytesDeprecated():
+ self.assertIsInstance(Constant(b"42"), Bytes)
+
+ for arg in True, False, None:
+ with self.subTest(arg=arg):
+ with assertNameConstantDeprecated():
+ self.assertIsInstance(Constant(arg), NameConstant)
+
+ with assertEllipsisDeprecated():
+ self.assertIsInstance(Constant(...), Ellipsis)
+
+ with assertStrDeprecated():
+ s = Str("42")
+ assertNumDeprecated(self.assertNotIsInstance, s, Num)
+ assertBytesDeprecated(self.assertNotIsInstance, s, Bytes)
+
+ with assertNumDeprecated():
+ n = Num(42)
+ assertStrDeprecated(self.assertNotIsInstance, n, Str)
+ assertNameConstantDeprecated(self.assertNotIsInstance, n, NameConstant)
+ assertEllipsisDeprecated(self.assertNotIsInstance, n, Ellipsis)
+
+ with assertNameConstantDeprecated():
+ n = NameConstant(True)
+ with assertNumDeprecated():
+ self.assertNotIsInstance(n, Num)
+
+ with assertNameConstantDeprecated():
+ n = NameConstant(False)
+ with assertNumDeprecated():
+ self.assertNotIsInstance(n, Num)
+
+ for arg in "42", True, False:
+ with self.subTest(arg=arg):
+ with assertNumDeprecated():
+ self.assertNotIsInstance(Constant(arg), Num)
+
+ assertStrDeprecated(self.assertNotIsInstance, Constant(42), Str)
+ assertBytesDeprecated(self.assertNotIsInstance, Constant("42"), Bytes)
+ assertNameConstantDeprecated(
+ self.assertNotIsInstance, Constant(42), NameConstant
+ )
+ assertEllipsisDeprecated(self.assertNotIsInstance, Constant(42), Ellipsis)
+ assertNumDeprecated(self.assertNotIsInstance, Constant(None), Num)
+ assertStrDeprecated(self.assertNotIsInstance, Constant(None), Str)
+ assertBytesDeprecated(self.assertNotIsInstance, Constant(None), Bytes)
+ assertNameConstantDeprecated(
+ self.assertNotIsInstance, Constant(1), NameConstant
+ )
+ assertEllipsisDeprecated(self.assertNotIsInstance, Constant(None), Ellipsis)
+
+ class S(str):
+ pass
+
+ with assertStrDeprecated():
+ self.assertIsInstance(Constant(S("42")), Str)
+ with assertNumDeprecated():
+ self.assertNotIsInstance(Constant(S("42")), Num)
+
+ # TODO: RUSTPYTHON; will be removed in Python 3.14
+ @unittest.expectedFailure
+ def test_constant_subclasses_deprecated(self):
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "", DeprecationWarning)
+ from ast import Num
+
+ with warnings.catch_warnings(record=True) as wlog:
+ warnings.filterwarnings("always", "", DeprecationWarning)
+
+ class N(ast.Num):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.z = "spam"
+
+ class N2(ast.Num):
+ pass
+
+ n = N(42)
+ self.assertEqual(n.n, 42)
+ self.assertEqual(n.z, "spam")
+ self.assertIs(type(n), N)
+ self.assertIsInstance(n, N)
+ self.assertIsInstance(n, ast.Num)
+ self.assertNotIsInstance(n, N2)
+ self.assertNotIsInstance(ast.Num(42), N)
+ n = N(n=42)
+ self.assertEqual(n.n, 42)
+ self.assertIs(type(n), N)
+
+ self.assertEqual(
+ [str(w.message) for w in wlog],
+ [
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ ],
+ )
+
+ def test_constant_subclasses(self):
+ class N(ast.Constant):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.z = "spam"
+
+ class N2(ast.Constant):
+ pass
+
+ n = N(42)
+ self.assertEqual(n.value, 42)
+ self.assertEqual(n.z, "spam")
+ self.assertEqual(type(n), N)
+ self.assertTrue(isinstance(n, N))
+ self.assertTrue(isinstance(n, ast.Constant))
+ self.assertFalse(isinstance(n, N2))
+ self.assertFalse(isinstance(ast.Constant(42), N))
+ n = N(value=42)
+ self.assertEqual(n.value, 42)
+ self.assertEqual(type(n), N)
+
+ def test_module(self):
+ body = [ast.Constant(42)]
+ x = ast.Module(body, [])
+ self.assertEqual(x.body, body)
+
+ # TODO: RUSTPYTHON; DeprecationWarning not triggered
+ @unittest.expectedFailure
+ def test_nodeclasses(self):
+ # Zero arguments constructor explicitly allowed (but deprecated)
+ with self.assertWarns(DeprecationWarning):
+ x = ast.BinOp()
+ self.assertEqual(x._fields, ("left", "op", "right"))
+
+ # Random attribute allowed too
+ x.foobarbaz = 5
+ self.assertEqual(x.foobarbaz, 5)
+
+ n1 = ast.Constant(1)
+ n3 = ast.Constant(3)
+ addop = ast.Add()
+ x = ast.BinOp(n1, addop, n3)
+ self.assertEqual(x.left, n1)
+ self.assertEqual(x.op, addop)
+ self.assertEqual(x.right, n3)
+
+ x = ast.BinOp(1, 2, 3)
+ self.assertEqual(x.left, 1)
+ self.assertEqual(x.op, 2)
+ self.assertEqual(x.right, 3)
+
+ x = ast.BinOp(1, 2, 3, lineno=0)
+ self.assertEqual(x.left, 1)
+ self.assertEqual(x.op, 2)
+ self.assertEqual(x.right, 3)
+ self.assertEqual(x.lineno, 0)
+
+ # node raises exception when given too many arguments
+ self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4)
+ # node raises exception when given too many arguments
+ self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4, lineno=0)
+
+ # can set attributes through kwargs too
+ x = ast.BinOp(left=1, op=2, right=3, lineno=0)
+ self.assertEqual(x.left, 1)
+ self.assertEqual(x.op, 2)
+ self.assertEqual(x.right, 3)
+ self.assertEqual(x.lineno, 0)
+
+ # Random kwargs also allowed (but deprecated)
+ with self.assertWarns(DeprecationWarning):
+ x = ast.BinOp(1, 2, 3, foobarbaz=42)
+ self.assertEqual(x.foobarbaz, 42)
+
+ def test_no_fields(self):
+ # this used to fail because Sub._fields was None
+ x = ast.Sub()
+ self.assertEqual(x._fields, ())
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_invalid_sum(self):
+ pos = dict(lineno=2, col_offset=3)
+ m = ast.Module([ast.Expr(ast.expr(**pos), **pos)], [])
+ with self.assertRaises(TypeError) as cm:
+ compile(m, "", "exec")
+ self.assertIn("but got ", "exec")
+ self.assertIn("identifier must be of type str", str(cm.exception))
+
+ def test_invalid_constant(self):
+ for invalid_constant in int, (1, 2, int), frozenset((1, 2, int)):
+ e = ast.Expression(body=ast.Constant(invalid_constant))
+ ast.fix_missing_locations(e)
+ with self.assertRaisesRegex(TypeError, "invalid type in Constant: type"):
+ compile(e, "", "eval")
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_empty_yield_from(self):
+ # Issue 16546: yield from value is not optional.
+ empty_yield_from = ast.parse("def f():\n yield from g()")
+ empty_yield_from.body[0].body[0].value.value = None
+ with self.assertRaises(ValueError) as cm:
+ compile(empty_yield_from, "", "exec")
+ self.assertIn("field 'value' is required", str(cm.exception))
+
+ @support.cpython_only
+ def test_issue31592(self):
+ # There shouldn't be an assertion failure in case of a bad
+ # unicodedata.normalize().
+ import unicodedata
+
+ def bad_normalize(*args):
+ return None
+
+ with support.swap_attr(unicodedata, "normalize", bad_normalize):
+ self.assertRaises(TypeError, ast.parse, "\u03d5")
+
+ def test_issue18374_binop_col_offset(self):
+ tree = ast.parse("4+5+6+7")
+ parent_binop = tree.body[0].value
+ child_binop = parent_binop.left
+ grandchild_binop = child_binop.left
+ self.assertEqual(parent_binop.col_offset, 0)
+ self.assertEqual(parent_binop.end_col_offset, 7)
+ self.assertEqual(child_binop.col_offset, 0)
+ self.assertEqual(child_binop.end_col_offset, 5)
+ self.assertEqual(grandchild_binop.col_offset, 0)
+ self.assertEqual(grandchild_binop.end_col_offset, 3)
+
+ tree = ast.parse("4+5-\\\n 6-7")
+ parent_binop = tree.body[0].value
+ child_binop = parent_binop.left
+ grandchild_binop = child_binop.left
+ self.assertEqual(parent_binop.col_offset, 0)
+ self.assertEqual(parent_binop.lineno, 1)
+ self.assertEqual(parent_binop.end_col_offset, 4)
+ self.assertEqual(parent_binop.end_lineno, 2)
+
+ self.assertEqual(child_binop.col_offset, 0)
+ self.assertEqual(child_binop.lineno, 1)
+ self.assertEqual(child_binop.end_col_offset, 2)
+ self.assertEqual(child_binop.end_lineno, 2)
+
+ self.assertEqual(grandchild_binop.col_offset, 0)
+ self.assertEqual(grandchild_binop.lineno, 1)
+ self.assertEqual(grandchild_binop.end_col_offset, 3)
+ self.assertEqual(grandchild_binop.end_lineno, 1)
+
+ def test_issue39579_dotted_name_end_col_offset(self):
+ tree = ast.parse("@a.b.c\ndef f(): pass")
+ attr_b = tree.body[0].decorator_list[0].value
+ self.assertEqual(attr_b.end_col_offset, 4)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_ast_asdl_signature(self):
+ self.assertEqual(
+ ast.withitem.__doc__, "withitem(expr context_expr, expr? optional_vars)"
+ )
+ self.assertEqual(ast.GtE.__doc__, "GtE")
+ self.assertEqual(ast.Name.__doc__, "Name(identifier id, expr_context ctx)")
+ self.assertEqual(
+ ast.cmpop.__doc__,
+ "cmpop = Eq | NotEq | Lt | LtE | Gt | GtE | Is | IsNot | In | NotIn",
+ )
+ expressions = [f" | {node.__doc__}" for node in ast.expr.__subclasses__()]
+ expressions[0] = f"expr = {ast.expr.__subclasses__()[0].__doc__}"
+ self.assertCountEqual(ast.expr.__doc__.split("\n"), expressions)
+
+ # TODO: RUSTPYTHON; SyntaxError not raised
+ @unittest.expectedFailure
+ def test_positional_only_feature_version(self):
+ ast.parse("def foo(x, /): ...", feature_version=(3, 8))
+ ast.parse("def bar(x=1, /): ...", feature_version=(3, 8))
+ with self.assertRaises(SyntaxError):
+ ast.parse("def foo(x, /): ...", feature_version=(3, 7))
+ with self.assertRaises(SyntaxError):
+ ast.parse("def bar(x=1, /): ...", feature_version=(3, 7))
+
+ ast.parse("lambda x, /: ...", feature_version=(3, 8))
+ ast.parse("lambda x=1, /: ...", feature_version=(3, 8))
+ with self.assertRaises(SyntaxError):
+ ast.parse("lambda x, /: ...", feature_version=(3, 7))
+ with self.assertRaises(SyntaxError):
+ ast.parse("lambda x=1, /: ...", feature_version=(3, 7))
+
+ # TODO: RUSTPYTHON; SyntaxError not raised
+ @unittest.expectedFailure
+ def test_assignment_expression_feature_version(self):
+ ast.parse("(x := 0)", feature_version=(3, 8))
+ with self.assertRaises(SyntaxError):
+ ast.parse("(x := 0)", feature_version=(3, 7))
+
+ def test_conditional_context_managers_parse_with_low_feature_version(self):
+ # regression test for gh-115881
+ ast.parse("with (x() if y else z()): ...", feature_version=(3, 8))
+
+ # TODO: RUSTPYTHON; SyntaxError not raised
+ @unittest.expectedFailure
+ def test_exception_groups_feature_version(self):
+ code = dedent("""
+ try: ...
+ except* Exception: ...
+ """)
+ ast.parse(code)
+ with self.assertRaises(SyntaxError):
+ ast.parse(code, feature_version=(3, 10))
+
+ # TODO: RUSTPYTHON; SyntaxError not raised
+ @unittest.expectedFailure
+ def test_type_params_feature_version(self):
+ samples = [
+ "type X = int",
+ "class X[T]: pass",
+ "def f[T](): pass",
+ ]
+ for sample in samples:
+ with self.subTest(sample):
+ ast.parse(sample)
+ with self.assertRaises(SyntaxError):
+ ast.parse(sample, feature_version=(3, 11))
+
+ # TODO: RUSTPYTHON; SyntaxError not raised
+ @unittest.expectedFailure
+ def test_type_params_default_feature_version(self):
+ samples = [
+ "type X[*Ts=int] = int",
+ "class X[T=int]: pass",
+ "def f[**P=int](): pass",
+ ]
+ for sample in samples:
+ with self.subTest(sample):
+ ast.parse(sample)
+ with self.assertRaises(SyntaxError):
+ ast.parse(sample, feature_version=(3, 12))
+
+ def test_invalid_major_feature_version(self):
+ with self.assertRaises(ValueError):
+ ast.parse("pass", feature_version=(2, 7))
+ with self.assertRaises(ValueError):
+ ast.parse("pass", feature_version=(4, 0))
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_constant_as_name(self):
+ for constant in "True", "False", "None":
+ expr = ast.Expression(ast.Name(constant, ast.Load()))
+ ast.fix_missing_locations(expr)
+ with self.assertRaisesRegex(
+ ValueError, f"identifier field can't represent '{constant}' constant"
+ ):
+ compile(expr, "", "eval")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_constant_as_unicode_name(self):
+ constants = [
+ ("True", b"Tru\xe1\xb5\x89"),
+ ("False", b"Fal\xc5\xbfe"),
+ ("None", b"N\xc2\xbane"),
+ ]
+ for constant in constants:
+ with self.assertRaisesRegex(ValueError,
+ f"identifier field can't represent '{constant[0]}' constant"):
+ ast.parse(constant[1], mode="eval")
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_precedence_enum(self):
+ class _Precedence(enum.IntEnum):
+ """Precedence table that originated from python grammar."""
+
+ NAMED_EXPR = enum.auto() # :=
+ TUPLE = enum.auto() # ,
+ YIELD = enum.auto() # 'yield', 'yield from'
+ TEST = enum.auto() # 'if'-'else', 'lambda'
+ OR = enum.auto() # 'or'
+ AND = enum.auto() # 'and'
+ NOT = enum.auto() # 'not'
+ CMP = enum.auto() # '<', '>', '==', '>=', '<=', '!=',
+ # 'in', 'not in', 'is', 'is not'
+ EXPR = enum.auto()
+ BOR = EXPR # '|'
+ BXOR = enum.auto() # '^'
+ BAND = enum.auto() # '&'
+ SHIFT = enum.auto() # '<<', '>>'
+ ARITH = enum.auto() # '+', '-'
+ TERM = enum.auto() # '*', '@', '/', '%', '//'
+ FACTOR = enum.auto() # unary '+', '-', '~'
+ POWER = enum.auto() # '**'
+ AWAIT = enum.auto() # 'await'
+ ATOM = enum.auto()
+
+ def next(self):
+ try:
+ return self.__class__(self + 1)
+ except ValueError:
+ return self
+
+ enum._test_simple_enum(_Precedence, ast._Precedence)
+
+ @support.cpython_only
+ def test_ast_recursion_limit(self):
+ fail_depth = support.exceeds_recursion_limit()
+ crash_depth = 100_000
+ success_depth = int(support.get_c_recursion_limit() * 0.8)
+ if _testinternalcapi is not None:
+ remaining = _testinternalcapi.get_c_recursion_remaining()
+ success_depth = min(success_depth, remaining)
+
+ def check_limit(prefix, repeated):
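+ # A snippet nested success_depth times must parse cleanly; at fail_depth
+ # and crash_depth the parser is expected to raise RecursionError rather
+ # than crash.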
+ expect_ok = prefix + repeated * success_depth
+ ast.parse(expect_ok)
+ for depth in (fail_depth, crash_depth):
+ broken = prefix + repeated * depth
+ details = "Compiling ({!r} + {!r} * {})".format(prefix, repeated, depth)
+ with self.assertRaises(RecursionError, msg=details):
+ with support.infinite_recursion():
+ ast.parse(broken)
+
+ check_limit("a", "()")
+ check_limit("a", ".b")
+ check_limit("a", "[0]")
+ check_limit("a", "*a")
+
+ def test_null_bytes(self):
+ with self.assertRaises(
+ SyntaxError, msg="source code string cannot contain null bytes"
+ ):
+ ast.parse("a\0b")
+
+ def assert_none_check(self, node: type[ast.AST], attr: str, source: str) -> None:
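+ # Parse `source`, set `attr` to None on the single matching `node`
+ # instance, and verify that compile() reports the missing required field.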
+ with self.subTest(f"{node.__name__}.{attr}"):
+ tree = ast.parse(source)
+ found = 0
+ for child in ast.walk(tree):
+ if isinstance(child, node):
+ setattr(child, attr, None)
+ found += 1
+ self.assertEqual(found, 1)
+ e = re.escape(f"field '{attr}' is required for {node.__name__}")
+ with self.assertRaisesRegex(ValueError, f"^{e}$"):
+ compile(tree, "", "exec")
+
+ # TODO: RUSTPYTHON; TypeError: expected some sort of expr, but got None
+ @unittest.expectedFailure
+ def test_none_checks(self) -> None:
+ tests = [
+ (ast.alias, "name", "import spam as SPAM"),
+ (ast.arg, "arg", "def spam(SPAM): spam"),
+ (ast.comprehension, "target", "[spam for SPAM in spam]"),
+ (ast.comprehension, "iter", "[spam for spam in SPAM]"),
+ (ast.keyword, "value", "spam(**SPAM)"),
+ (ast.match_case, "pattern", "match spam:\n case SPAM: spam"),
+ (ast.withitem, "context_expr", "with SPAM: spam"),
+ ]
+ for node, attr, source in tests:
+ self.assert_none_check(node, attr, source)
+
+
+class CopyTests(unittest.TestCase):
+ """Test copying and pickling AST nodes."""
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_pickling(self):
+ import pickle
+
+ for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
+ for code in exec_tests:
+ with self.subTest(code=code, protocol=protocol):
+ tree = compile(code, "?", "exec", 0x400)
+ ast2 = pickle.loads(pickle.dumps(tree, protocol))
+ self.assertEqual(to_tuple(ast2), to_tuple(tree))
+
+ def test_copy_with_parents(self):
+ # gh-120108
+ code = """
+ ('',)
+ while i < n:
+ if ch == '':
+ ch = format[i]
+ if ch == '':
+ if freplace is None:
+ '' % getattr(object)
+ elif ch == '':
+ if zreplace is None:
+ if hasattr:
+ offset = object.utcoffset()
+ if offset is not None:
+ if offset.days < 0:
+ offset = -offset
+ h = divmod(timedelta(hours=0))
+ if u:
+ zreplace = '' % (sign,)
+ elif s:
+ zreplace = '' % (sign,)
+ else:
+ zreplace = '' % (sign,)
+ elif ch == '':
+ if Zreplace is None:
+ Zreplace = ''
+ if hasattr(object):
+ s = object.tzname()
+ if s is not None:
+ Zreplace = s.replace('')
+ newformat.append(Zreplace)
+ else:
+ push('')
+ else:
+ push(ch)
+
+ """
+ tree = ast.parse(textwrap.dedent(code))
+ for node in ast.walk(tree):
+ for child in ast.iter_child_nodes(node):
+ child.parent = node
+ try:
+ with support.infinite_recursion(200):
+ tree2 = copy.deepcopy(tree)
+ finally:
+ # Singletons like ast.Load() are shared; make sure we don't
+ # leave them mutated after this test.
+ for node in ast.walk(tree):
+ if hasattr(node, "parent"):
+ del node.parent
+
+ for node in ast.walk(tree2):
+ for child in ast.iter_child_nodes(node):
+ if hasattr(child, "parent") and not isinstance(
+ child,
+ (
+ ast.expr_context,
+ ast.boolop,
+ ast.unaryop,
+ ast.cmpop,
+ ast.operator,
+ ),
+ ):
+ self.assertEqual(to_tuple(child.parent), to_tuple(node))
+
+
+class ASTHelpers_Test(unittest.TestCase):
+ maxDiff = None
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_parse(self):
+ a = ast.parse("foo(1 + 1)")
+ b = compile("foo(1 + 1)", "", "exec", ast.PyCF_ONLY_AST)
+ self.assertEqual(ast.dump(a), ast.dump(b))
+
+ def test_parse_in_error(self):
+ try:
+ 1 / 0
+ except Exception:
+ with self.assertRaises(SyntaxError) as e:
+ ast.literal_eval(r"'\U'")
+ self.assertIsNotNone(e.exception.__context__)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_dump(self):
+ node = ast.parse('spam(eggs, "and cheese")')
+ self.assertEqual(
+ ast.dump(node),
+ "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), "
+ "args=[Name(id='eggs', ctx=Load()), Constant(value='and cheese')]))])",
+ )
+ self.assertEqual(
+ ast.dump(node, annotate_fields=False),
+ "Module([Expr(Call(Name('spam', Load()), [Name('eggs', Load()), "
+ "Constant('and cheese')]))])",
+ )
+ self.assertEqual(
+ ast.dump(node, include_attributes=True),
+ "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load(), "
+ "lineno=1, col_offset=0, end_lineno=1, end_col_offset=4), "
+ "args=[Name(id='eggs', ctx=Load(), lineno=1, col_offset=5, "
+ "end_lineno=1, end_col_offset=9), Constant(value='and cheese', "
+ "lineno=1, col_offset=11, end_lineno=1, end_col_offset=23)], "
+ "lineno=1, col_offset=0, end_lineno=1, end_col_offset=24), "
+ "lineno=1, col_offset=0, end_lineno=1, end_col_offset=24)])",
+ )
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_dump_indent(self):
+ node = ast.parse('spam(eggs, "and cheese")')
+ self.assertEqual(
+ ast.dump(node, indent=3),
+ """\
+Module(
+ body=[
+ Expr(
+ value=Call(
+ func=Name(id='spam', ctx=Load()),
+ args=[
+ Name(id='eggs', ctx=Load()),
+ Constant(value='and cheese')]))])""",
+ )
+ self.assertEqual(
+ ast.dump(node, annotate_fields=False, indent="\t"),
+ """\
+Module(
+\t[
+\t\tExpr(
+\t\t\tCall(
+\t\t\t\tName('spam', Load()),
+\t\t\t\t[
+\t\t\t\t\tName('eggs', Load()),
+\t\t\t\t\tConstant('and cheese')]))])""",
+ )
+ self.assertEqual(
+ ast.dump(node, include_attributes=True, indent=3),
+ """\
+Module(
+ body=[
+ Expr(
+ value=Call(
+ func=Name(
+ id='spam',
+ ctx=Load(),
+ lineno=1,
+ col_offset=0,
+ end_lineno=1,
+ end_col_offset=4),
+ args=[
+ Name(
+ id='eggs',
+ ctx=Load(),
+ lineno=1,
+ col_offset=5,
+ end_lineno=1,
+ end_col_offset=9),
+ Constant(
+ value='and cheese',
+ lineno=1,
+ col_offset=11,
+ end_lineno=1,
+ end_col_offset=23)],
+ lineno=1,
+ col_offset=0,
+ end_lineno=1,
+ end_col_offset=24),
+ lineno=1,
+ col_offset=0,
+ end_lineno=1,
+ end_col_offset=24)])""",
+ )
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_dump_incomplete(self):
+ node = ast.Raise(lineno=3, col_offset=4)
+ self.assertEqual(ast.dump(node), "Raise()")
+ self.assertEqual(
+ ast.dump(node, include_attributes=True), "Raise(lineno=3, col_offset=4)"
+ )
+ node = ast.Raise(exc=ast.Name(id="e", ctx=ast.Load()), lineno=3, col_offset=4)
+ self.assertEqual(ast.dump(node), "Raise(exc=Name(id='e', ctx=Load()))")
+ self.assertEqual(
+ ast.dump(node, annotate_fields=False), "Raise(Name('e', Load()))"
+ )
+ self.assertEqual(
+ ast.dump(node, include_attributes=True),
+ "Raise(exc=Name(id='e', ctx=Load()), lineno=3, col_offset=4)",
+ )
+ self.assertEqual(
+ ast.dump(node, annotate_fields=False, include_attributes=True),
+ "Raise(Name('e', Load()), lineno=3, col_offset=4)",
+ )
+ node = ast.Raise(cause=ast.Name(id="e", ctx=ast.Load()))
+ self.assertEqual(ast.dump(node), "Raise(cause=Name(id='e', ctx=Load()))")
+ self.assertEqual(
+ ast.dump(node, annotate_fields=False), "Raise(cause=Name('e', Load()))"
+ )
+ # Arguments:
+ node = ast.arguments(args=[ast.arg("x")])
+ self.assertEqual(
+ ast.dump(node, annotate_fields=False),
+ "arguments([], [arg('x')])",
+ )
+ node = ast.arguments(posonlyargs=[ast.arg("x")])
+ self.assertEqual(
+ ast.dump(node, annotate_fields=False),
+ "arguments([arg('x')])",
+ )
+ node = ast.arguments(posonlyargs=[ast.arg("x")], kwonlyargs=[ast.arg("y")])
+ self.assertEqual(
+ ast.dump(node, annotate_fields=False),
+ "arguments([arg('x')], kwonlyargs=[arg('y')])",
+ )
+ node = ast.arguments(args=[ast.arg("x")], kwonlyargs=[ast.arg("y")])
+ self.assertEqual(
+ ast.dump(node, annotate_fields=False),
+ "arguments([], [arg('x')], kwonlyargs=[arg('y')])",
+ )
+ node = ast.arguments()
+ self.assertEqual(
+ ast.dump(node, annotate_fields=False),
+ "arguments()",
+ )
+ # Classes:
+ node = ast.ClassDef(
+ "T",
+ [],
+ [ast.keyword("a", ast.Constant(None))],
+ [],
+ [ast.Name("dataclass", ctx=ast.Load())],
+ )
+ self.assertEqual(
+ ast.dump(node),
+ "ClassDef(name='T', keywords=[keyword(arg='a', value=Constant(value=None))], decorator_list=[Name(id='dataclass', ctx=Load())])",
+ )
+ self.assertEqual(
+ ast.dump(node, annotate_fields=False),
+ "ClassDef('T', [], [keyword('a', Constant(None))], [], [Name('dataclass', Load())])",
+ )
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_dump_show_empty(self):
+ def check_node(node, empty, full, **kwargs):
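+ # `empty` is the expected dump with show_empty=False,
+ # `full` is the expected dump with show_empty=True.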
+ with self.subTest(show_empty=False):
+ self.assertEqual(
+ ast.dump(node, show_empty=False, **kwargs),
+ empty,
+ )
+ with self.subTest(show_empty=True):
+ self.assertEqual(
+ ast.dump(node, show_empty=True, **kwargs),
+ full,
+ )
+
+ def check_text(code, empty, full, **kwargs):
+ check_node(ast.parse(code), empty, full, **kwargs)
+
+ check_node(
+ ast.arguments(),
+ empty="arguments()",
+ full="arguments(posonlyargs=[], args=[], kwonlyargs=[], kw_defaults=[], defaults=[])",
+ )
+
+ check_node(
+ # Corner case: there are no real `Name` instances with `id=''`:
+ ast.Name(id="", ctx=ast.Load()),
+ empty="Name(id='', ctx=Load())",
+ full="Name(id='', ctx=Load())",
+ )
+
+ check_node(
+ ast.MatchSingleton(value=None),
+ empty="MatchSingleton(value=None)",
+ full="MatchSingleton(value=None)",
+ )
+
+ check_node(
+ ast.MatchSingleton(value=[]),
+ empty="MatchSingleton(value=[])",
+ full="MatchSingleton(value=[])",
+ )
+
+ check_node(
+ ast.Constant(value=None),
+ empty="Constant(value=None)",
+ full="Constant(value=None)",
+ )
+
+ check_node(
+ ast.Constant(value=[]),
+ empty="Constant(value=[])",
+ full="Constant(value=[])",
+ )
+
+ check_node(
+ ast.Constant(value=""),
+ empty="Constant(value='')",
+ full="Constant(value='')",
+ )
+
+ check_text(
+ "def a(b: int = 0, *, c): ...",
+ empty="Module(body=[FunctionDef(name='a', args=arguments(args=[arg(arg='b', annotation=Name(id='int', ctx=Load()))], kwonlyargs=[arg(arg='c')], kw_defaults=[None], defaults=[Constant(value=0)]), body=[Expr(value=Constant(value=Ellipsis))])])",
+ full="Module(body=[FunctionDef(name='a', args=arguments(posonlyargs=[], args=[arg(arg='b', annotation=Name(id='int', ctx=Load()))], kwonlyargs=[arg(arg='c')], kw_defaults=[None], defaults=[Constant(value=0)]), body=[Expr(value=Constant(value=Ellipsis))], decorator_list=[], type_params=[])], type_ignores=[])",
+ )
+
+ check_text(
+ "def a(b: int = 0, *, c): ...",
+ empty="Module(body=[FunctionDef(name='a', args=arguments(args=[arg(arg='b', annotation=Name(id='int', ctx=Load(), lineno=1, col_offset=9, end_lineno=1, end_col_offset=12), lineno=1, col_offset=6, end_lineno=1, end_col_offset=12)], kwonlyargs=[arg(arg='c', lineno=1, col_offset=21, end_lineno=1, end_col_offset=22)], kw_defaults=[None], defaults=[Constant(value=0, lineno=1, col_offset=15, end_lineno=1, end_col_offset=16)]), body=[Expr(value=Constant(value=Ellipsis, lineno=1, col_offset=25, end_lineno=1, end_col_offset=28), lineno=1, col_offset=25, end_lineno=1, end_col_offset=28)], lineno=1, col_offset=0, end_lineno=1, end_col_offset=28)])",
+ full="Module(body=[FunctionDef(name='a', args=arguments(posonlyargs=[], args=[arg(arg='b', annotation=Name(id='int', ctx=Load(), lineno=1, col_offset=9, end_lineno=1, end_col_offset=12), lineno=1, col_offset=6, end_lineno=1, end_col_offset=12)], kwonlyargs=[arg(arg='c', lineno=1, col_offset=21, end_lineno=1, end_col_offset=22)], kw_defaults=[None], defaults=[Constant(value=0, lineno=1, col_offset=15, end_lineno=1, end_col_offset=16)]), body=[Expr(value=Constant(value=Ellipsis, lineno=1, col_offset=25, end_lineno=1, end_col_offset=28), lineno=1, col_offset=25, end_lineno=1, end_col_offset=28)], decorator_list=[], type_params=[], lineno=1, col_offset=0, end_lineno=1, end_col_offset=28)], type_ignores=[])",
+ include_attributes=True,
+ )
+
+ check_text(
+ 'spam(eggs, "and cheese")',
+ empty="Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), args=[Name(id='eggs', ctx=Load()), Constant(value='and cheese')]))])",
+ full="Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), args=[Name(id='eggs', ctx=Load()), Constant(value='and cheese')], keywords=[]))], type_ignores=[])",
+ )
+
+ check_text(
+ 'spam(eggs, text="and cheese")',
+ empty="Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), args=[Name(id='eggs', ctx=Load())], keywords=[keyword(arg='text', value=Constant(value='and cheese'))]))])",
+ full="Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), args=[Name(id='eggs', ctx=Load())], keywords=[keyword(arg='text', value=Constant(value='and cheese'))]))], type_ignores=[])",
+ )
+
+ check_text(
+ "import _ast as ast; from module import sub",
+ empty="Module(body=[Import(names=[alias(name='_ast', asname='ast')]), ImportFrom(module='module', names=[alias(name='sub')], level=0)])",
+ full="Module(body=[Import(names=[alias(name='_ast', asname='ast')]), ImportFrom(module='module', names=[alias(name='sub')], level=0)], type_ignores=[])",
+ )
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_copy_location(self):
+ src = ast.parse("1 + 1", mode="eval")
+ src.body.right = ast.copy_location(ast.Constant(2), src.body.right)
+ self.assertEqual(
+ ast.dump(src, include_attributes=True),
+ "Expression(body=BinOp(left=Constant(value=1, lineno=1, col_offset=0, "
+ "end_lineno=1, end_col_offset=1), op=Add(), right=Constant(value=2, "
+ "lineno=1, col_offset=4, end_lineno=1, end_col_offset=5), lineno=1, "
+ "col_offset=0, end_lineno=1, end_col_offset=5))",
+ )
+ func = ast.Name("spam", ast.Load())
+ src = ast.Call(
+ col_offset=1, lineno=1, end_lineno=1, end_col_offset=1, func=func
+ )
+ new = ast.copy_location(src, ast.Call(col_offset=None, lineno=None, func=func))
+ self.assertIsNone(new.end_lineno)
+ self.assertIsNone(new.end_col_offset)
+ self.assertEqual(new.lineno, 1)
+ self.assertEqual(new.col_offset, 1)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_fix_missing_locations(self):
+ src = ast.parse('write("spam")')
+ src.body.append(
+ ast.Expr(ast.Call(ast.Name("spam", ast.Load()), [ast.Constant("eggs")], []))
+ )
+ self.assertEqual(src, ast.fix_missing_locations(src))
+ self.maxDiff = None
+ self.assertEqual(
+ ast.dump(src, include_attributes=True),
+ "Module(body=[Expr(value=Call(func=Name(id='write', ctx=Load(), "
+ "lineno=1, col_offset=0, end_lineno=1, end_col_offset=5), "
+ "args=[Constant(value='spam', lineno=1, col_offset=6, end_lineno=1, "
+ "end_col_offset=12)], lineno=1, col_offset=0, end_lineno=1, "
+ "end_col_offset=13), lineno=1, col_offset=0, end_lineno=1, "
+ "end_col_offset=13), Expr(value=Call(func=Name(id='spam', ctx=Load(), "
+ "lineno=1, col_offset=0, end_lineno=1, end_col_offset=0), "
+ "args=[Constant(value='eggs', lineno=1, col_offset=0, end_lineno=1, "
+ "end_col_offset=0)], lineno=1, col_offset=0, end_lineno=1, "
+ "end_col_offset=0), lineno=1, col_offset=0, end_lineno=1, end_col_offset=0)])",
+ )
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_increment_lineno(self):
+ src = ast.parse("1 + 1", mode="eval")
+ self.assertEqual(ast.increment_lineno(src, n=3), src)
+ self.assertEqual(
+ ast.dump(src, include_attributes=True),
+ "Expression(body=BinOp(left=Constant(value=1, lineno=4, col_offset=0, "
+ "end_lineno=4, end_col_offset=1), op=Add(), right=Constant(value=1, "
+ "lineno=4, col_offset=4, end_lineno=4, end_col_offset=5), lineno=4, "
+ "col_offset=0, end_lineno=4, end_col_offset=5))",
+ )
+ # issue10869: do not increment lineno of root twice
+ src = ast.parse("1 + 1", mode="eval")
+ self.assertEqual(ast.increment_lineno(src.body, n=3), src.body)
+ self.assertEqual(
+ ast.dump(src, include_attributes=True),
+ "Expression(body=BinOp(left=Constant(value=1, lineno=4, col_offset=0, "
+ "end_lineno=4, end_col_offset=1), op=Add(), right=Constant(value=1, "
+ "lineno=4, col_offset=4, end_lineno=4, end_col_offset=5), lineno=4, "
+ "col_offset=0, end_lineno=4, end_col_offset=5))",
+ )
+ src = ast.Call(
+ func=ast.Name("test", ast.Load()), args=[], keywords=[], lineno=1
+ )
+ self.assertEqual(ast.increment_lineno(src).lineno, 2)
+ self.assertIsNone(ast.increment_lineno(src).end_lineno)
+
+ # TODO: RUSTPYTHON; IndexError: index out of range
+ @unittest.expectedFailure
+ def test_increment_lineno_on_module(self):
+ src = ast.parse(
+ dedent("""\
+ a = 1
+ b = 2 # type: ignore
+ c = 3
+ d = 4 # type: ignore@tag
+ """),
+ type_comments=True,
+ )
+ ast.increment_lineno(src, n=5)
+ self.assertEqual(src.type_ignores[0].lineno, 7)
+ self.assertEqual(src.type_ignores[1].lineno, 9)
+ self.assertEqual(src.type_ignores[1].tag, "@tag")
+
+ def test_iter_fields(self):
+ node = ast.parse("foo()", mode="eval")
+ d = dict(ast.iter_fields(node.body))
+ self.assertEqual(d.pop("func").id, "foo")
+ self.assertEqual(d, {"keywords": [], "args": []})
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_iter_child_nodes(self):
+ node = ast.parse("spam(23, 42, eggs='leek')", mode="eval")
+ self.assertEqual(len(list(ast.iter_child_nodes(node.body))), 4)
+ iterator = ast.iter_child_nodes(node.body)
+ self.assertEqual(next(iterator).id, "spam")
+ self.assertEqual(next(iterator).value, 23)
+ self.assertEqual(next(iterator).value, 42)
+ self.assertEqual(
+ ast.dump(next(iterator)),
+ "keyword(arg='eggs', value=Constant(value='leek'))",
+ )
+
+ def test_get_docstring(self):
+ node = ast.parse('"""line one\n line two"""')
+ self.assertEqual(ast.get_docstring(node), "line one\nline two")
+
+ node = ast.parse('class foo:\n """line one\n line two"""')
+ self.assertEqual(ast.get_docstring(node.body[0]), "line one\nline two")
+
+ node = ast.parse('def foo():\n """line one\n line two"""')
+ self.assertEqual(ast.get_docstring(node.body[0]), "line one\nline two")
+
+ node = ast.parse('async def foo():\n """spam\n ham"""')
+ self.assertEqual(ast.get_docstring(node.body[0]), "spam\nham")
+
+ node = ast.parse('async def foo():\n """spam\n ham"""')
+ self.assertEqual(ast.get_docstring(node.body[0], clean=False), "spam\n ham")
+
+ node = ast.parse("x")
+ self.assertRaises(TypeError, ast.get_docstring, node.body[0])
+
+ def test_get_docstring_none(self):
+ self.assertIsNone(ast.get_docstring(ast.parse("")))
+ node = ast.parse('x = "not docstring"')
+ self.assertIsNone(ast.get_docstring(node))
+ node = ast.parse("def foo():\n pass")
+ self.assertIsNone(ast.get_docstring(node))
+
+ node = ast.parse("class foo:\n pass")
+ self.assertIsNone(ast.get_docstring(node.body[0]))
+ node = ast.parse('class foo:\n x = "not docstring"')
+ self.assertIsNone(ast.get_docstring(node.body[0]))
+ node = ast.parse("class foo:\n def bar(self): pass")
+ self.assertIsNone(ast.get_docstring(node.body[0]))
+
+ node = ast.parse("def foo():\n pass")
+ self.assertIsNone(ast.get_docstring(node.body[0]))
+ node = ast.parse('def foo():\n x = "not docstring"')
+ self.assertIsNone(ast.get_docstring(node.body[0]))
+
+ node = ast.parse("async def foo():\n pass")
+ self.assertIsNone(ast.get_docstring(node.body[0]))
+ node = ast.parse('async def foo():\n x = "not docstring"')
+ self.assertIsNone(ast.get_docstring(node.body[0]))
+
+ node = ast.parse("async def foo():\n 42")
+ self.assertIsNone(ast.get_docstring(node.body[0]))
+
+ def test_multi_line_docstring_col_offset_and_lineno_issue16806(self):
+ node = ast.parse(
+ '"""line one\nline two"""\n\n'
+ 'def foo():\n """line one\n line two"""\n\n'
+ ' def bar():\n """line one\n line two"""\n'
+ ' """line one\n line two"""\n'
+ '"""line one\nline two"""\n\n'
+ )
+ self.assertEqual(node.body[0].col_offset, 0)
+ self.assertEqual(node.body[0].lineno, 1)
+ self.assertEqual(node.body[1].body[0].col_offset, 2)
+ self.assertEqual(node.body[1].body[0].lineno, 5)
+ self.assertEqual(node.body[1].body[1].body[0].col_offset, 4)
+ self.assertEqual(node.body[1].body[1].body[0].lineno, 9)
+ self.assertEqual(node.body[1].body[2].col_offset, 2)
+ self.assertEqual(node.body[1].body[2].lineno, 11)
+ self.assertEqual(node.body[2].col_offset, 0)
+ self.assertEqual(node.body[2].lineno, 13)
+
+ def test_elif_stmt_start_position(self):
+ node = ast.parse("if a:\n pass\nelif b:\n pass\n")
+ elif_stmt = node.body[0].orelse[0]
+ self.assertEqual(elif_stmt.lineno, 3)
+ self.assertEqual(elif_stmt.col_offset, 0)
+
+ def test_elif_stmt_start_position_with_else(self):
+ node = ast.parse("if a:\n pass\nelif b:\n pass\nelse:\n pass\n")
+ elif_stmt = node.body[0].orelse[0]
+ self.assertEqual(elif_stmt.lineno, 3)
+ self.assertEqual(elif_stmt.col_offset, 0)
+
+ def test_starred_expr_end_position_within_call(self):
+ node = ast.parse("f(*[0, 1])")
+ starred_expr = node.body[0].value.args[0]
+ self.assertEqual(starred_expr.end_lineno, 1)
+ self.assertEqual(starred_expr.end_col_offset, 9)
+
+ def test_literal_eval(self):
+ self.assertEqual(ast.literal_eval("[1, 2, 3]"), [1, 2, 3])
+ self.assertEqual(ast.literal_eval('{"foo": 42}'), {"foo": 42})
+ self.assertEqual(ast.literal_eval("(True, False, None)"), (True, False, None))
+ self.assertEqual(ast.literal_eval("{1, 2, 3}"), {1, 2, 3})
+ self.assertEqual(ast.literal_eval('b"hi"'), b"hi")
+ self.assertEqual(ast.literal_eval("set()"), set())
+ self.assertRaises(ValueError, ast.literal_eval, "foo()")
+ self.assertEqual(ast.literal_eval("6"), 6)
+ self.assertEqual(ast.literal_eval("+6"), 6)
+ self.assertEqual(ast.literal_eval("-6"), -6)
+ self.assertEqual(ast.literal_eval("3.25"), 3.25)
+ self.assertEqual(ast.literal_eval("+3.25"), 3.25)
+ self.assertEqual(ast.literal_eval("-3.25"), -3.25)
+ self.assertEqual(repr(ast.literal_eval("-0.0")), "-0.0")
+ self.assertRaises(ValueError, ast.literal_eval, "++6")
+ self.assertRaises(ValueError, ast.literal_eval, "+True")
+ self.assertRaises(ValueError, ast.literal_eval, "2+3")
+
+ # TODO: RUSTPYTHON; SyntaxError not raised
+ @unittest.expectedFailure
+ def test_literal_eval_str_int_limit(self):
+ with support.adjust_int_max_str_digits(4000):
+ ast.literal_eval("3" * 4000) # no error
+ with self.assertRaises(SyntaxError) as err_ctx:
+ ast.literal_eval("3" * 4001)
+ self.assertIn("Exceeds the limit ", str(err_ctx.exception))
+ self.assertIn(" Consider hexadecimal ", str(err_ctx.exception))
+
+ def test_literal_eval_complex(self):
+ # Issue #4907
+ self.assertEqual(ast.literal_eval("6j"), 6j)
+ self.assertEqual(ast.literal_eval("-6j"), -6j)
+ self.assertEqual(ast.literal_eval("6.75j"), 6.75j)
+ self.assertEqual(ast.literal_eval("-6.75j"), -6.75j)
+ self.assertEqual(ast.literal_eval("3+6j"), 3 + 6j)
+ self.assertEqual(ast.literal_eval("-3+6j"), -3 + 6j)
+ self.assertEqual(ast.literal_eval("3-6j"), 3 - 6j)
+ self.assertEqual(ast.literal_eval("-3-6j"), -3 - 6j)
+ self.assertEqual(ast.literal_eval("3.25+6.75j"), 3.25 + 6.75j)
+ self.assertEqual(ast.literal_eval("-3.25+6.75j"), -3.25 + 6.75j)
+ self.assertEqual(ast.literal_eval("3.25-6.75j"), 3.25 - 6.75j)
+ self.assertEqual(ast.literal_eval("-3.25-6.75j"), -3.25 - 6.75j)
+ self.assertEqual(ast.literal_eval("(3+6j)"), 3 + 6j)
+ self.assertRaises(ValueError, ast.literal_eval, "-6j+3")
+ self.assertRaises(ValueError, ast.literal_eval, "-6j+3j")
+ self.assertRaises(ValueError, ast.literal_eval, "3+-6j")
+ self.assertRaises(ValueError, ast.literal_eval, "3+(0+6j)")
+ self.assertRaises(ValueError, ast.literal_eval, "-(3+6j)")
+
+ def test_literal_eval_malformed_dict_nodes(self):
+ malformed = ast.Dict(
+ keys=[ast.Constant(1), ast.Constant(2)], values=[ast.Constant(3)]
+ )
+ self.assertRaises(ValueError, ast.literal_eval, malformed)
+ malformed = ast.Dict(
+ keys=[ast.Constant(1)], values=[ast.Constant(2), ast.Constant(3)]
+ )
+ self.assertRaises(ValueError, ast.literal_eval, malformed)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_literal_eval_trailing_ws(self):
+ self.assertEqual(ast.literal_eval(" -1"), -1)
+ self.assertEqual(ast.literal_eval("\t\t-1"), -1)
+ self.assertEqual(ast.literal_eval(" \t -1"), -1)
+ self.assertRaises(IndentationError, ast.literal_eval, "\n -1")
+
+ def test_literal_eval_malformed_lineno(self):
+ msg = r"malformed node or string on line 3:"
+ with self.assertRaisesRegex(ValueError, msg):
+ ast.literal_eval("{'a': 1,\n'b':2,\n'c':++3,\n'd':4}")
+
+ node = ast.UnaryOp(ast.UAdd(), ast.UnaryOp(ast.UAdd(), ast.Constant(6)))
+ self.assertIsNone(getattr(node, "lineno", None))
+ msg = r"malformed node or string:"
+ with self.assertRaisesRegex(ValueError, msg):
+ ast.literal_eval(node)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_literal_eval_syntax_errors(self):
+ with self.assertRaisesRegex(SyntaxError, "unexpected indent"):
+ ast.literal_eval(r"""
+ \
+ (\
+ \ """)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_bad_integer(self):
+ # issue13436: Bad error message with invalid numeric values
+ body = [
+ ast.ImportFrom(
+ module="time",
+ names=[ast.alias(name="sleep")],
+ level=None,
+ lineno=None,
+ col_offset=None,
+ )
+ ]
+ mod = ast.Module(body, [])
+ with self.assertRaises(ValueError) as cm:
+ compile(mod, "test", "exec")
+ self.assertIn("invalid integer value: None", str(cm.exception))
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_level_as_none(self):
+ body = [
+ ast.ImportFrom(
+ module="time",
+ names=[ast.alias(name="sleep", lineno=0, col_offset=0)],
+ level=None,
+ lineno=0,
+ col_offset=0,
+ )
+ ]
+ mod = ast.Module(body, [])
+ code = compile(mod, "test", "exec")
+ ns = {}
+ exec(code, ns)
+ self.assertIn("sleep", ns)
+
+ # TODO: RUSTPYTHON
+ @unittest.skip("TODO: RUSTPYTHON; crash")
+ def test_recursion_direct(self):
+ e = ast.UnaryOp(op=ast.Not(), lineno=0, col_offset=0, operand=ast.Constant(1))
+ e.operand = e
+ with self.assertRaises(RecursionError):
+ with support.infinite_recursion():
+ compile(ast.Expression(e), "", "eval")
+
+ # TODO: RUSTPYTHON
+ @unittest.skip("TODO: RUSTPYTHON; crash")
+ def test_recursion_indirect(self):
+ e = ast.UnaryOp(op=ast.Not(), lineno=0, col_offset=0, operand=ast.Constant(1))
+ f = ast.UnaryOp(op=ast.Not(), lineno=0, col_offset=0, operand=ast.Constant(1))
+ e.operand = f
+ f.operand = e
+ with self.assertRaises(RecursionError):
+ with support.infinite_recursion():
+ compile(ast.Expression(e), "", "eval")
+
+
+class ASTValidatorTests(unittest.TestCase):
+ def mod(self, mod, msg=None, mode="exec", *, exc=ValueError):
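+ # Compile `mod`; when `msg` is given, expect `exc` to be raised with
+ # `msg` somewhere in its message.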
+ mod.lineno = mod.col_offset = 0
+ ast.fix_missing_locations(mod)
+ if msg is None:
+ compile(mod, "", mode)
+ else:
+ with self.assertRaises(exc) as cm:
+ compile(mod, "", mode)
+ self.assertIn(msg, str(cm.exception))
+
+ def expr(self, node, msg=None, *, exc=ValueError):
+ mod = ast.Module([ast.Expr(node)], [])
+ self.mod(mod, msg, exc=exc)
+
+ def stmt(self, stmt, msg=None):
+ mod = ast.Module([stmt], [])
+ self.mod(mod, msg)
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_module(self):
+ m = ast.Interactive([ast.Expr(ast.Name("x", ast.Store()))])
+ self.mod(m, "must have Load context", "single")
+ m = ast.Expression(ast.Name("x", ast.Store()))
+ self.mod(m, "must have Load context", "eval")
+
+ def _check_arguments(self, fac, check):
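+ # Build an ast.arguments with the given field overrides, wrap it via
+ # `fac`, and use `check` to assert the expected validation error.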
+ def arguments(
+ args=None,
+ posonlyargs=None,
+ vararg=None,
+ kwonlyargs=None,
+ kwarg=None,
+ defaults=None,
+ kw_defaults=None,
+ ):
+ if args is None:
+ args = []
+ if posonlyargs is None:
+ posonlyargs = []
+ if kwonlyargs is None:
+ kwonlyargs = []
+ if defaults is None:
+ defaults = []
+ if kw_defaults is None:
+ kw_defaults = []
+ args = ast.arguments(
+ args, posonlyargs, vararg, kwonlyargs, kw_defaults, kwarg, defaults
+ )
+ return fac(args)
+
+ args = [ast.arg("x", ast.Name("x", ast.Store()))]
+ check(arguments(args=args), "must have Load context")
+ check(arguments(posonlyargs=args), "must have Load context")
+ check(arguments(kwonlyargs=args), "must have Load context")
+ check(
+ arguments(defaults=[ast.Constant(3)]), "more positional defaults than args"
+ )
+ check(
+ arguments(kw_defaults=[ast.Constant(4)]),
+ "length of kwonlyargs is not the same as kw_defaults",
+ )
+ args = [ast.arg("x", ast.Name("x", ast.Load()))]
+ check(
+ arguments(args=args, defaults=[ast.Name("x", ast.Store())]),
+ "must have Load context",
+ )
+ args = [
+ ast.arg("a", ast.Name("x", ast.Load())),
+ ast.arg("b", ast.Name("y", ast.Load())),
+ ]
+ check(
+ arguments(kwonlyargs=args, kw_defaults=[None, ast.Name("x", ast.Store())]),
+ "must have Load context",
+ )
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_funcdef(self):
+ a = ast.arguments([], [], None, [], [], None, [])
+ f = ast.FunctionDef("x", a, [], [], None, None, [])
+ self.stmt(f, "empty body on FunctionDef")
+ f = ast.FunctionDef(
+ "x", a, [ast.Pass()], [ast.Name("x", ast.Store())], None, None, []
+ )
+ self.stmt(f, "must have Load context")
+ f = ast.FunctionDef(
+ "x", a, [ast.Pass()], [], ast.Name("x", ast.Store()), None, []
+ )
+ self.stmt(f, "must have Load context")
+ f = ast.FunctionDef("x", ast.arguments(), [ast.Pass()])
+ self.stmt(f)
+
+ def fac(args):
+ return ast.FunctionDef("x", args, [ast.Pass()], [], None, None, [])
+
+ self._check_arguments(fac, self.stmt)
+
+ # TODO: RUSTPYTHON; called `Result::unwrap()` on an `Err` value: StackUnderflow
+ '''
+ def test_funcdef_pattern_matching(self):
+ # gh-104799: New fields on FunctionDef should be added at the end
+ def matcher(node):
+ match node:
+ case ast.FunctionDef(
+ "foo",
+ ast.arguments(args=[ast.arg("bar")]),
+ [ast.Pass()],
+ [ast.Name("capybara", ast.Load())],
+ ast.Name("pacarana", ast.Load()),
+ ):
+ return True
+ case _:
+ return False
+
+ code = """
+ @capybara
+ def foo(bar) -> pacarana:
+ pass
+ """
+ source = ast.parse(textwrap.dedent(code))
+ funcdef = source.body[0]
+ self.assertIsInstance(funcdef, ast.FunctionDef)
+ self.assertTrue(matcher(funcdef))
+ '''
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_classdef(self):
+ def cls(
+ bases=None, keywords=None, body=None, decorator_list=None, type_params=None
+ ):
+ if bases is None:
+ bases = []
+ if keywords is None:
+ keywords = []
+ if body is None:
+ body = [ast.Pass()]
+ if decorator_list is None:
+ decorator_list = []
+ if type_params is None:
+ type_params = []
+ return ast.ClassDef(
+ "myclass", bases, keywords, body, decorator_list, type_params
+ )
+
+ self.stmt(cls(bases=[ast.Name("x", ast.Store())]), "must have Load context")
+ self.stmt(
+ cls(keywords=[ast.keyword("x", ast.Name("x", ast.Store()))]),
+ "must have Load context",
+ )
+ self.stmt(cls(body=[]), "empty body on ClassDef")
+ self.stmt(cls(body=[None]), "None disallowed")
+ self.stmt(
+ cls(decorator_list=[ast.Name("x", ast.Store())]), "must have Load context"
+ )
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_delete(self):
+ self.stmt(ast.Delete([]), "empty targets on Delete")
+ self.stmt(ast.Delete([None]), "None disallowed")
+ self.stmt(ast.Delete([ast.Name("x", ast.Load())]), "must have Del context")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_assign(self):
+ self.stmt(ast.Assign([], ast.Constant(3)), "empty targets on Assign")
+ self.stmt(ast.Assign([None], ast.Constant(3)), "None disallowed")
+ self.stmt(
+ ast.Assign([ast.Name("x", ast.Load())], ast.Constant(3)),
+ "must have Store context",
+ )
+ self.stmt(
+ ast.Assign([ast.Name("x", ast.Store())], ast.Name("y", ast.Store())),
+ "must have Load context",
+ )
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_augassign(self):
+ aug = ast.AugAssign(
+ ast.Name("x", ast.Load()), ast.Add(), ast.Name("y", ast.Load())
+ )
+ self.stmt(aug, "must have Store context")
+ aug = ast.AugAssign(
+ ast.Name("x", ast.Store()), ast.Add(), ast.Name("y", ast.Store())
+ )
+ self.stmt(aug, "must have Load context")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_for(self):
+ x = ast.Name("x", ast.Store())
+ y = ast.Name("y", ast.Load())
+ p = ast.Pass()
+ self.stmt(ast.For(x, y, [], []), "empty body on For")
+ self.stmt(
+ ast.For(ast.Name("x", ast.Load()), y, [p], []), "must have Store context"
+ )
+ self.stmt(
+ ast.For(x, ast.Name("y", ast.Store()), [p], []), "must have Load context"
+ )
+ e = ast.Expr(ast.Name("x", ast.Store()))
+ self.stmt(ast.For(x, y, [e], []), "must have Load context")
+ self.stmt(ast.For(x, y, [p], [e]), "must have Load context")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_while(self):
+ self.stmt(ast.While(ast.Constant(3), [], []), "empty body on While")
+ self.stmt(
+ ast.While(ast.Name("x", ast.Store()), [ast.Pass()], []),
+ "must have Load context",
+ )
+ self.stmt(
+ ast.While(
+ ast.Constant(3), [ast.Pass()], [ast.Expr(ast.Name("x", ast.Store()))]
+ ),
+ "must have Load context",
+ )
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_if(self):
+ self.stmt(ast.If(ast.Constant(3), [], []), "empty body on If")
+ i = ast.If(ast.Name("x", ast.Store()), [ast.Pass()], [])
+ self.stmt(i, "must have Load context")
+ i = ast.If(ast.Constant(3), [ast.Expr(ast.Name("x", ast.Store()))], [])
+ self.stmt(i, "must have Load context")
+ i = ast.If(
+ ast.Constant(3), [ast.Pass()], [ast.Expr(ast.Name("x", ast.Store()))]
+ )
+ self.stmt(i, "must have Load context")
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_with(self):
+ p = ast.Pass()
+ self.stmt(ast.With([], [p]), "empty items on With")
+ i = ast.withitem(ast.Constant(3), None)
+ self.stmt(ast.With([i], []), "empty body on With")
+ i = ast.withitem(ast.Name("x", ast.Store()), None)
+ self.stmt(ast.With([i], [p]), "must have Load context")
+ i = ast.withitem(ast.Constant(3), ast.Name("x", ast.Load()))
+ self.stmt(ast.With([i], [p]), "must have Store context")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_raise(self):
+ r = ast.Raise(None, ast.Constant(3))
+ self.stmt(r, "Raise with cause but no exception")
+ r = ast.Raise(ast.Name("x", ast.Store()), None)
+ self.stmt(r, "must have Load context")
+ r = ast.Raise(ast.Constant(4), ast.Name("x", ast.Store()))
+ self.stmt(r, "must have Load context")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_try(self):
+ p = ast.Pass()
+ t = ast.Try([], [], [], [p])
+ self.stmt(t, "empty body on Try")
+ t = ast.Try([ast.Expr(ast.Name("x", ast.Store()))], [], [], [p])
+ self.stmt(t, "must have Load context")
+ t = ast.Try([p], [], [], [])
+ self.stmt(t, "Try has neither except handlers nor finalbody")
+ t = ast.Try([p], [], [p], [p])
+ self.stmt(t, "Try has orelse but no except handlers")
+ t = ast.Try([p], [ast.ExceptHandler(None, "x", [])], [], [])
+ self.stmt(t, "empty body on ExceptHandler")
+ e = [ast.ExceptHandler(ast.Name("x", ast.Store()), "y", [p])]
+ self.stmt(ast.Try([p], e, [], []), "must have Load context")
+ e = [ast.ExceptHandler(None, "x", [p])]
+ t = ast.Try([p], e, [ast.Expr(ast.Name("x", ast.Store()))], [p])
+ self.stmt(t, "must have Load context")
+ t = ast.Try([p], e, [p], [ast.Expr(ast.Name("x", ast.Store()))])
+ self.stmt(t, "must have Load context")
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_try_star(self):
+ p = ast.Pass()
+ t = ast.TryStar([], [], [], [p])
+ self.stmt(t, "empty body on TryStar")
+ t = ast.TryStar([ast.Expr(ast.Name("x", ast.Store()))], [], [], [p])
+ self.stmt(t, "must have Load context")
+ t = ast.TryStar([p], [], [], [])
+ self.stmt(t, "TryStar has neither except handlers nor finalbody")
+ t = ast.TryStar([p], [], [p], [p])
+ self.stmt(t, "TryStar has orelse but no except handlers")
+ t = ast.TryStar([p], [ast.ExceptHandler(None, "x", [])], [], [])
+ self.stmt(t, "empty body on ExceptHandler")
+ e = [ast.ExceptHandler(ast.Name("x", ast.Store()), "y", [p])]
+ self.stmt(ast.TryStar([p], e, [], []), "must have Load context")
+ e = [ast.ExceptHandler(None, "x", [p])]
+ t = ast.TryStar([p], e, [ast.Expr(ast.Name("x", ast.Store()))], [p])
+ self.stmt(t, "must have Load context")
+ t = ast.TryStar([p], e, [p], [ast.Expr(ast.Name("x", ast.Store()))])
+ self.stmt(t, "must have Load context")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_assert(self):
+ self.stmt(
+ ast.Assert(ast.Name("x", ast.Store()), None), "must have Load context"
+ )
+ assrt = ast.Assert(ast.Name("x", ast.Load()), ast.Name("y", ast.Store()))
+ self.stmt(assrt, "must have Load context")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_import(self):
+ self.stmt(ast.Import([]), "empty names on Import")
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_importfrom(self):
+ imp = ast.ImportFrom(None, [ast.alias("x", None)], -42)
+ self.stmt(imp, "Negative ImportFrom level")
+ self.stmt(ast.ImportFrom(None, [], 0), "empty names on ImportFrom")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_global(self):
+ self.stmt(ast.Global([]), "empty names on Global")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_nonlocal(self):
+ self.stmt(ast.Nonlocal([]), "empty names on Nonlocal")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_expr(self):
+ e = ast.Expr(ast.Name("x", ast.Store()))
+ self.stmt(e, "must have Load context")
+
+ # TODO: RUSTPYTHON
+ @unittest.skip("TODO: RUSTPYTHON; called `Option::unwrap()` on a `None` value")
+ def test_boolop(self):
+ b = ast.BoolOp(ast.And(), [])
+ self.expr(b, "less than 2 values")
+ b = ast.BoolOp(ast.And(), [ast.Constant(3)])
+ self.expr(b, "less than 2 values")
+ b = ast.BoolOp(ast.And(), [ast.Constant(4), None])
+ self.expr(b, "None disallowed")
+ b = ast.BoolOp(ast.And(), [ast.Constant(4), ast.Name("x", ast.Store())])
+ self.expr(b, "must have Load context")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_unaryop(self):
+ u = ast.UnaryOp(ast.Not(), ast.Name("x", ast.Store()))
+ self.expr(u, "must have Load context")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_lambda(self):
+ a = ast.arguments([], [], None, [], [], None, [])
+ self.expr(ast.Lambda(a, ast.Name("x", ast.Store())), "must have Load context")
+
+ def fac(args):
+ return ast.Lambda(args, ast.Name("x", ast.Load()))
+
+ self._check_arguments(fac, self.expr)
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_ifexp(self):
+ l = ast.Name("x", ast.Load())
+ s = ast.Name("y", ast.Store())
+ for args in (s, l, l), (l, s, l), (l, l, s):
+ self.expr(ast.IfExp(*args), "must have Load context")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_dict(self):
+ d = ast.Dict([], [ast.Name("x", ast.Load())])
+ self.expr(d, "same number of keys as values")
+ d = ast.Dict([ast.Name("x", ast.Load())], [None])
+ self.expr(d, "None disallowed")
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_set(self):
+ self.expr(ast.Set([None]), "None disallowed")
+ s = ast.Set([ast.Name("x", ast.Store())])
+ self.expr(s, "must have Load context")
+
+ def _check_comprehension(self, fac):
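+ # Feed malformed generator lists to `fac` and expect the validator to
+ # reject missing generators, wrong contexts, and None entries.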
+ self.expr(fac([]), "comprehension with no generators")
+ g = ast.comprehension(
+ ast.Name("x", ast.Load()), ast.Name("x", ast.Load()), [], 0
+ )
+ self.expr(fac([g]), "must have Store context")
+ g = ast.comprehension(
+ ast.Name("x", ast.Store()), ast.Name("x", ast.Store()), [], 0
+ )
+ self.expr(fac([g]), "must have Load context")
+ x = ast.Name("x", ast.Store())
+ y = ast.Name("y", ast.Load())
+ g = ast.comprehension(x, y, [None], 0)
+ self.expr(fac([g]), "None disallowed")
+ g = ast.comprehension(x, y, [ast.Name("x", ast.Store())], 0)
+ self.expr(fac([g]), "must have Load context")
+
+ def _simple_comp(self, fac):
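+ # The element expression itself needs Load context; generator checks are
+ # delegated to _check_comprehension.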
+ g = ast.comprehension(
+ ast.Name("x", ast.Store()), ast.Name("x", ast.Load()), [], 0
+ )
+ self.expr(fac(ast.Name("x", ast.Store()), [g]), "must have Load context")
+
+ def wrap(gens):
+ return fac(ast.Name("x", ast.Store()), gens)
+
+ self._check_comprehension(wrap)
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_listcomp(self):
+ self._simple_comp(ast.ListComp)
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_setcomp(self):
+ self._simple_comp(ast.SetComp)
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_generatorexp(self):
+ self._simple_comp(ast.GeneratorExp)
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_dictcomp(self):
+ g = ast.comprehension(
+ ast.Name("y", ast.Store()), ast.Name("p", ast.Load()), [], 0
+ )
+ c = ast.DictComp(ast.Name("x", ast.Store()), ast.Name("y", ast.Load()), [g])
+ self.expr(c, "must have Load context")
+ c = ast.DictComp(ast.Name("x", ast.Load()), ast.Name("y", ast.Store()), [g])
+ self.expr(c, "must have Load context")
+
+ def factory(comps):
+ k = ast.Name("x", ast.Load())
+ v = ast.Name("y", ast.Load())
+ return ast.DictComp(k, v, comps)
+
+ self._check_comprehension(factory)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_yield(self):
+ self.expr(ast.Yield(ast.Name("x", ast.Store())), "must have Load")
+ self.expr(ast.YieldFrom(ast.Name("x", ast.Store())), "must have Load")
+
+ # TODO: RUSTPYTHON
+ @unittest.skip("TODO: RUSTPYTHON; thread 'main' panicked")
+ def test_compare(self):
+ left = ast.Name("x", ast.Load())
+ comp = ast.Compare(left, [ast.In()], [])
+ self.expr(comp, "no comparators")
+ comp = ast.Compare(left, [ast.In()], [ast.Constant(4), ast.Constant(5)])
+ self.expr(comp, "different number of comparators and operands")
+ comp = ast.Compare(ast.Constant("blah"), [ast.In()], [left])
+ self.expr(comp)
+ comp = ast.Compare(left, [ast.In()], [ast.Constant("blah")])
+ self.expr(comp)
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_call(self):
+ func = ast.Name("x", ast.Load())
+ args = [ast.Name("y", ast.Load())]
+ keywords = [ast.keyword("w", ast.Name("z", ast.Load()))]
+ call = ast.Call(ast.Name("x", ast.Store()), args, keywords)
+ self.expr(call, "must have Load context")
+ call = ast.Call(func, [None], keywords)
+ self.expr(call, "None disallowed")
+ bad_keywords = [ast.keyword("w", ast.Name("z", ast.Store()))]
+ call = ast.Call(func, args, bad_keywords)
+ self.expr(call, "must have Load context")
+
+ def test_num(self):
+ with warnings.catch_warnings(record=True) as wlog:
+ warnings.filterwarnings("ignore", "", DeprecationWarning)
+ from ast import Num
+
+ with warnings.catch_warnings(record=True) as wlog:
+ warnings.filterwarnings("always", "", DeprecationWarning)
+
+ class subint(int):
+ pass
+
+ class subfloat(float):
+ pass
+
+ class subcomplex(complex):
+ pass
+
+ for obj in "0", "hello":
+ self.expr(ast.Num(obj))
+ for obj in subint(), subfloat(), subcomplex():
+ self.expr(ast.Num(obj), "invalid type", exc=TypeError)
+
+ self.assertEqual(
+ [str(w.message) for w in wlog],
+ [
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ "ast.Num is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ ],
+ )
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_attribute(self):
+ attr = ast.Attribute(ast.Name("x", ast.Store()), "y", ast.Load())
+ self.expr(attr, "must have Load context")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_subscript(self):
+ sub = ast.Subscript(ast.Name("x", ast.Store()), ast.Constant(3), ast.Load())
+ self.expr(sub, "must have Load context")
+ x = ast.Name("x", ast.Load())
+ sub = ast.Subscript(x, ast.Name("y", ast.Store()), ast.Load())
+ self.expr(sub, "must have Load context")
+ s = ast.Name("x", ast.Store())
+ for args in (s, None, None), (None, s, None), (None, None, s):
+ sl = ast.Slice(*args)
+ self.expr(ast.Subscript(x, sl, ast.Load()), "must have Load context")
+ sl = ast.Tuple([], ast.Load())
+ self.expr(ast.Subscript(x, sl, ast.Load()))
+ sl = ast.Tuple([s], ast.Load())
+ self.expr(ast.Subscript(x, sl, ast.Load()), "must have Load context")
+
+ # TODO: RUSTPYTHON; ValueError not raised
+ @unittest.expectedFailure
+ def test_starred(self):
+ left = ast.List(
+ [ast.Starred(ast.Name("x", ast.Load()), ast.Store())], ast.Store()
+ )
+ assign = ast.Assign([left], ast.Constant(4))
+ self.stmt(assign, "must have Store context")
+
+ def _sequence(self, fac):
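+ # Shared checks for List/Tuple literals: None elements are rejected and
+ # elements must have Load context.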
+ self.expr(fac([None], ast.Load()), "None disallowed")
+ self.expr(
+ fac([ast.Name("x", ast.Store())], ast.Load()), "must have Load context"
+ )
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_list(self):
+ self._sequence(ast.List)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_tuple(self):
+ self._sequence(ast.Tuple)
+
+ def test_nameconstant(self):
+ with warnings.catch_warnings(record=True) as wlog:
+ warnings.filterwarnings("ignore", "", DeprecationWarning)
+ from ast import NameConstant
+
+ with warnings.catch_warnings(record=True) as wlog:
+ warnings.filterwarnings("always", "", DeprecationWarning)
+ self.expr(ast.NameConstant(4))
+
+ self.assertEqual(
+ [str(w.message) for w in wlog],
+ [
+ "ast.NameConstant is deprecated and will be removed in Python 3.14; use ast.Constant instead",
+ ],
+ )
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ @support.requires_resource("cpu")
+ def test_stdlib_validates(self):
+ stdlib = os.path.dirname(ast.__file__)
+ tests = [fn for fn in os.listdir(stdlib) if fn.endswith(".py")]
+ tests.extend(["test/test_grammar.py", "test/test_unpack_ex.py"])
+ for module in tests:
+ with self.subTest(module):
+ fn = os.path.join(stdlib, module)
+ with open(fn, "r", encoding="utf-8") as fp:
+ source = fp.read()
+ mod = ast.parse(source, fn)
+ compile(mod, fn, "exec")
+
+ constant_1 = ast.Constant(1)
+ pattern_1 = ast.MatchValue(constant_1)
+
+ constant_x = ast.Constant("x")
+ pattern_x = ast.MatchValue(constant_x)
+
+ constant_true = ast.Constant(True)
+ pattern_true = ast.MatchSingleton(True)
+
+ name_carter = ast.Name("carter", ast.Load())
+
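+ # Each pattern below is invalid in some way; test_match_validation_pattern
+ # expects compile() to reject every one of them with ValueError.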
+ _MATCH_PATTERNS = [
+ ast.MatchValue(
+ ast.Attribute(
+ ast.Attribute(ast.Name("x", ast.Store()), "y", ast.Load()),
+ "z",
+ ast.Load(),
+ )
+ ),
+ ast.MatchValue(
+ ast.Attribute(
+ ast.Attribute(ast.Name("x", ast.Load()), "y", ast.Store()),
+ "z",
+ ast.Load(),
+ )
+ ),
+ ast.MatchValue(ast.Constant(...)),
+ ast.MatchValue(ast.Constant(True)),
+ ast.MatchValue(ast.Constant((1, 2, 3))),
+ ast.MatchSingleton("string"),
+ ast.MatchSequence([ast.MatchSingleton("string")]),
+ ast.MatchSequence([ast.MatchSequence([ast.MatchSingleton("string")])]),
+ ast.MatchMapping([constant_1, constant_true], [pattern_x]),
+ ast.MatchMapping(
+ [constant_true, constant_1], [pattern_x, pattern_1], rest="True"
+ ),
+ ast.MatchMapping(
+ [constant_true, ast.Starred(ast.Name("lol", ast.Load()), ast.Load())],
+ [pattern_x, pattern_1],
+ rest="legit",
+ ),
+ ast.MatchClass(
+ ast.Attribute(ast.Attribute(constant_x, "y", ast.Load()), "z", ast.Load()),
+ patterns=[],
+ kwd_attrs=[],
+ kwd_patterns=[],
+ ),
+ ast.MatchClass(
+ name_carter, patterns=[], kwd_attrs=["True"], kwd_patterns=[pattern_1]
+ ),
+ ast.MatchClass(
+ name_carter, patterns=[], kwd_attrs=[], kwd_patterns=[pattern_1]
+ ),
+ ast.MatchClass(
+ name_carter,
+ patterns=[ast.MatchSingleton("string")],
+ kwd_attrs=[],
+ kwd_patterns=[],
+ ),
+ ast.MatchClass(
+ name_carter, patterns=[ast.MatchStar()], kwd_attrs=[], kwd_patterns=[]
+ ),
+ ast.MatchClass(
+ name_carter, patterns=[], kwd_attrs=[], kwd_patterns=[ast.MatchStar()]
+ ),
+ ast.MatchClass(
+ constant_true, # invalid name
+ patterns=[],
+ kwd_attrs=["True"],
+ kwd_patterns=[pattern_1],
+ ),
+ ast.MatchSequence([ast.MatchStar("True")]),
+ ast.MatchAs(name="False"),
+ ast.MatchOr([]),
+ ast.MatchOr([pattern_1]),
+ ast.MatchOr([pattern_1, pattern_x, ast.MatchSingleton("xxx")]),
+ ast.MatchAs(name="_"),
+ ast.MatchStar(name="x"),
+ ast.MatchSequence([ast.MatchStar("_")]),
+ ast.MatchMapping([], [], rest="_"),
+ ]
+
+ # TODO: RUSTPYTHON
+ @unittest.skip("TODO: RUSTPYTHON; thread 'main' panicked")
+ def test_match_validation_pattern(self):
+ name_x = ast.Name("x", ast.Load())
+ for pattern in self._MATCH_PATTERNS:
+ with self.subTest(ast.dump(pattern, indent=4)):
+ node = ast.Match(
+ subject=name_x,
+ cases=[ast.match_case(pattern=pattern, body=[ast.Pass()])],
+ )
+ node = ast.fix_missing_locations(node)
+ module = ast.Module([node], [])
+ with self.assertRaises(ValueError):
+ compile(module, "", "exec")
+
+
+class ConstantTests(unittest.TestCase):
+ """Tests on the ast.Constant node type."""
+
+ def compile_constant(self, value):
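+ # Replace the literal in "x = 123" with an ast.Constant holding `value`,
+ # compile and execute the tree, and return the resulting x.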
+ tree = ast.parse("x = 123")
+
+ node = tree.body[0].value
+ new_node = ast.Constant(value=value)
+ ast.copy_location(new_node, node)
+ tree.body[0].value = new_node
+
+ code = compile(tree, "", "exec")
+
+ ns = {}
+ exec(code, ns)
+ return ns["x"]
+
+ def test_validation(self):
+ with self.assertRaises(TypeError) as cm:
+ self.compile_constant([1, 2, 3])
+ self.assertEqual(str(cm.exception), "got an invalid type in Constant: list")
+
+ # TODO: RUSTPYTHON; b'' is not b''
+ @unittest.expectedFailure
+ def test_singletons(self):
+ for const in (None, False, True, Ellipsis, b"", frozenset()):
+ with self.subTest(const=const):
+ value = self.compile_constant(const)
+ self.assertIs(value, const)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_values(self):
+ nested_tuple = (1,)
+ nested_frozenset = frozenset({1})
+ for level in range(3):
+ nested_tuple = (nested_tuple, 2)
+ nested_frozenset = frozenset({nested_frozenset, 2})
+ values = (
+ 123,
+ 123.0,
+ 123j,
+ "unicode",
+ b"bytes",
+ tuple("tuple"),
+ frozenset("frozenset"),
+ nested_tuple,
+ nested_frozenset,
+ )
+ for value in values:
+ with self.subTest(value=value):
+ result = self.compile_constant(value)
+ self.assertEqual(result, value)
+
+ # TODO: RUSTPYTHON; SyntaxError: cannot assign to literal
+ @unittest.expectedFailure
+ def test_assign_to_constant(self):
+ tree = ast.parse("x = 1")
+
+ target = tree.body[0].targets[0]
+ new_target = ast.Constant(value=1)
+ ast.copy_location(new_target, target)
+ tree.body[0].targets[0] = new_target
+
+ with self.assertRaises(ValueError) as cm:
+ compile(tree, "string", "exec")
+ self.assertEqual(
+ str(cm.exception),
+ "expression which can't be assigned " "to in Store context",
+ )
+
+ def test_get_docstring(self):
+ tree = ast.parse("'docstring'\nx = 1")
+ self.assertEqual(ast.get_docstring(tree), "docstring")
+
+ def get_load_const(self, tree):
+ # Compile to bytecode, disassemble, and collect the argument of each
+ # LOAD_CONST (or RETURN_CONST) instruction.
+ co = compile(tree, "<string>", "exec")
+ consts = []
+ for instr in dis.get_instructions(co):
+ if instr.opname == "LOAD_CONST" or instr.opname == "RETURN_CONST":
+ consts.append(instr.argval)
+ return consts
+
+ @support.cpython_only
+ def test_load_const(self):
+ consts = [None, True, False, 124, 2.0, 3j, "unicode", b"bytes", (1, 2, 3)]
+
+ code = "\n".join(["x={!r}".format(const) for const in consts])
+ code += "\nx = ..."
+ consts.extend((Ellipsis, None))
+
+ tree = ast.parse(code)
+ self.assertEqual(self.get_load_const(tree), consts)
+
+ # Replace expression nodes with constants
+ for assign, const in zip(tree.body, consts):
+ assert isinstance(assign, ast.Assign), ast.dump(assign)
+ new_node = ast.Constant(value=const)
+ ast.copy_location(new_node, assign.value)
+ assign.value = new_node
+
+ self.assertEqual(self.get_load_const(tree), consts)
+
+ def test_literal_eval(self):
+ tree = ast.parse("1 + 2")
+ binop = tree.body[0].value
+
+ new_left = ast.Constant(value=10)
+ ast.copy_location(new_left, binop.left)
+ binop.left = new_left
+
+ new_right = ast.Constant(value=20j)
+ ast.copy_location(new_right, binop.right)
+ binop.right = new_right
+
+ self.assertEqual(ast.literal_eval(binop), 10 + 20j)
+
+ def test_string_kind(self):
+ c = ast.parse('"x"', mode="eval").body
+ self.assertEqual(c.value, "x")
+ self.assertEqual(c.kind, None)
+
+ c = ast.parse('u"x"', mode="eval").body
+ self.assertEqual(c.value, "x")
+ self.assertEqual(c.kind, "u")
+
+ c = ast.parse('r"x"', mode="eval").body
+ self.assertEqual(c.value, "x")
+ self.assertEqual(c.kind, None)
+
+ c = ast.parse('b"x"', mode="eval").body
+ self.assertEqual(c.value, b"x")
+ self.assertEqual(c.kind, None)
+
+
+class EndPositionTests(unittest.TestCase):
+ """Tests for end position of AST nodes.
+
+ Testing end positions of nodes requires a bit of extra care
+ because of how LL parsers work.
+ """
+
+ def _check_end_pos(self, ast_node, end_lineno, end_col_offset):
+ self.assertEqual(ast_node.end_lineno, end_lineno)
+ self.assertEqual(ast_node.end_col_offset, end_col_offset)
+
+ def _check_content(self, source, ast_node, content):
+ self.assertEqual(ast.get_source_segment(source, ast_node), content)
+
+ def _parse_value(self, s):
+ # Use duck-typing to support both a single expression
+ # and the right-hand side of an assignment statement.
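+ # e.g. both "1 + 2" and "x = 1 + 2" yield the BinOp node via .body[0].value.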
+ return ast.parse(s).body[0].value
+
+ def test_lambda(self):
+ s = "lambda x, *y: None"
+ lam = self._parse_value(s)
+ self._check_content(s, lam.body, "None")
+ self._check_content(s, lam.args.args[0], "x")
+ self._check_content(s, lam.args.vararg, "y")
+
+ def test_func_def(self):
+ s = dedent("""
+ def func(x: int,
+ *args: str,
+ z: float = 0,
+ **kwargs: Any) -> bool:
+ return True
+ """).strip()
+ fdef = ast.parse(s).body[0]
+ self._check_end_pos(fdef, 5, 15)
+ self._check_content(s, fdef.body[0], "return True")
+ self._check_content(s, fdef.args.args[0], "x: int")
+ self._check_content(s, fdef.args.args[0].annotation, "int")
+ self._check_content(s, fdef.args.kwarg, "kwargs: Any")
+ self._check_content(s, fdef.args.kwarg.annotation, "Any")
+
+ def test_call(self):
+ s = "func(x, y=2, **kw)"
+ call = self._parse_value(s)
+ self._check_content(s, call.func, "func")
+ self._check_content(s, call.keywords[0].value, "2")
+ self._check_content(s, call.keywords[1].value, "kw")
+
+ def test_call_noargs(self):
+ s = "x[0]()"
+ call = self._parse_value(s)
+ self._check_content(s, call.func, "x[0]")
+ self._check_end_pos(call, 1, 6)
+
+ def test_class_def(self):
+ s = dedent("""
+ class C(A, B):
+ x: int = 0
+ """).strip()
+ cdef = ast.parse(s).body[0]
+ self._check_end_pos(cdef, 2, 14)
+ self._check_content(s, cdef.bases[1], "B")
+ self._check_content(s, cdef.body[0], "x: int = 0")
+
+ def test_class_kw(self):
+ s = "class S(metaclass=abc.ABCMeta): pass"
+ cdef = ast.parse(s).body[0]
+ self._check_content(s, cdef.keywords[0].value, "abc.ABCMeta")
+
+ def test_multi_line_str(self):
+ s = dedent('''
+ x = """Some multi-line text.
+
+ It goes on starting from same indent."""
+ ''').strip()
+ assign = ast.parse(s).body[0]
+ self._check_end_pos(assign, 3, 40)
+ self._check_end_pos(assign.value, 3, 40)
+
+ def test_continued_str(self):
+ s = dedent("""
+ x = "first part" \\
+ "second part"
+ """).strip()
+ assign = ast.parse(s).body[0]
+ self._check_end_pos(assign, 2, 13)
+ self._check_end_pos(assign.value, 2, 13)
+
+ def test_suites(self):
+ # We intentionally put these into the same string to check
+ # that empty lines are not part of the suite.
+ s = dedent("""
+ while True:
+ pass
+
+ if one():
+ x = None
+ elif other():
+ y = None
+ else:
+ z = None
+
+ for x, y in stuff:
+ assert True
+
+ try:
+ raise RuntimeError
+ except TypeError as e:
+ pass
+
+ pass
+ """).strip()
+ mod = ast.parse(s)
+ while_loop = mod.body[0]
+ if_stmt = mod.body[1]
+ for_loop = mod.body[2]
+ try_stmt = mod.body[3]
+ pass_stmt = mod.body[4]
+
+ self._check_end_pos(while_loop, 2, 8)
+ self._check_end_pos(if_stmt, 9, 12)
+ self._check_end_pos(for_loop, 12, 15)
+ self._check_end_pos(try_stmt, 17, 8)
+ self._check_end_pos(pass_stmt, 19, 4)
+
+ self._check_content(s, while_loop.test, "True")
+ self._check_content(s, if_stmt.body[0], "x = None")
+ self._check_content(s, if_stmt.orelse[0].test, "other()")
+ self._check_content(s, for_loop.target, "x, y")
+ self._check_content(s, try_stmt.body[0], "raise RuntimeError")
+ self._check_content(s, try_stmt.handlers[0].type, "TypeError")
+
+ def test_fstring(self):
+ s = 'x = f"abc {x + y} abc"'
+ fstr = self._parse_value(s)
+ binop = fstr.values[1].value
+ self._check_content(s, binop, "x + y")
+
+ def test_fstring_multi_line(self):
+ s = dedent('''
+ f"""Some multi-line text.
+ {
+ arg_one
+ +
+ arg_two
+ }
+ It goes on..."""
+ ''').strip()
+ fstr = self._parse_value(s)
+ binop = fstr.values[1].value
+ self._check_end_pos(binop, 5, 7)
+ self._check_content(s, binop.left, "arg_one")
+ self._check_content(s, binop.right, "arg_two")
+
+ def test_import_from_multi_line(self):
+ s = dedent("""
+ from x.y.z import (
+ a, b, c as c
+ )
+ """).strip()
+ imp = ast.parse(s).body[0]
+ self._check_end_pos(imp, 3, 1)
+ self._check_end_pos(imp.names[2], 2, 16)
+
+ def test_slices(self):
+ s1 = "f()[1, 2] [0]"
+ s2 = "x[ a.b: c.d]"
+ sm = dedent("""
+ x[ a.b: f () ,
+ g () : c.d
+ ]
+ """).strip()
+ i1, i2, im = map(self._parse_value, (s1, s2, sm))
+ self._check_content(s1, i1.value, "f()[1, 2]")
+ self._check_content(s1, i1.value.slice, "1, 2")
+ self._check_content(s2, i2.slice.lower, "a.b")
+ self._check_content(s2, i2.slice.upper, "c.d")
+ self._check_content(sm, im.slice.elts[0].upper, "f ()")
+ self._check_content(sm, im.slice.elts[1].lower, "g ()")
+ self._check_end_pos(im, 3, 3)
+
+ def test_binop(self):
+ s = dedent("""
+ (1 * 2 + (3 ) +
+ 4
+ )
+ """).strip()
+ binop = self._parse_value(s)
+ self._check_end_pos(binop, 2, 6)
+ self._check_content(s, binop.right, "4")
+ self._check_content(s, binop.left, "1 * 2 + (3 )")
+ self._check_content(s, binop.left.right, "3")
+
+ def test_boolop(self):
+ s = dedent("""
+ if (one_condition and
+ (other_condition or yet_another_one)):
+ pass
+ """).strip()
+ bop = ast.parse(s).body[0].test
+ self._check_end_pos(bop, 2, 44)
+ self._check_content(s, bop.values[1], "other_condition or yet_another_one")
+
+ def test_tuples(self):
+ s1 = "x = () ;"
+ s2 = "x = 1 , ;"
+ s3 = "x = (1 , 2 ) ;"
+ sm = dedent("""
+ x = (
+ a, b,
+ )
+ """).strip()
+ t1, t2, t3, tm = map(self._parse_value, (s1, s2, s3, sm))
+ self._check_content(s1, t1, "()")
+ self._check_content(s2, t2, "1 ,")
+ self._check_content(s3, t3, "(1 , 2 )")
+ self._check_end_pos(tm, 3, 1)
+
+ def test_attribute_spaces(self):
+ s = "func(x. y .z)"
+ call = self._parse_value(s)
+ self._check_content(s, call, s)
+ self._check_content(s, call.args[0], "x. y .z")
+
+ def test_redundant_parenthesis(self):
+ s = "( ( ( a + b ) ) )"
+ v = ast.parse(s).body[0].value
+ self.assertEqual(type(v).__name__, "BinOp")
+ self._check_content(s, v, "a + b")
+ s2 = "await " + s
+ v = ast.parse(s2).body[0].value.value
+ self.assertEqual(type(v).__name__, "BinOp")
+ self._check_content(s2, v, "a + b")
+
+ def test_trailers_with_redundant_parenthesis(self):
+ tests = (
+ ("( ( ( a ) ) ) ( )", "Call"),
+ ("( ( ( a ) ) ) ( b )", "Call"),
+ ("( ( ( a ) ) ) [ b ]", "Subscript"),
+ ("( ( ( a ) ) ) . b", "Attribute"),
+ )
+ for s, t in tests:
+ with self.subTest(s):
+ v = ast.parse(s).body[0].value
+ self.assertEqual(type(v).__name__, t)
+ self._check_content(s, v, s)
+ s2 = "await " + s
+ v = ast.parse(s2).body[0].value.value
+ self.assertEqual(type(v).__name__, t)
+ self._check_content(s2, v, s)
+
+ def test_displays(self):
+ s1 = "[{}, {1, }, {1, 2,} ]"
+ s2 = "{a: b, f (): g () ,}"
+ c1 = self._parse_value(s1)
+ c2 = self._parse_value(s2)
+ self._check_content(s1, c1.elts[0], "{}")
+ self._check_content(s1, c1.elts[1], "{1, }")
+ self._check_content(s1, c1.elts[2], "{1, 2,}")
+ self._check_content(s2, c2.keys[1], "f ()")
+ self._check_content(s2, c2.values[1], "g ()")
+
+ def test_comprehensions(self):
+ s = dedent("""
+ x = [{x for x, y in stuff
+ if cond.x} for stuff in things]
+ """).strip()
+ cmp = self._parse_value(s)
+ self._check_end_pos(cmp, 2, 37)
+ self._check_content(s, cmp.generators[0].iter, "things")
+ self._check_content(s, cmp.elt.generators[0].iter, "stuff")
+ self._check_content(s, cmp.elt.generators[0].ifs[0], "cond.x")
+ self._check_content(s, cmp.elt.generators[0].target, "x, y")
+
+ def test_yield_await(self):
+ s = dedent("""
+ async def f():
+ yield x
+ await y
+ """).strip()
+ fdef = ast.parse(s).body[0]
+ self._check_content(s, fdef.body[0].value, "yield x")
+ self._check_content(s, fdef.body[1].value, "await y")
+
+ def test_source_segment_multi(self):
+ s_orig = dedent("""
+ x = (
+ a, b,
+ ) + ()
+ """).strip()
+ s_tuple = dedent("""
+ (
+ a, b,
+ )
+ """).strip()
+ binop = self._parse_value(s_orig)
+ self.assertEqual(ast.get_source_segment(s_orig, binop.left), s_tuple)
+
+ def test_source_segment_padded(self):
+ s_orig = dedent("""
+ class C:
+ def fun(self) -> None:
+ "ЖЖЖЖЖ"
+ """).strip()
+ s_method = " def fun(self) -> None:\n" ' "ЖЖЖЖЖ"'
+ cdef = ast.parse(s_orig).body[0]
+ self.assertEqual(
+ ast.get_source_segment(s_orig, cdef.body[0], padded=True), s_method
+ )
+
+ def test_source_segment_endings(self):
+ s = "v = 1\r\nw = 1\nx = 1\n\ry = 1\rz = 1\r\n"
+ v, w, x, y, z = ast.parse(s).body
+ self._check_content(s, v, "v = 1")
+ self._check_content(s, w, "w = 1")
+ self._check_content(s, x, "x = 1")
+ self._check_content(s, y, "y = 1")
+ self._check_content(s, z, "z = 1")
+
+ def test_source_segment_tabs(self):
+ s = dedent("""
+ class C:
+ \t\f def fun(self) -> None:
+ \t\f pass
+ """).strip()
+ s_method = " \t\f def fun(self) -> None:\n" " \t\f pass"
+
+ cdef = ast.parse(s).body[0]
+ self.assertEqual(ast.get_source_segment(s, cdef.body[0], padded=True), s_method)
+
+ def test_source_segment_newlines(self):
+ s = "def f():\n pass\ndef g():\r pass\r\ndef h():\r\n pass\r\n"
+ f, g, h = ast.parse(s).body
+ self._check_content(s, f, "def f():\n pass")
+ self._check_content(s, g, "def g():\r pass")
+ self._check_content(s, h, "def h():\r\n pass")
+
+ s = "def f():\n a = 1\r b = 2\r\n c = 3\n"
+ f = ast.parse(s).body[0]
+ self._check_content(s, f, s.rstrip())
+
+ def test_source_segment_missing_info(self):
+ s = "v = 1\r\nw = 1\nx = 1\n\ry = 1\r\n"
+ v, w, x, y = ast.parse(s).body
+ del v.lineno
+ del w.end_lineno
+ del x.col_offset
+ del y.end_col_offset
+ self.assertIsNone(ast.get_source_segment(s, v))
+ self.assertIsNone(ast.get_source_segment(s, w))
+ self.assertIsNone(ast.get_source_segment(s, x))
+ self.assertIsNone(ast.get_source_segment(s, y))
+
+
+class BaseNodeVisitorCases:
+ # Both `NodeVisitor` and `NodeTransformer` must raise these warnings:
+ def test_old_constant_nodes(self):
+ class Visitor(self.visitor_class):
+ def visit_Num(self, node):
+ log.append((node.lineno, "Num", node.n))
+
+ def visit_Str(self, node):
+ log.append((node.lineno, "Str", node.s))
+
+ def visit_Bytes(self, node):
+ log.append((node.lineno, "Bytes", node.s))
+
+ def visit_NameConstant(self, node):
+ log.append((node.lineno, "NameConstant", node.value))
+
+ def visit_Ellipsis(self, node):
+ log.append((node.lineno, "Ellipsis", ...))
+
+ mod = ast.parse(
+ dedent("""\
+ i = 42
+ f = 4.25
+ c = 4.25j
+ s = 'string'
+ b = b'bytes'
+ t = True
+ n = None
+ e = ...
+ """)
+ )
+ visitor = Visitor()
+ log = []
+ with warnings.catch_warnings(record=True) as wlog:
+ warnings.filterwarnings("always", "", DeprecationWarning)
+ visitor.visit(mod)
+ self.assertEqual(
+ log,
+ [
+ (1, "Num", 42),
+ (2, "Num", 4.25),
+ (3, "Num", 4.25j),
+ (4, "Str", "string"),
+ (5, "Bytes", b"bytes"),
+ (6, "NameConstant", True),
+ (7, "NameConstant", None),
+ (8, "Ellipsis", ...),
+ ],
+ )
+ self.assertEqual(
+ [str(w.message) for w in wlog],
+ [
+ "visit_Num is deprecated; add visit_Constant",
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ "visit_Num is deprecated; add visit_Constant",
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ "visit_Num is deprecated; add visit_Constant",
+ "Attribute n is deprecated and will be removed in Python 3.14; use value instead",
+ "visit_Str is deprecated; add visit_Constant",
+ "Attribute s is deprecated and will be removed in Python 3.14; use value instead",
+ "visit_Bytes is deprecated; add visit_Constant",
+ "Attribute s is deprecated and will be removed in Python 3.14; use value instead",
+ "visit_NameConstant is deprecated; add visit_Constant",
+ "visit_NameConstant is deprecated; add visit_Constant",
+ "visit_Ellipsis is deprecated; add visit_Constant",
+ ],
+ )
+
+
+class NodeVisitorTests(BaseNodeVisitorCases, unittest.TestCase):
+ visitor_class = ast.NodeVisitor
+
+
+class NodeTransformerTests(ASTTestMixin, BaseNodeVisitorCases, unittest.TestCase):
+ visitor_class = ast.NodeTransformer
+
+ def assertASTTransformation(self, transformer_class, initial_code, expected_code):
+ initial_ast = ast.parse(dedent(initial_code))
+ expected_ast = ast.parse(dedent(expected_code))
+
+ transformer = transformer_class()
+ result_ast = ast.fix_missing_locations(transformer.visit(initial_ast))
+
+ self.assertASTEqual(result_ast, expected_ast)
+
+ # TODO: RUSTPYTHON; is not
+ @unittest.expectedFailure
+ def test_node_remove_single(self):
+ code = "def func(arg) -> SomeType: ..."
+ expected = "def func(arg): ..."
+
+ # Since `FunctionDef.returns` is defined as a single value, we test
+ # the `if isinstance(old_value, AST):` branch here.
+ class SomeTypeRemover(ast.NodeTransformer):
+ def visit_Name(self, node: ast.Name):
+ self.generic_visit(node)
+ if node.id == "SomeType":
+ return None
+ return node
+
+ self.assertASTTransformation(SomeTypeRemover, code, expected)
+
+ def test_node_remove_from_list(self):
+ code = """
+ def func(arg):
+ print(arg)
+ yield arg
+ """
+ expected = """
+ def func(arg):
+ print(arg)
+ """
+
+ # Since `FunctionDef.body` is defined as a list, we test
+ # the `if isinstance(old_value, list):` branch here.
+ class YieldRemover(ast.NodeTransformer):
+ def visit_Expr(self, node: ast.Expr):
+ self.generic_visit(node)
+ if isinstance(node.value, ast.Yield):
+ return None # Remove `yield` from a function
+ return node
+
+ self.assertASTTransformation(YieldRemover, code, expected)
+
+ # TODO: RUSTPYTHON; is not
+ @unittest.expectedFailure
+ def test_node_return_list(self):
+ code = """
+ class DSL(Base, kw1=True): ...
+ """
+ expected = """
+ class DSL(Base, kw1=True, kw2=True, kw3=False): ...
+ """
+
+ class ExtendKeywords(ast.NodeTransformer):
+ def visit_keyword(self, node: ast.keyword):
+ self.generic_visit(node)
+ if node.arg == "kw1":
+ return [
+ node,
+ ast.keyword("kw2", ast.Constant(True)),
+ ast.keyword("kw3", ast.Constant(False)),
+ ]
+ return node
+
+ self.assertASTTransformation(ExtendKeywords, code, expected)
+
+ def test_node_mutate(self):
+ code = """
+ def func(arg):
+ print(arg)
+ """
+ expected = """
+ def func(arg):
+ log(arg)
+ """
+
+ class PrintToLog(ast.NodeTransformer):
+ def visit_Call(self, node: ast.Call):
+ self.generic_visit(node)
+ if isinstance(node.func, ast.Name) and node.func.id == "print":
+ node.func.id = "log"
+ return node
+
+ self.assertASTTransformation(PrintToLog, code, expected)
+
+ # TODO: RUSTPYTHON; is not
+ @unittest.expectedFailure
+ def test_node_replace(self):
+ code = """
+ def func(arg):
+ print(arg)
+ """
+ expected = """
+ def func(arg):
+ logger.log(arg, debug=True)
+ """
+
+ class PrintToLog(ast.NodeTransformer):
+ def visit_Call(self, node: ast.Call):
+ self.generic_visit(node)
+ if isinstance(node.func, ast.Name) and node.func.id == "print":
+ return ast.Call(
+ func=ast.Attribute(
+ ast.Name("logger", ctx=ast.Load()),
+ attr="log",
+ ctx=ast.Load(),
+ ),
+ args=node.args,
+ keywords=[ast.keyword("debug", ast.Constant(True))],
+ )
+ return node
+
+ self.assertASTTransformation(PrintToLog, code, expected)
+
+
+class ASTConstructorTests(unittest.TestCase):
+ """Test the autogenerated constructors for AST nodes."""
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_FunctionDef(self):
+ args = ast.arguments()
+ self.assertEqual(args.args, [])
+ self.assertEqual(args.posonlyargs, [])
+ with self.assertWarnsRegex(
+ DeprecationWarning,
+ r"FunctionDef\.__init__ missing 1 required positional argument: 'name'",
+ ):
+ node = ast.FunctionDef(args=args)
+ self.assertFalse(hasattr(node, "name"))
+ self.assertEqual(node.decorator_list, [])
+ node = ast.FunctionDef(name="foo", args=args)
+ self.assertEqual(node.name, "foo")
+ self.assertEqual(node.decorator_list, [])
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_expr_context(self):
+ name = ast.Name("x")
+ self.assertEqual(name.id, "x")
+ self.assertIsInstance(name.ctx, ast.Load)
+
+ name2 = ast.Name("x", ast.Store())
+ self.assertEqual(name2.id, "x")
+ self.assertIsInstance(name2.ctx, ast.Store)
+
+ name3 = ast.Name("x", ctx=ast.Del())
+ self.assertEqual(name3.id, "x")
+ self.assertIsInstance(name3.ctx, ast.Del)
+
+ with self.assertWarnsRegex(
+ DeprecationWarning,
+ r"Name\.__init__ missing 1 required positional argument: 'id'",
+ ):
+ name3 = ast.Name()
+
+ def test_custom_subclass_with_no_fields(self):
+ class NoInit(ast.AST):
+ pass
+
+ obj = NoInit()
+ self.assertIsInstance(obj, NoInit)
+ self.assertEqual(obj.__dict__, {})
+
+ def test_fields_but_no_field_types(self):
+ class Fields(ast.AST):
+ _fields = ("a",)
+
+ obj = Fields()
+ with self.assertRaises(AttributeError):
+ obj.a
+ obj = Fields(a=1)
+ self.assertEqual(obj.a, 1)
+
+ def test_fields_and_types(self):
+ class FieldsAndTypes(ast.AST):
+ _fields = ("a",)
+ _field_types = {"a": int | None}
+ a: int | None = None
+
+ obj = FieldsAndTypes()
+ self.assertIs(obj.a, None)
+ obj = FieldsAndTypes(a=1)
+ self.assertEqual(obj.a, 1)
+
+ # TODO: RUSTPYTHON; DeprecationWarning not triggered
+ @unittest.expectedFailure
+ def test_custom_attributes(self):
+ class MyAttrs(ast.AST):
+ _attributes = ("a", "b")
+
+ obj = MyAttrs(a=1, b=2)
+ self.assertEqual(obj.a, 1)
+ self.assertEqual(obj.b, 2)
+
+ with self.assertWarnsRegex(
+ DeprecationWarning,
+ r"MyAttrs.__init__ got an unexpected keyword argument 'c'.",
+ ):
+ obj = MyAttrs(c=3)
+
+ # TODO: RUSTPYTHON; DeprecationWarning not triggered
+ @unittest.expectedFailure
+ def test_fields_and_types_no_default(self):
+ class FieldsAndTypesNoDefault(ast.AST):
+ _fields = ("a",)
+ _field_types = {"a": int}
+
+ with self.assertWarnsRegex(
+ DeprecationWarning,
+ r"FieldsAndTypesNoDefault\.__init__ missing 1 required positional argument: 'a'\.",
+ ):
+ obj = FieldsAndTypesNoDefault()
+ with self.assertRaises(AttributeError):
+ obj.a
+ obj = FieldsAndTypesNoDefault(a=1)
+ self.assertEqual(obj.a, 1)
+
+ # TODO: RUSTPYTHON; DeprecationWarning not triggered
+ @unittest.expectedFailure
+ def test_incomplete_field_types(self):
+ class MoreFieldsThanTypes(ast.AST):
+ _fields = ("a", "b")
+ _field_types = {"a": int | None}
+ a: int | None = None
+ b: int | None = None
+
+ with self.assertWarnsRegex(
+ DeprecationWarning,
+ r"Field 'b' is missing from MoreFieldsThanTypes\._field_types",
+ ):
+ obj = MoreFieldsThanTypes()
+ self.assertIs(obj.a, None)
+ self.assertIs(obj.b, None)
+
+ obj = MoreFieldsThanTypes(a=1, b=2)
+ self.assertEqual(obj.a, 1)
+ self.assertEqual(obj.b, 2)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_complete_field_types(self):
+ class _AllFieldTypes(ast.AST):
+ _fields = ("a", "b")
+ _field_types = {"a": int | None, "b": list[str]}
+ # This must be set explicitly
+ a: int | None = None
+ # This will add an implicit empty list default
+ b: list[str]
+
+ obj = _AllFieldTypes()
+ self.assertIs(obj.a, None)
+ self.assertEqual(obj.b, [])
+
+
+@support.cpython_only
+class ModuleStateTests(unittest.TestCase):
+ # bpo-41194, bpo-41261, bpo-41631: The _ast module uses a global state.
+
+ def check_ast_module(self):
+ # Check that the _ast module still works as expected
+ code = "x + 1"
+ filename = ""
+ mode = "eval"
+
+ # Create _ast.AST subclass instances
+ ast_tree = compile(code, filename, mode, flags=ast.PyCF_ONLY_AST)
+
+ # Call PyAST_Check()
+ code = compile(ast_tree, filename, mode)
+ self.assertIsInstance(code, types.CodeType)
+
+ def test_reload_module(self):
+ # bpo-41194: Importing the _ast module twice must not crash.
+ with support.swap_item(sys.modules, "_ast", None):
+ del sys.modules["_ast"]
+ import _ast as ast1
+
+ del sys.modules["_ast"]
+ import _ast as ast2
+
+ self.check_ast_module()
+
+ # Unloading the two _ast module instances must not crash.
+ del ast1
+ del ast2
+ support.gc_collect()
+
+ self.check_ast_module()
+
+ def test_sys_modules(self):
+ # bpo-41631: Test reproducing a Mercurial crash when PyAST_Check()
+ # imported the _ast module internally.
+ lazy_mod = object()
+
+ def my_import(name, *args, **kw):
+ sys.modules[name] = lazy_mod
+ return lazy_mod
+
+ with support.swap_item(sys.modules, "_ast", None):
+ del sys.modules["_ast"]
+
+ with support.swap_attr(builtins, "__import__", my_import):
+ # Test that compile() does not import the _ast module
+ self.check_ast_module()
+ self.assertNotIn("_ast", sys.modules)
+
+ # Sanity check of the test itself
+ import _ast
+
+ self.assertIs(_ast, lazy_mod)
+
+ def test_subinterpreter(self):
+ # bpo-41631: Importing and using the _ast module in a subinterpreter
+ # must not crash.
+ code = dedent("""
+ import _ast
+ import ast
+ import gc
+ import sys
+ import types
+
+ # Create _ast.AST subclass instances and call PyAST_Check()
+ ast_tree = compile('x+1', '<string>', 'eval',
+ flags=ast.PyCF_ONLY_AST)
+ code = compile(ast_tree, 'string', 'eval')
+ if not isinstance(code, types.CodeType):
+ raise AssertionError
+
+ # Unloading the _ast module must not crash.
+ del ast, _ast
+ del sys.modules['ast'], sys.modules['_ast']
+ gc.collect()
+ """)
+ res = support.run_in_subinterp(code)
+ self.assertEqual(res, 0)
+
+
+class ASTMainTests(unittest.TestCase):
+ # Tests the `ast.main()` function.
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_cli_file_input(self):
+ code = "print(1, 2, 3)"
+ expected = ast.dump(ast.parse(code), indent=3)
+
+ with os_helper.temp_dir() as tmp_dir:
+ filename = os.path.join(tmp_dir, "test_module.py")
+ with open(filename, "w", encoding="utf-8") as f:
+ f.write(code)
+ res, _ = script_helper.run_python_until_end("-m", "ast", filename)
+
+ self.assertEqual(res.err, b"")
+ self.assertEqual(expected.splitlines(), res.out.decode("utf8").splitlines())
+ self.assertEqual(res.rc, 0)
+
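+# Helper for the optimization tests below: two trees compare equal when their
+# ast.dump() representations match.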
+def compare(left, right):
+ return ast.dump(left) == ast.dump(right)
+
+class ASTOptimizationTests(unittest.TestCase):
+ binop = {
+ "+": ast.Add(),
+ "-": ast.Sub(),
+ "*": ast.Mult(),
+ "/": ast.Div(),
+ "%": ast.Mod(),
+ "<<": ast.LShift(),
+ ">>": ast.RShift(),
+ "|": ast.BitOr(),
+ "^": ast.BitXor(),
+ "&": ast.BitAnd(),
+ "//": ast.FloorDiv(),
+ "**": ast.Pow(),
+ }
+
+ unaryop = {
+ "~": ast.Invert(),
+ "+": ast.UAdd(),
+ "-": ast.USub(),
+ }
+
+ def wrap_expr(self, expr):
+ return ast.Module(body=[ast.Expr(value=expr)])
+
+ def wrap_statement(self, statement):
+ return ast.Module(body=[statement])
+
+ def assert_ast(self, code, non_optimized_target, optimized_target):
+
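+ # optimize=-1 leaves the parsed AST unoptimized; optimize=1 applies the
+ # constant-folding pass, producing the tree compared against optimized_target.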
+ non_optimized_tree = ast.parse(code, optimize=-1)
+ optimized_tree = ast.parse(code, optimize=1)
+
+ # Is a non-optimized tree equal to a non-optimized target?
+ self.assertTrue(
+ compare(non_optimized_tree, non_optimized_target),
+ f"{ast.dump(non_optimized_target)} must equal "
+ f"{ast.dump(non_optimized_tree)}",
+ )
+
+ # Is an optimized tree equal to a non-optimized target?
+ self.assertFalse(
+ compare(optimized_tree, non_optimized_target),
+ f"{ast.dump(non_optimized_target)} must not equal "
+ f"{ast.dump(non_optimized_tree)}"
+ )
+
+ # Is an optimized tree equal to an optimized target?
+ self.assertTrue(
+ compare(optimized_tree, optimized_target),
+ f"{ast.dump(optimized_target)} must equal "
+ f"{ast.dump(optimized_tree)}",
+ )
+
+ def create_binop(self, operand, left=ast.Constant(1), right=ast.Constant(1)):
+ return ast.BinOp(left=left, op=self.binop[operand], right=right)
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_folding_binop(self):
+ code = "1 %s 1"
+ operators = self.binop.keys()
+
+ for op in operators:
+ result_code = code % op
+ non_optimized_target = self.wrap_expr(self.create_binop(op))
+ optimized_target = self.wrap_expr(ast.Constant(value=eval(result_code)))
+
+ with self.subTest(
+ result_code=result_code,
+ non_optimized_target=non_optimized_target,
+ optimized_target=optimized_target
+ ):
+ self.assert_ast(result_code, non_optimized_target, optimized_target)
+
+ # Multiplication of constant tuples must be folded
+ code = "(1,) * 3"
+ non_optimized_target = self.wrap_expr(self.create_binop("*", ast.Tuple(elts=[ast.Constant(value=1)]), ast.Constant(value=3)))
+ optimized_target = self.wrap_expr(ast.Constant(eval(code)))
+
+ self.assert_ast(code, non_optimized_target, optimized_target)
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_folding_unaryop(self):
+ code = "%s1"
+ operators = self.unaryop.keys()
+
+ def create_unaryop(operand):
+ return ast.UnaryOp(op=self.unaryop[operand], operand=ast.Constant(1))
+
+ for op in operators:
+ result_code = code % op
+ non_optimized_target = self.wrap_expr(create_unaryop(op))
+ optimized_target = self.wrap_expr(ast.Constant(eval(result_code)))
+
+ with self.subTest(
+ result_code=result_code,
+ non_optimized_target=non_optimized_target,
+ optimized_target=optimized_target
+ ):
+ self.assert_ast(result_code, non_optimized_target, optimized_target)
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_folding_not(self):
+ code = "not (1 %s (1,))"
+ operators = {
+ "in": ast.In(),
+ "is": ast.Is(),
+ }
+ opt_operators = {
+ "is": ast.IsNot(),
+ "in": ast.NotIn(),
+ }
+
+ def create_notop(operand):
+ return ast.UnaryOp(op=ast.Not(), operand=ast.Compare(
+ left=ast.Constant(value=1),
+ ops=[operators[operand]],
+ comparators=[ast.Tuple(elts=[ast.Constant(value=1)])]
+ ))
+
+ for op in operators.keys():
+ result_code = code % op
+ non_optimized_target = self.wrap_expr(create_notop(op))
+ optimized_target = self.wrap_expr(
+ ast.Compare(left=ast.Constant(1), ops=[opt_operators[op]], comparators=[ast.Constant(value=(1,))])
+ )
+
+ with self.subTest(
+ result_code=result_code,
+ non_optimized_target=non_optimized_target,
+ optimized_target=optimized_target
+ ):
+ self.assert_ast(result_code, non_optimized_target, optimized_target)
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_folding_format(self):
+ code = "'%s' % (a,)"
+
+ non_optimized_target = self.wrap_expr(
+ ast.BinOp(
+ left=ast.Constant(value="%s"),
+ op=ast.Mod(),
+ right=ast.Tuple(elts=[ast.Name(id='a')]))
+ )
+ optimized_target = self.wrap_expr(
+ ast.JoinedStr(
+ values=[
+ ast.FormattedValue(value=ast.Name(id='a'), conversion=115)
+ ]
+ )
+ )
+
+ self.assert_ast(code, non_optimized_target, optimized_target)
+
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_folding_tuple(self):
+ code = "(1,)"
+
+ non_optimized_target = self.wrap_expr(ast.Tuple(elts=[ast.Constant(1)]))
+ optimized_target = self.wrap_expr(ast.Constant(value=(1,)))
+
+ self.assert_ast(code, non_optimized_target, optimized_target)
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_folding_comparator(self):
+ code = "1 %s %s1%s"
+ operators = [("in", ast.In()), ("not in", ast.NotIn())]
+ braces = [
+ ("[", "]", ast.List, (1,)),
+ ("{", "}", ast.Set, frozenset({1})),
+ ]
+ for left, right, non_optimized_comparator, optimized_comparator in braces:
+ for op, node in operators:
+ non_optimized_target = self.wrap_expr(ast.Compare(
+ left=ast.Constant(1), ops=[node],
+ comparators=[non_optimized_comparator(elts=[ast.Constant(1)])]
+ ))
+ optimized_target = self.wrap_expr(ast.Compare(
+ left=ast.Constant(1), ops=[node],
+ comparators=[ast.Constant(value=optimized_comparator)]
+ ))
+ self.assert_ast(code % (op, left, right), non_optimized_target, optimized_target)
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_folding_iter(self):
+ code = "for _ in %s1%s: pass"
+ braces = [
+ ("[", "]", ast.List, (1,)),
+ ("{", "}", ast.Set, frozenset({1})),
+ ]
+
+ for left, right, ast_cls, optimized_iter in braces:
+ non_optimized_target = self.wrap_statement(ast.For(
+ target=ast.Name(id="_", ctx=ast.Store()),
+ iter=ast_cls(elts=[ast.Constant(1)]),
+ body=[ast.Pass()]
+ ))
+ optimized_target = self.wrap_statement(ast.For(
+ target=ast.Name(id="_", ctx=ast.Store()),
+ iter=ast.Constant(value=optimized_iter),
+ body=[ast.Pass()]
+ ))
+
+ self.assert_ast(code % (left, right), non_optimized_target, optimized_target)
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_folding_subscript(self):
+ code = "(1,)[0]"
+
+ non_optimized_target = self.wrap_expr(
+ ast.Subscript(value=ast.Tuple(elts=[ast.Constant(value=1)]), slice=ast.Constant(value=0))
+ )
+ optimized_target = self.wrap_expr(ast.Constant(value=1))
+
+ self.assert_ast(code, non_optimized_target, optimized_target)
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_folding_type_param_in_function_def(self):
+ code = "def foo[%s = 1 + 1](): pass"
+
+ unoptimized_binop = self.create_binop("+")
+ unoptimized_type_params = [
+ ("T", "T", ast.TypeVar),
+ ("**P", "P", ast.ParamSpec),
+ ("*Ts", "Ts", ast.TypeVarTuple),
+ ]
+
+ for type, name, type_param in unoptimized_type_params:
+ result_code = code % type
+ optimized_target = self.wrap_statement(
+ ast.FunctionDef(
+ name='foo',
+ args=ast.arguments(),
+ body=[ast.Pass()],
+ type_params=[type_param(name=name, default_value=ast.Constant(2))]
+ )
+ )
+ non_optimized_target = self.wrap_statement(
+ ast.FunctionDef(
+ name='foo',
+ args=ast.arguments(),
+ body=[ast.Pass()],
+ type_params=[type_param(name=name, default_value=unoptimized_binop)]
+ )
+ )
+ self.assert_ast(result_code, non_optimized_target, optimized_target)
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_folding_type_param_in_class_def(self):
+ code = "class foo[%s = 1 + 1]: pass"
+
+ unoptimized_binop = self.create_binop("+")
+ unoptimized_type_params = [
+ ("T", "T", ast.TypeVar),
+ ("**P", "P", ast.ParamSpec),
+ ("*Ts", "Ts", ast.TypeVarTuple),
+ ]
+
+ for type, name, type_param in unoptimized_type_params:
+ result_code = code % type
+ optimized_target = self.wrap_statement(
+ ast.ClassDef(
+ name='foo',
+ body=[ast.Pass()],
+ type_params=[type_param(name=name, default_value=ast.Constant(2))]
+ )
+ )
+ non_optimized_target = self.wrap_statement(
+ ast.ClassDef(
+ name='foo',
+ body=[ast.Pass()],
+ type_params=[type_param(name=name, default_value=unoptimized_binop)]
+ )
+ )
+ self.assert_ast(result_code, non_optimized_target, optimized_target)
+
+ # TODO: RUSTPYTHON; ValueError: compile() unrecognized flags
+ @unittest.expectedFailure
+ def test_folding_type_param_in_type_alias(self):
+ code = "type foo[%s = 1 + 1] = 1"
+
+ unoptimized_binop = self.create_binop("+")
+ unoptimized_type_params = [
+ ("T", "T", ast.TypeVar),
+ ("**P", "P", ast.ParamSpec),
+ ("*Ts", "Ts", ast.TypeVarTuple),
+ ]
+
+ for type, name, type_param in unoptimized_type_params:
+ result_code = code % type
+ optimized_target = self.wrap_statement(
+ ast.TypeAlias(
+ name=ast.Name(id='foo', ctx=ast.Store()),
+ type_params=[type_param(name=name, default_value=ast.Constant(2))],
+ value=ast.Constant(value=1),
+ )
+ )
+ non_optimized_target = self.wrap_statement(
+ ast.TypeAlias(
+ name=ast.Name(id='foo', ctx=ast.Store()),
+ type_params=[type_param(name=name, default_value=unoptimized_binop)],
+ value=ast.Constant(value=1),
+ )
+ )
+ self.assert_ast(result_code, non_optimized_target, optimized_target)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Lib/test/test_ast/utils.py b/Lib/test/test_ast/utils.py
new file mode 100644
index 0000000000..145e89ee94
--- /dev/null
+++ b/Lib/test/test_ast/utils.py
@@ -0,0 +1,15 @@
+def to_tuple(t):
+ if t is None or isinstance(t, (str, int, complex, float, bytes)) or t is Ellipsis:
+ return t
+ elif isinstance(t, list):
+ return [to_tuple(e) for e in t]
+ result = [t.__class__.__name__]
+ if hasattr(t, 'lineno') and hasattr(t, 'col_offset'):
+ result.append((t.lineno, t.col_offset))
+ if hasattr(t, 'end_lineno') and hasattr(t, 'end_col_offset'):
+ result[-1] += (t.end_lineno, t.end_col_offset)
+ if t._fields is None:
+ return tuple(result)
+ for f in t._fields:
+ result.append(to_tuple(getattr(t, f)))
+ return tuple(result)
diff --git a/Lib/test/test_codeop.py b/Lib/test/test_codeop.py
index 1036b970cd..c62e3748e6 100644
--- a/Lib/test/test_codeop.py
+++ b/Lib/test/test_codeop.py
@@ -227,6 +227,9 @@ def test_incomplete(self):
ai("(x for x in")
ai("(x for x in (")
+ ai('a = f"""')
+ ai('a = \\')
+
def test_invalid(self):
ai = self.assertInvalid
ai("a b")
@@ -300,12 +303,11 @@ def test_warning(self):
warnings.simplefilter('error', SyntaxWarning)
compile_command(r"'\e'", symbol='exec')
- # TODO: RUSTPYTHON
- #def test_incomplete_warning(self):
- # with warnings.catch_warnings(record=True) as w:
- # warnings.simplefilter('always')
- # self.assertIncomplete("'\\e' + (")
- # self.assertEqual(w, [])
+ def test_incomplete_warning(self):
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ self.assertIncomplete("'\\e' + (")
+ self.assertEqual(w, [])
# TODO: RUSTPYTHON
@unittest.expectedFailure
diff --git a/Lib/test/test_collections.py b/Lib/test/test_collections.py
index 901f596cc3..79c050bae6 100644
--- a/Lib/test/test_collections.py
+++ b/Lib/test/test_collections.py
@@ -952,8 +952,6 @@ def __aiter__(self):
self.validate_abstract_methods(AsyncIterable, '__aiter__')
self.validate_isinstance(AsyncIterable, '__aiter__')
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_AsyncIterator(self):
class AI:
def __aiter__(self):
@@ -1152,8 +1150,6 @@ class NonCol(ColImpl):
self.assertFalse(issubclass(NonCol, Collection))
self.assertFalse(isinstance(NonCol(), Collection))
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_Iterator(self):
non_samples = [None, 42, 3.14, 1j, b"", "", (), [], {}, set()]
for x in non_samples:
@@ -1850,8 +1846,6 @@ def test_Set_hash_matches_frozenset(self):
fs = frozenset(s)
self.assertEqual(hash(fs), Set._hash(fs), msg=s)
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_Mapping(self):
for sample in [dict]:
self.assertIsInstance(sample(), Mapping)
@@ -1868,8 +1862,6 @@ def __iter__(self):
self.validate_comparison(MyMapping())
self.assertRaises(TypeError, reversed, MyMapping())
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_MutableMapping(self):
for sample in [dict]:
self.assertIsInstance(sample(), MutableMapping)
@@ -1904,8 +1896,6 @@ def test_MutableMapping_subclass(self):
mymap['blue'] = 7 # Shouldn't affect 'z'
self.assertEqual(z, {('orange', 3), ('red', 5)})
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_Sequence(self):
for sample in [tuple, list, bytes, str]:
self.assertIsInstance(sample(), Sequence)
@@ -1988,8 +1978,6 @@ def test_Buffer(self):
self.assertFalse(issubclass(sample, Buffer))
self.validate_abstract_methods(Buffer, '__buffer__')
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_MutableSequence(self):
for sample in [tuple, str, bytes]:
self.assertNotIsInstance(sample(), MutableSequence)
diff --git a/Lib/test/test_csv.py b/Lib/test/test_csv.py
index 9a1743da6d..95cf51bf08 100644
--- a/Lib/test/test_csv.py
+++ b/Lib/test/test_csv.py
@@ -10,7 +10,7 @@
import gc
import pickle
from test import support
-from test.support import warnings_helper, import_helper, check_disallow_instantiation
+from test.support import import_helper, check_disallow_instantiation
from itertools import permutations
from textwrap import dedent
from collections import OrderedDict
@@ -28,14 +28,20 @@ class Test_Csv(unittest.TestCase):
in TestDialectRegistry.
"""
def _test_arg_valid(self, ctor, arg):
+ ctor(arg)
self.assertRaises(TypeError, ctor)
self.assertRaises(TypeError, ctor, None)
- self.assertRaises(TypeError, ctor, arg, bad_attr = 0)
- self.assertRaises(TypeError, ctor, arg, delimiter = 0)
- self.assertRaises(TypeError, ctor, arg, delimiter = 'XX')
+ self.assertRaises(TypeError, ctor, arg, bad_attr=0)
+ self.assertRaises(TypeError, ctor, arg, delimiter='')
+ self.assertRaises(TypeError, ctor, arg, escapechar='')
+ self.assertRaises(TypeError, ctor, arg, quotechar='')
+ self.assertRaises(TypeError, ctor, arg, delimiter='^^')
+ self.assertRaises(TypeError, ctor, arg, escapechar='^^')
+ self.assertRaises(TypeError, ctor, arg, quotechar='^^')
self.assertRaises(csv.Error, ctor, arg, 'foo')
self.assertRaises(TypeError, ctor, arg, delimiter=None)
self.assertRaises(TypeError, ctor, arg, delimiter=1)
+ self.assertRaises(TypeError, ctor, arg, escapechar=1)
self.assertRaises(TypeError, ctor, arg, quotechar=1)
self.assertRaises(TypeError, ctor, arg, lineterminator=None)
self.assertRaises(TypeError, ctor, arg, lineterminator=1)
@@ -46,11 +52,48 @@ def _test_arg_valid(self, ctor, arg):
quoting=csv.QUOTE_ALL, quotechar=None)
self.assertRaises(TypeError, ctor, arg,
quoting=csv.QUOTE_NONE, quotechar='')
+ self.assertRaises(ValueError, ctor, arg, delimiter='\n')
+ self.assertRaises(ValueError, ctor, arg, escapechar='\n')
+ self.assertRaises(ValueError, ctor, arg, quotechar='\n')
+ self.assertRaises(ValueError, ctor, arg, delimiter='\r')
+ self.assertRaises(ValueError, ctor, arg, escapechar='\r')
+ self.assertRaises(ValueError, ctor, arg, quotechar='\r')
+ ctor(arg, delimiter=' ')
+ ctor(arg, escapechar=' ')
+ ctor(arg, quotechar=' ')
+ ctor(arg, delimiter='\t', skipinitialspace=True)
+ ctor(arg, escapechar='\t', skipinitialspace=True)
+ ctor(arg, quotechar='\t', skipinitialspace=True)
+ ctor(arg, delimiter=' ', skipinitialspace=True)
+ self.assertRaises(ValueError, ctor, arg,
+ escapechar=' ', skipinitialspace=True)
+ self.assertRaises(ValueError, ctor, arg,
+ quotechar=' ', skipinitialspace=True)
+ ctor(arg, delimiter='^')
+ ctor(arg, escapechar='^')
+ ctor(arg, quotechar='^')
+ self.assertRaises(ValueError, ctor, arg, delimiter='^', escapechar='^')
+ self.assertRaises(ValueError, ctor, arg, delimiter='^', quotechar='^')
+ self.assertRaises(ValueError, ctor, arg, escapechar='^', quotechar='^')
+ ctor(arg, delimiter='\x85')
+ ctor(arg, escapechar='\x85')
+ ctor(arg, quotechar='\x85')
+ ctor(arg, lineterminator='\x85')
+ self.assertRaises(ValueError, ctor, arg,
+ delimiter='\x85', lineterminator='\x85')
+ self.assertRaises(ValueError, ctor, arg,
+ escapechar='\x85', lineterminator='\x85')
+ self.assertRaises(ValueError, ctor, arg,
+ quotechar='\x85', lineterminator='\x85')
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
def test_reader_arg_valid(self):
self._test_arg_valid(csv.reader, [])
self.assertRaises(OSError, csv.reader, BadIterable())
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
def test_writer_arg_valid(self):
self._test_arg_valid(csv.writer, StringIO())
class BadWriter:
@@ -150,13 +193,8 @@ def _write_error_test(self, exc, fields, **kwargs):
fileobj.seek(0)
self.assertEqual(fileobj.read(), '')
- # TODO: RUSTPYTHON ''\r\n to ""\r\n unsupported
- @unittest.expectedFailure
def test_write_arg_valid(self):
self._write_error_test(csv.Error, None)
- self._write_test((), '')
- self._write_test([None], '""')
- self._write_error_test(csv.Error, [None], quoting = csv.QUOTE_NONE)
# Check that exceptions are passed up the chain
self._write_error_test(OSError, BadIterable())
class BadList:
@@ -170,14 +208,13 @@ class BadItem:
def __str__(self):
raise OSError
self._write_error_test(OSError, [BadItem()])
-
def test_write_bigfield(self):
# This exercises the buffer realloc functionality
bigstring = 'X' * 50000
self._write_test([bigstring,bigstring], '%s,%s' % \
(bigstring, bigstring))
- # TODO: RUSTPYTHON quoting style check is unsupported
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_quoting(self):
self._write_test(['a',1,'p,q'], 'a,1,"p,q"')
@@ -196,7 +233,7 @@ def test_write_quoting(self):
self._write_test(['a','',None,1], '"a","",,"1"',
quoting = csv.QUOTE_NOTNULL)
- # TODO: RUSTPYTHON doublequote check is unsupported
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_escape(self):
self._write_test(['a',1,'p,q'], 'a,1,"p,q"',
@@ -229,7 +266,7 @@ def test_write_escape(self):
self._write_test(['C\\', '6', '7', 'X"'], 'C\\\\,6,7,"X"""',
escapechar='\\', quoting=csv.QUOTE_MINIMAL)
- # TODO: RUSTPYTHON lineterminator double char unsupported
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_lineterminator(self):
for lineterminator in '\r\n', '\n', '\r', '!@#', '\0':
@@ -238,11 +275,13 @@ def test_write_lineterminator(self):
writer = csv.writer(sio, lineterminator=lineterminator)
writer.writerow(['a', 'b'])
writer.writerow([1, 2])
+ writer.writerow(['\r', '\n'])
self.assertEqual(sio.getvalue(),
f'a,b{lineterminator}'
- f'1,2{lineterminator}')
+ f'1,2{lineterminator}'
+ f'"\r","\n"{lineterminator}')
- # TODO: RUSTPYTHON ''\r\n to ""\r\n unspported
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_iterable(self):
self._write_test(iter(['a', 1, 'p,q']), 'a,1,"p,q"')
@@ -285,6 +324,53 @@ def test_writerows_with_none(self):
fileobj.seek(0)
self.assertEqual(fileobj.read(), 'a\r\n""\r\n')
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_write_empty_fields(self):
+ self._write_test((), '')
+ self._write_test([''], '""')
+ self._write_error_test(csv.Error, [''], quoting=csv.QUOTE_NONE)
+ self._write_test([''], '""', quoting=csv.QUOTE_STRINGS)
+ self._write_test([''], '""', quoting=csv.QUOTE_NOTNULL)
+ self._write_test([None], '""')
+ self._write_error_test(csv.Error, [None], quoting=csv.QUOTE_NONE)
+ self._write_error_test(csv.Error, [None], quoting=csv.QUOTE_STRINGS)
+ self._write_error_test(csv.Error, [None], quoting=csv.QUOTE_NOTNULL)
+ self._write_test(['', ''], ',')
+ self._write_test([None, None], ',')
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_write_empty_fields_space_delimiter(self):
+ self._write_test([''], '""', delimiter=' ', skipinitialspace=False)
+ self._write_test([''], '""', delimiter=' ', skipinitialspace=True)
+ self._write_test([None], '""', delimiter=' ', skipinitialspace=False)
+ self._write_test([None], '""', delimiter=' ', skipinitialspace=True)
+
+ self._write_test(['', ''], ' ', delimiter=' ', skipinitialspace=False)
+ self._write_test(['', ''], '"" ""', delimiter=' ', skipinitialspace=True)
+ self._write_test([None, None], ' ', delimiter=' ', skipinitialspace=False)
+ self._write_test([None, None], '"" ""', delimiter=' ', skipinitialspace=True)
+
+ self._write_test(['', ''], ' ', delimiter=' ', skipinitialspace=False,
+ quoting=csv.QUOTE_NONE)
+ self._write_error_test(csv.Error, ['', ''],
+ delimiter=' ', skipinitialspace=True,
+ quoting=csv.QUOTE_NONE)
+ for quoting in csv.QUOTE_STRINGS, csv.QUOTE_NOTNULL:
+ self._write_test(['', ''], '"" ""', delimiter=' ', skipinitialspace=False,
+ quoting=quoting)
+ self._write_test(['', ''], '"" ""', delimiter=' ', skipinitialspace=True,
+ quoting=quoting)
+
+ for quoting in csv.QUOTE_NONE, csv.QUOTE_STRINGS, csv.QUOTE_NOTNULL:
+ self._write_test([None, None], ' ', delimiter=' ', skipinitialspace=False,
+ quoting=quoting)
+ self._write_error_test(csv.Error, [None, None],
+ delimiter=' ', skipinitialspace=True,
+ quoting=quoting)
+
def test_writerows_errors(self):
with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
writer = csv.writer(fileobj)
@@ -296,7 +382,7 @@ def _read_test(self, input, expect, **kwargs):
result = list(reader)
self.assertEqual(result, expect)
- # TODO RUSTPYTHON strict mode is unsupported
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_oddinputs(self):
self._read_test([], [])
@@ -308,16 +394,23 @@ def test_read_oddinputs(self):
self.assertRaises(csv.Error, self._read_test,
[b'abc'], None)
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
def test_read_eol(self):
- self._read_test(['a,b'], [['a','b']])
- self._read_test(['a,b\n'], [['a','b']])
- self._read_test(['a,b\r\n'], [['a','b']])
- self._read_test(['a,b\r'], [['a','b']])
- self.assertRaises(csv.Error, self._read_test, ['a,b\rc,d'], [])
- self.assertRaises(csv.Error, self._read_test, ['a,b\nc,d'], [])
- self.assertRaises(csv.Error, self._read_test, ['a,b\r\nc,d'], [])
-
- # TODO RUSTPYTHON double quote umimplement
+ self._read_test(['a,b', 'c,d'], [['a','b'], ['c','d']])
+ self._read_test(['a,b\n', 'c,d\n'], [['a','b'], ['c','d']])
+ self._read_test(['a,b\r\n', 'c,d\r\n'], [['a','b'], ['c','d']])
+ self._read_test(['a,b\r', 'c,d\r'], [['a','b'], ['c','d']])
+
+ errmsg = "with newline=''"
+ with self.assertRaisesRegex(csv.Error, errmsg):
+ next(csv.reader(['a,b\rc,d']))
+ with self.assertRaisesRegex(csv.Error, errmsg):
+ next(csv.reader(['a,b\nc,d']))
+ with self.assertRaisesRegex(csv.Error, errmsg):
+ next(csv.reader(['a,b\r\nc,d']))
+
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_eof(self):
self._read_test(['a,"'], [['a', '']])
@@ -328,7 +421,7 @@ def test_read_eof(self):
self.assertRaises(csv.Error, self._read_test,
['^'], [], escapechar='^', strict=True)
- # TODO RUSTPYTHON
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_nul(self):
self._read_test(['\0'], [['\0']])
@@ -342,7 +435,7 @@ def test_read_delimiter(self):
self._read_test(['a;b;c'], [['a', 'b', 'c']], delimiter=';')
self._read_test(['a\0b\0c'], [['a', 'b', 'c']], delimiter='\0')
- # TODO RUSTPYTHON
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_escape(self):
self._read_test(['a,\\b,c'], [['a', 'b', 'c']], escapechar='\\')
@@ -356,7 +449,7 @@ def test_read_escape(self):
self._read_test(['a,\\b,c'], [['a', '\\b', 'c']], escapechar=None)
self._read_test(['a,\\b,c'], [['a', '\\b', 'c']])
- # TODO RUSTPYTHON escapechar unsupported
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_quoting(self):
self._read_test(['1,",3,",5'], [['1', ',3,', '5']])
@@ -367,17 +460,58 @@ def test_read_quoting(self):
# will this fail where locale uses comma for decimals?
self._read_test([',3,"5",7.3, 9'], [['', 3, '5', 7.3, 9]],
quoting=csv.QUOTE_NONNUMERIC)
+ self._read_test([',3,"5",7.3, 9'], [[None, '3', '5', '7.3', ' 9']],
+ quoting=csv.QUOTE_NOTNULL)
+ self._read_test([',3,"5",7.3, 9'], [[None, 3, '5', 7.3, 9]],
+ quoting=csv.QUOTE_STRINGS)
+
+ self._read_test([',,"",'], [['', '', '', '']])
+ self._read_test([',,"",'], [['', '', '', '']],
+ quoting=csv.QUOTE_NONNUMERIC)
+ self._read_test([',,"",'], [[None, None, '', None]],
+ quoting=csv.QUOTE_NOTNULL)
+ self._read_test([',,"",'], [[None, None, '', None]],
+ quoting=csv.QUOTE_STRINGS)
+
self._read_test(['"a\nb", 7'], [['a\nb', ' 7']])
self.assertRaises(ValueError, self._read_test,
['abc,3'], [[]],
quoting=csv.QUOTE_NONNUMERIC)
+ self.assertRaises(ValueError, self._read_test,
+ ['abc,3'], [[]],
+ quoting=csv.QUOTE_STRINGS)
self._read_test(['1,@,3,@,5'], [['1', ',3,', '5']], quotechar='@')
self._read_test(['1,\0,3,\0,5'], [['1', ',3,', '5']], quotechar='\0')
+ self._read_test(['1\\.5,\\.5,.5'], [[1.5, 0.5, 0.5]],
+ quoting=csv.QUOTE_NONNUMERIC, escapechar='\\')
+ self._read_test(['1\\.5,\\.5,"\\.5"'], [[1.5, 0.5, ".5"]],
+ quoting=csv.QUOTE_STRINGS, escapechar='\\')
+ # TODO: RUSTPYTHON; panic
+ @unittest.skip("TODO: RUSTPYTHON; slice index starts at 1 but ends at 0")
def test_read_skipinitialspace(self):
self._read_test(['no space, space, spaces,\ttab'],
[['no space', 'space', 'spaces', '\ttab']],
skipinitialspace=True)
+ self._read_test([' , , '],
+ [['', '', '']],
+ skipinitialspace=True)
+ self._read_test([' , , '],
+ [[None, None, None]],
+ skipinitialspace=True, quoting=csv.QUOTE_NOTNULL)
+ self._read_test([' , , '],
+ [[None, None, None]],
+ skipinitialspace=True, quoting=csv.QUOTE_STRINGS)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_read_space_delimiter(self):
+ self._read_test(['a b', ' a ', ' ', ''],
+ [['a', '', '', 'b'], ['', '', 'a', '', ''], ['', '', ''], []],
+ delimiter=' ', skipinitialspace=False)
+ self._read_test(['a b', ' a ', ' ', ''],
+ [['a', 'b'], ['a', ''], [''], []],
+ delimiter=' ', skipinitialspace=True)
def test_read_bigfield(self):
# This exercises the buffer realloc functionality and field size
@@ -410,27 +544,49 @@ def test_read_linenum(self):
self.assertRaises(StopIteration, next, r)
self.assertEqual(r.line_num, 3)
- # TODO: RUSTPYTHON only '\r\n' unsupported
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_roundtrip_quoteed_newlines(self):
- with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
- writer = csv.writer(fileobj)
- rows = [['a\nb','b'],['c','x\r\nd']]
- writer.writerows(rows)
- fileobj.seek(0)
- for i, row in enumerate(csv.reader(fileobj)):
- self.assertEqual(row, rows[i])
+ rows = [
+ ['\na', 'b\nc', 'd\n'],
+ ['\re', 'f\rg', 'h\r'],
+ ['\r\ni', 'j\r\nk', 'l\r\n'],
+ ['\n\rm', 'n\n\ro', 'p\n\r'],
+ ['\r\rq', 'r\r\rs', 't\r\r'],
+ ['\n\nu', 'v\n\nw', 'x\n\n'],
+ ]
+ for lineterminator in '\r\n', '\n', '\r':
+ with self.subTest(lineterminator=lineterminator):
+ with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
+ writer = csv.writer(fileobj, lineterminator=lineterminator)
+ writer.writerows(rows)
+ fileobj.seek(0)
+ for i, row in enumerate(csv.reader(fileobj)):
+ self.assertEqual(row, rows[i])
- # TODO: RUSTPYTHON only '\r\n' unsupported
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_roundtrip_escaped_unquoted_newlines(self):
- with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
- writer = csv.writer(fileobj,quoting=csv.QUOTE_NONE,escapechar="\\")
- rows = [['a\nb','b'],['c','x\r\nd']]
- writer.writerows(rows)
- fileobj.seek(0)
- for i, row in enumerate(csv.reader(fileobj,quoting=csv.QUOTE_NONE,escapechar="\\")):
- self.assertEqual(row,rows[i])
+ rows = [
+ ['\na', 'b\nc', 'd\n'],
+ ['\re', 'f\rg', 'h\r'],
+ ['\r\ni', 'j\r\nk', 'l\r\n'],
+ ['\n\rm', 'n\n\ro', 'p\n\r'],
+ ['\r\rq', 'r\r\rs', 't\r\r'],
+ ['\n\nu', 'v\n\nw', 'x\n\n'],
+ ]
+ for lineterminator in '\r\n', '\n', '\r':
+ with self.subTest(lineterminator=lineterminator):
+ with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
+ writer = csv.writer(fileobj, lineterminator=lineterminator,
+ quoting=csv.QUOTE_NONE, escapechar="\\")
+ writer.writerows(rows)
+ fileobj.seek(0)
+ for i, row in enumerate(csv.reader(fileobj,
+ quoting=csv.QUOTE_NONE,
+ escapechar="\\")):
+ self.assertEqual(row, rows[i])
+
class TestDialectRegistry(unittest.TestCase):
def test_registry_badargs(self):
@@ -509,10 +665,10 @@ class space(csv.excel):
escapechar = "\\"
with TemporaryFile("w+", encoding="utf-8") as fileobj:
- fileobj.write("abc def\nc1ccccc1 benzene\n")
+ fileobj.write("abc def\nc1ccccc1 benzene\n")
fileobj.seek(0)
reader = csv.reader(fileobj, dialect=space())
- self.assertEqual(next(reader), ["abc", "def"])
+ self.assertEqual(next(reader), ["abc", "", "", "def"])
self.assertEqual(next(reader), ["c1ccccc1", "benzene"])
def compare_dialect_123(self, expected, *writeargs, **kwwriteargs):
@@ -556,14 +712,6 @@ class unspecified():
finally:
csv.unregister_dialect('testC')
- def test_bad_dialect(self):
- # Unknown parameter
- self.assertRaises(TypeError, csv.reader, [], bad_attr = 0)
- # Bad values
- self.assertRaises(TypeError, csv.reader, [], delimiter = None)
- self.assertRaises(TypeError, csv.reader, [], quoting = -1)
- self.assertRaises(TypeError, csv.reader, [], quoting = 100)
-
def test_copy(self):
for name in csv.list_dialects():
dialect = csv.get_dialect(name)
@@ -657,7 +805,7 @@ def test_quoted_quote(self):
'"I see," said the blind man',
'as he picked up his hammer and saw']])
- # Rustpython TODO
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_quoted_nl(self):
input = '''\
@@ -699,12 +847,12 @@ class EscapedExcel(csv.excel):
class TestEscapedExcel(TestCsvBase):
dialect = EscapedExcel()
- # TODO RUSTPYTHON
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_escape_fieldsep(self):
self.writerAssertEqual([['abc,def']], 'abc\\,def\r\n')
- # TODO RUSTPYTHON
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_escape_fieldsep(self):
self.readerAssertEqual('abc\\,def\r\n', [['abc,def']])
@@ -712,7 +860,7 @@ def test_read_escape_fieldsep(self):
class TestDialectUnix(TestCsvBase):
dialect = 'unix'
- # TODO RUSTPYTHON
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_simple_writer(self):
self.writerAssertEqual([[1, 'abc def', 'abc']], '"1","abc def","abc"\n')
@@ -730,7 +878,7 @@ class TestQuotedEscapedExcel(TestCsvBase):
def test_write_escape_fieldsep(self):
self.writerAssertEqual([['abc,def']], '"abc,def"\r\n')
- # TODO RUSTPYTHON
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_escape_fieldsep(self):
self.readerAssertEqual('"abc\\,def"\r\n', [['abc,def']])
@@ -928,7 +1076,7 @@ def test_read_multi(self):
"s1": 'abc',
"s2": 'def'})
- # TODO RUSTPYTHON
+ # TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_with_blanks(self):
reader = csv.DictReader(["1,2,abc,4,5,6\r\n","\r\n",
@@ -981,9 +1129,11 @@ def test_float_write(self):
fileobj.seek(0)
self.assertEqual(fileobj.read(), expected)
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
def test_char_write(self):
import array, string
- a = array.array('u', string.ascii_letters)
+ a = array.array('w', string.ascii_letters)
with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
writer = csv.writer(fileobj, dialect="excel")
@@ -1007,6 +1157,12 @@ class mydialect(csv.Dialect):
mydialect.quoting = None
self.assertRaises(csv.Error, mydialect)
+ mydialect.quoting = 42
+ with self.assertRaises(csv.Error) as cm:
+ mydialect()
+ self.assertEqual(str(cm.exception),
+ 'bad "quoting" value')
+
mydialect.doublequote = True
mydialect.quoting = csv.QUOTE_ALL
mydialect.quotechar = '"'
@@ -1122,11 +1278,18 @@ class mydialect(csv.Dialect):
self.assertEqual(str(cm.exception),
'"lineterminator" must be a string')
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
def test_invalid_chars(self):
- def create_invalid(field_name, value):
+ def create_invalid(field_name, value, **kwargs):
class mydialect(csv.Dialect):
- pass
+ delimiter = ','
+ quoting = csv.QUOTE_ALL
+ quotechar = '"'
+ lineterminator = '\r\n'
setattr(mydialect, field_name, value)
+ for field_name, value in kwargs.items():
+ setattr(mydialect, field_name, value)
d = mydialect()
for field_name in ("delimiter", "escapechar", "quotechar"):
@@ -1135,6 +1298,11 @@ class mydialect(csv.Dialect):
self.assertRaises(csv.Error, create_invalid, field_name, "abc")
self.assertRaises(csv.Error, create_invalid, field_name, b'x')
self.assertRaises(csv.Error, create_invalid, field_name, 5)
+ self.assertRaises(ValueError, create_invalid, field_name, "\n")
+ self.assertRaises(ValueError, create_invalid, field_name, "\r")
+ if field_name != "delimiter":
+ self.assertRaises(ValueError, create_invalid, field_name, " ",
+ skipinitialspace=True)
class TestSniffer(unittest.TestCase):
@@ -1451,8 +1619,7 @@ def test_ordered_dict_reader(self):
class MiscTestCase(unittest.TestCase):
def test__all__(self):
- extra = {'__doc__', '__version__'}
- support.check__all__(self, csv, ('csv', '_csv'), extra=extra)
+ support.check__all__(self, csv, ('csv', '_csv'))
def test_subclassable(self):
# issue 44089
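The dialect hunks above depend on csv.Dialect subclasses being validated when an instance is created rather than when the class body runs: the underlying _csv module rejects a bad attribute with TypeError and csv.Dialect re-raises it as csv.Error. A minimal sketch of that behaviour (the class name MyDialect is illustrative; the patched test expects the message 'bad "quoting" value'):

    import csv

    class MyDialect(csv.Dialect):
        delimiter = ','
        quotechar = '"'
        lineterminator = '\r\n'
        quoting = 42                  # not one of the csv.QUOTE_* constants

    try:
        MyDialect()                   # validation happens here, not at class creation
    except csv.Error as exc:
        print(exc)                    # e.g. bad "quoting" value
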
diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py
index 0493d6a41d..163ca92bb4 100644
--- a/Lib/test/test_decimal.py
+++ b/Lib/test/test_decimal.py
@@ -24,6 +24,7 @@
with the corresponding argument.
"""
+import logging
import math
import os, sys
import operator
@@ -812,8 +813,6 @@ def test_explicit_context_create_from_float(self):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_unicode_digits(self):
Decimal = self.decimal.Decimal
@@ -831,6 +830,11 @@ class CExplicitConstructionTest(ExplicitConstructionTest, unittest.TestCase):
class PyExplicitConstructionTest(ExplicitConstructionTest, unittest.TestCase):
decimal = P
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_unicode_digits(self): # TODO(RUSTPYTHON): Remove this test when it passes
+ return super().test_unicode_digits()
+
class ImplicitConstructionTest:
'''Unit tests for Implicit Construction cases of Decimal.'''
@@ -916,8 +920,6 @@ class PyImplicitConstructionTest(ImplicitConstructionTest, unittest.TestCase):
class FormatTest:
'''Unit tests for the format function.'''
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_formatting(self):
Decimal = self.decimal.Decimal
@@ -1113,6 +1115,13 @@ def test_formatting(self):
('z>z6.1f', '-0.', 'zzz0.0'),
('x>z6.1f', '-0.', 'xxx0.0'),
('🖤>z6.1f', '-0.', '🖤🖤🖤0.0'), # multi-byte fill char
+ ('\x00>z6.1f', '-0.', '\x00\x00\x000.0'), # null fill char
+
+ # issue 114563 ('z' format on F type in cdecimal)
+ ('z3,.10F', '-6.24E-323', '0.0000000000'),
+
+ # issue 91060 ('#' format in cdecimal)
+ ('#', '0', '0.'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
@@ -1128,8 +1137,6 @@ def test_formatting(self):
# bytes format argument
self.assertRaises(TypeError, Decimal(1).__format__, b'-020')
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_negative_zero_format_directed_rounding(self):
with self.decimal.localcontext() as ctx:
ctx.rounding = ROUND_CEILING
@@ -1228,7 +1235,31 @@ def get_fmt(x, override=None, fmt='n'):
self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
'-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')
- @run_with_locale('LC_ALL', 'ps_AF')
+ def test_deprecated_N_format(self):
+ Decimal = self.decimal.Decimal
+ h = Decimal('6.62607015e-34')
+ if self.decimal == C:
+ with self.assertWarns(DeprecationWarning) as cm:
+ r = format(h, 'N')
+ self.assertEqual(cm.filename, __file__)
+ self.assertEqual(r, format(h, 'n').upper())
+ with self.assertWarns(DeprecationWarning) as cm:
+ r = format(h, '010.3N')
+ self.assertEqual(cm.filename, __file__)
+ self.assertEqual(r, format(h, '010.3n').upper())
+ else:
+ self.assertRaises(ValueError, format, h, 'N')
+ self.assertRaises(ValueError, format, h, '010.3N')
+ with warnings_helper.check_no_warnings(self):
+ self.assertEqual(format(h, 'N>10.3'), 'NN6.63E-34')
+ self.assertEqual(format(h, 'N>10.3n'), 'NN6.63e-34')
+ self.assertEqual(format(h, 'N>10.3e'), 'N6.626e-34')
+ self.assertEqual(format(h, 'N>10.3f'), 'NNNNN0.000')
+ self.assertRaises(ValueError, format, h, '>Nf')
+ self.assertRaises(ValueError, format, h, '10Nf')
+ self.assertRaises(ValueError, format, h, 'Nx')
+
+ @run_with_locale('LC_ALL', 'ps_AF', '')
def test_wide_char_separator_decimal_point(self):
# locale with wide char separator and decimal point
Decimal = self.decimal.Decimal
@@ -1911,8 +1942,6 @@ def hashit(d):
x = 1100 ** 1248
self.assertEqual(hashit(Decimal(x)), hashit(x))
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_hash_method_nan(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, hash, Decimal('sNaN'))
@@ -2048,7 +2077,9 @@ def test_tonum_methods(self):
#to quantize, which is already extensively tested
test_triples = [
('123.456', -4, '0E+4'),
+ ('-123.456', -4, '-0E+4'),
('123.456', -3, '0E+3'),
+ ('-123.456', -3, '-0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
@@ -2718,8 +2749,6 @@ def test_quantize(self):
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_complex(self):
Decimal = self.decimal.Decimal
@@ -2894,6 +2923,11 @@ class CPythonAPItests(PythonAPItests, unittest.TestCase):
class PyPythonAPItests(PythonAPItests, unittest.TestCase):
decimal = P
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_complex(self): # TODO(RUSTPYTHON): Remove this test when it passes
+ return super().test_complex()
+
class ContextAPItests:
def test_none_args(self):
@@ -3663,8 +3697,6 @@ def test_localcontextarg(self):
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_localcontext_kwargs(self):
with self.decimal.localcontext(
prec=10, rounding=ROUND_HALF_DOWN,
@@ -3693,8 +3725,6 @@ def test_localcontext_kwargs(self):
self.assertRaises(TypeError, self.decimal.localcontext, Emin="")
self.assertRaises(TypeError, self.decimal.localcontext, Emax="")
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_local_context_kwargs_does_not_overwrite_existing_argument(self):
ctx = self.decimal.getcontext()
orig_prec = ctx.prec
@@ -4362,7 +4392,8 @@ def test_module_attributes(self):
self.assertEqual(C.__version__, P.__version__)
- self.assertEqual(dir(C), dir(P))
+ self.assertLessEqual(set(dir(C)), set(dir(P)))
+ self.assertEqual([n for n in dir(C) if n[:2] != '__'], sorted(P.__all__))
def test_context_attributes(self):
@@ -4438,6 +4469,15 @@ def test_implicit_context(self):
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
+ if self.decimal == C:
+ self.assertEqual(pow(10, Decimal(2), 7), 2)
+ self.assertEqual(pow(10, 2, Decimal(7)), 2)
+ else:
+ # XXX: Three-arg power doesn't use __rpow__.
+ self.assertRaises(TypeError, pow, 10, Decimal(2), 7)
+ # XXX: There is no special method to dispatch on the
+ # third arg of three-arg power.
+ self.assertRaises(TypeError, pow, 10, 2, Decimal(7))
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
@@ -4648,6 +4688,11 @@ def tearDown(self):
sys.set_int_max_str_digits(self._previous_int_limit)
super().tearDown()
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_implicit_context(self): # TODO(RUSTPYTHON): Remove this test when it passes
+ return super().test_implicit_context()
+
class PyFunctionality(unittest.TestCase):
"""Extra functionality in decimal.py"""
@@ -4699,9 +4744,33 @@ def test_py_exact_power(self):
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
+ self.assertEqual(x, Decimal('3e-6'))
+ c.prec = 2
+ x = Decimal("152587890625") ** Decimal('-0.5')
+ self.assertEqual(x, Decimal('2.6e-6'))
+ c.prec = 3
+ x = Decimal("152587890625") ** Decimal('-0.5')
+ self.assertEqual(x, Decimal('2.56e-6'))
+ c.prec = 28
+ x = Decimal("152587890625") ** Decimal('-0.5')
+ self.assertEqual(x, Decimal('2.56e-6'))
+
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
+ # See https://github.com/python/cpython/issues/118027
+ # Testing for an exact power could appear to hang, in the Python
+ # version, as it attempted to compute 10**(MAX_EMAX + 1).
+ # Fixed via https://github.com/python/cpython/pull/118503.
+ c.prec = P.MAX_PREC
+ c.Emax = P.MAX_EMAX
+ c.Emin = P.MIN_EMIN
+ c.traps[P.Inexact] = 1
+ D2 = Decimal(2)
+ # If the bug is still present, the next statement won't complete.
+ res = D2 ** 117
+ self.assertEqual(res, 1 << 117)
+
def test_py_immutability_operations(self):
# Do operations and check that it didn't change internal objects.
Decimal = P.Decimal
@@ -5625,6 +5694,25 @@ def __abs__(self):
self.assertEqual(Decimal.from_float(cls(101.1)),
Decimal.from_float(101.1))
+ def test_c_immutable_types(self):
+ SignalDict = type(C.Context().flags)
+ SignalDictMixin = SignalDict.__bases__[0]
+ ContextManager = type(C.localcontext())
+ types = (
+ SignalDictMixin,
+ ContextManager,
+ C.Decimal,
+ C.Context,
+ )
+ for tp in types:
+ with self.subTest(tp=tp):
+ with self.assertRaisesRegex(TypeError, "immutable"):
+ tp.foo = 1
+
+ def test_c_disallow_instantiation(self):
+ ContextManager = type(C.localcontext())
+ check_disallow_instantiation(self, ContextManager)
+
def test_c_signaldict_segfault(self):
# See gh-106263 for details.
SignalDict = type(C.Context().flags)
@@ -5655,6 +5743,20 @@ def test_c_signaldict_segfault(self):
with self.assertRaisesRegex(ValueError, err_msg):
sd.copy()
+ def test_format_fallback_capitals(self):
+ # Fallback to _pydecimal formatting (triggered by `#` format which
+ # is unsupported by mpdecimal) should honor the current context.
+ x = C.Decimal('6.09e+23')
+ self.assertEqual(format(x, '#'), '6.09E+23')
+ with C.localcontext(capitals=0):
+ self.assertEqual(format(x, '#'), '6.09e+23')
+
+ def test_format_fallback_rounding(self):
+ y = C.Decimal('6.09')
+ self.assertEqual(format(y, '#.1f'), '6.1')
+ with C.localcontext(rounding=C.ROUND_DOWN):
+ self.assertEqual(format(y, '#.1f'), '6.0')
+
@requires_docstrings
@requires_cdecimal
class SignatureTest(unittest.TestCase):
@@ -5818,13 +5920,17 @@ def load_tests(loader, tests, pattern):
if TODO_TESTS is None:
from doctest import DocTestSuite, IGNORE_EXCEPTION_DETAIL
+ orig_context = orig_sys_decimal.getcontext().copy()
for mod in C, P:
if not mod:
continue
def setUp(slf, mod=mod):
sys.modules['decimal'] = mod
- def tearDown(slf):
+ init(mod)
+ def tearDown(slf, mod=mod):
sys.modules['decimal'] = orig_sys_decimal
+ mod.setcontext(ORIGINAL_CONTEXT[mod].copy())
+ orig_sys_decimal.setcontext(orig_context.copy())
optionflags = IGNORE_EXCEPTION_DETAIL if mod is C else 0
sys.modules['decimal'] = mod
tests.addTest(DocTestSuite(mod, setUp=setUp, tearDown=tearDown,
@@ -5839,11 +5945,12 @@ def setUpModule():
TEST_ALL = ARITH if ARITH is not None else is_resource_enabled('decimal')
def tearDownModule():
- if C: C.setcontext(ORIGINAL_CONTEXT[C])
- P.setcontext(ORIGINAL_CONTEXT[P])
+ if C: C.setcontext(ORIGINAL_CONTEXT[C].copy())
+ P.setcontext(ORIGINAL_CONTEXT[P].copy())
if not C:
- warnings.warn('C tests skipped: no module named _decimal.',
- UserWarning)
+ logging.getLogger(__name__).warning(
+ 'C tests skipped: no module named _decimal.'
+ )
if not orig_sys_decimal is sys.modules['decimal']:
raise TestFailed("Internal error: unbalanced number of changes to "
"sys.modules['decimal'].")
diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py
index 7698c340c8..6f1849738c 100644
--- a/Lib/test/test_descr.py
+++ b/Lib/test/test_descr.py
@@ -5074,8 +5074,6 @@ class Child(Parent):
gc.collect()
self.assertEqual(Parent.__subclasses__(), [])
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test_attr_raise_through_property(self):
# test case for gh-103272
class A:
diff --git a/Lib/test/test_enumerate.py b/Lib/test/test_enumerate.py
index 5785cb4649..5cb54cff9b 100644
--- a/Lib/test/test_enumerate.py
+++ b/Lib/test/test_enumerate.py
@@ -2,6 +2,7 @@
import operator
import sys
import pickle
+import gc
from test import support
@@ -127,6 +128,18 @@ def test_argumentcheck(self):
self.assertRaises(TypeError, self.enum, 'abc', 'a') # wrong type
self.assertRaises(TypeError, self.enum, 'abc', 2, 3) # too many arguments
+ def test_kwargs(self):
+ self.assertEqual(list(self.enum(iterable=Ig(self.seq))), self.res)
+ expected = list(self.enum(Ig(self.seq), 0))
+ self.assertEqual(list(self.enum(iterable=Ig(self.seq), start=0)),
+ expected)
+ self.assertEqual(list(self.enum(start=0, iterable=Ig(self.seq))),
+ expected)
+ self.assertRaises(TypeError, self.enum, iterable=[], x=3)
+ self.assertRaises(TypeError, self.enum, start=0, x=3)
+ self.assertRaises(TypeError, self.enum, x=0, y=3)
+ self.assertRaises(TypeError, self.enum, x=0)
+
@support.cpython_only
def test_tuple_reuse(self):
# Tests an implementation detail where tuple is reused
@@ -134,6 +147,18 @@ def test_tuple_reuse(self):
self.assertEqual(len(set(map(id, list(enumerate(self.seq))))), len(self.seq))
self.assertEqual(len(set(map(id, enumerate(self.seq)))), min(1,len(self.seq)))
+ @support.cpython_only
+ def test_enumerate_result_gc(self):
+ # bpo-42536: enumerate's tuple-reuse speed trick breaks the GC's
+ # assumptions about what can be untracked. Make sure we re-track result
+ # tuples whenever we reuse them.
+ it = self.enum([[]])
+ gc.collect()
+ # That GC collection probably untracked the recycled internal result
+ # tuple, which is initialized to (None, None). Make sure it's re-tracked
+ # when it's mutated and returned from __next__:
+ self.assertTrue(gc.is_tracked(next(it)))
+
class MyEnum(enumerate):
pass
@@ -253,14 +278,16 @@ def test_basicfunction(self):
class TestStart(EnumerateStartTestCase):
+ def enum(self, iterable, start=11):
+ return enumerate(iterable, start=start)
- enum = lambda self, i: enumerate(i, start=11)
seq, res = 'abc', [(11, 'a'), (12, 'b'), (13, 'c')]
class TestLongStart(EnumerateStartTestCase):
+ def enum(self, iterable, start=sys.maxsize + 1):
+ return enumerate(iterable, start=start)
- enum = lambda self, i: enumerate(i, start=sys.maxsize+1)
seq, res = 'abc', [(sys.maxsize+1,'a'), (sys.maxsize+2,'b'),
(sys.maxsize+3,'c')]
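The new test_kwargs above, together with the rewrite of the lambda-based enum helpers into methods that accept a start parameter, exercises enumerate()'s keyword calling convention. A small illustrative sketch:

    # enumerate() accepts 'iterable' and 'start' positionally or by keyword;
    # unknown keyword arguments are rejected with TypeError.
    print(list(enumerate('abc', 11)))
    print(list(enumerate(iterable='abc', start=11)))
    print(list(enumerate(start=11, iterable='abc')))

    try:
        enumerate(iterable='abc', x=3)
    except TypeError as exc:
        print('rejected:', exc)
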
diff --git a/Lib/test/test_exception_group.py b/Lib/test/test_exception_group.py
index d0d81490df..ffe4dc4f35 100644
--- a/Lib/test/test_exception_group.py
+++ b/Lib/test/test_exception_group.py
@@ -1,7 +1,7 @@
import collections.abc
import types
import unittest
-from test.support import C_RECURSION_LIMIT
+from test.support import get_c_recursion_limit
class TestExceptionGroupTypeHierarchy(unittest.TestCase):
def test_exception_group_types(self):
@@ -300,6 +300,15 @@ def assertMatchesTemplate(self, exc, exc_type, template):
self.assertEqual(type(exc), type(template))
self.assertEqual(exc.args, template.args)
+class Predicate:
+ def __init__(self, func):
+ self.func = func
+
+ def __call__(self, e):
+ return self.func(e)
+
+ def method(self, e):
+ return self.func(e)
class ExceptionGroupSubgroupTests(ExceptionGroupTestBase):
def setUp(self):
@@ -307,10 +316,15 @@ def setUp(self):
self.eg_template = [ValueError(1), TypeError(int), ValueError(2)]
def test_basics_subgroup_split__bad_arg_type(self):
+ class C:
+ pass
+
bad_args = ["bad arg",
+ C,
OSError('instance not type'),
[OSError, TypeError],
- (OSError, 42)]
+ (OSError, 42),
+ ]
for arg in bad_args:
with self.assertRaises(TypeError):
self.eg.subgroup(arg)
@@ -342,10 +356,14 @@ def test_basics_subgroup_by_type__match(self):
self.assertMatchesTemplate(subeg, ExceptionGroup, template)
def test_basics_subgroup_by_predicate__passthrough(self):
- self.assertIs(self.eg, self.eg.subgroup(lambda e: True))
+ f = lambda e: True
+ for callable in [f, Predicate(f), Predicate(f).method]:
+ self.assertIs(self.eg, self.eg.subgroup(callable))
def test_basics_subgroup_by_predicate__no_match(self):
- self.assertIsNone(self.eg.subgroup(lambda e: False))
+ f = lambda e: False
+ for callable in [f, Predicate(f), Predicate(f).method]:
+ self.assertIsNone(self.eg.subgroup(callable))
def test_basics_subgroup_by_predicate__match(self):
eg = self.eg
@@ -356,9 +374,12 @@ def test_basics_subgroup_by_predicate__match(self):
((ValueError, TypeError), self.eg_template)]
for match_type, template in testcases:
- subeg = eg.subgroup(lambda e: isinstance(e, match_type))
- self.assertEqual(subeg.message, eg.message)
- self.assertMatchesTemplate(subeg, ExceptionGroup, template)
+ f = lambda e: isinstance(e, match_type)
+ for callable in [f, Predicate(f), Predicate(f).method]:
+ with self.subTest(callable=callable):
+ subeg = eg.subgroup(f)
+ self.assertEqual(subeg.message, eg.message)
+ self.assertMatchesTemplate(subeg, ExceptionGroup, template)
class ExceptionGroupSplitTests(ExceptionGroupTestBase):
@@ -405,14 +426,18 @@ def test_basics_split_by_type__match(self):
self.assertIsNone(rest)
def test_basics_split_by_predicate__passthrough(self):
- match, rest = self.eg.split(lambda e: True)
- self.assertMatchesTemplate(match, ExceptionGroup, self.eg_template)
- self.assertIsNone(rest)
+ f = lambda e: True
+ for callable in [f, Predicate(f), Predicate(f).method]:
+ match, rest = self.eg.split(callable)
+ self.assertMatchesTemplate(match, ExceptionGroup, self.eg_template)
+ self.assertIsNone(rest)
def test_basics_split_by_predicate__no_match(self):
- match, rest = self.eg.split(lambda e: False)
- self.assertIsNone(match)
- self.assertMatchesTemplate(rest, ExceptionGroup, self.eg_template)
+ f = lambda e: False
+ for callable in [f, Predicate(f), Predicate(f).method]:
+ match, rest = self.eg.split(callable)
+ self.assertIsNone(match)
+ self.assertMatchesTemplate(rest, ExceptionGroup, self.eg_template)
def test_basics_split_by_predicate__match(self):
eg = self.eg
@@ -426,20 +451,22 @@ def test_basics_split_by_predicate__match(self):
]
for match_type, match_template, rest_template in testcases:
- match, rest = eg.split(lambda e: isinstance(e, match_type))
- self.assertEqual(match.message, eg.message)
- self.assertMatchesTemplate(
- match, ExceptionGroup, match_template)
- if rest_template is not None:
- self.assertEqual(rest.message, eg.message)
+ f = lambda e: isinstance(e, match_type)
+ for callable in [f, Predicate(f), Predicate(f).method]:
+ match, rest = eg.split(callable)
+ self.assertEqual(match.message, eg.message)
self.assertMatchesTemplate(
- rest, ExceptionGroup, rest_template)
+ match, ExceptionGroup, match_template)
+ if rest_template is not None:
+ self.assertEqual(rest.message, eg.message)
+ self.assertMatchesTemplate(
+ rest, ExceptionGroup, rest_template)
class DeepRecursionInSplitAndSubgroup(unittest.TestCase):
def make_deep_eg(self):
e = TypeError(1)
- for i in range(C_RECURSION_LIMIT + 1):
+ for i in range(get_c_recursion_limit() + 1):
e = ExceptionGroup('eg', [e])
return e
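The Predicate helper added above checks that ExceptionGroup.subgroup() and split() accept any callable condition, whether a plain function, a callable instance or a bound method, not only exception types. A minimal sketch of the three call forms, assuming Python 3.11+ where ExceptionGroup is a builtin (all names are illustrative):

    class Predicate:
        def __init__(self, func):
            self.func = func
        def __call__(self, e):
            return self.func(e)
        def method(self, e):
            return self.func(e)

    eg = ExceptionGroup('eg', [ValueError(1), TypeError(int), ValueError(2)])
    is_value_error = lambda e: isinstance(e, ValueError)

    for condition in (is_value_error,
                      Predicate(is_value_error),
                      Predicate(is_value_error).method):
        match, rest = eg.split(condition)
        # match keeps the two ValueErrors, rest keeps the TypeError
        print([type(e).__name__ for e in match.exceptions],
              [type(e).__name__ for e in rest.exceptions])
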
diff --git a/Lib/test/test_exception_hierarchy.py b/Lib/test/test_exception_hierarchy.py
index efee88cd5e..e2f2844512 100644
--- a/Lib/test/test_exception_hierarchy.py
+++ b/Lib/test/test_exception_hierarchy.py
@@ -127,7 +127,6 @@ def test_windows_error(self):
else:
self.assertNotIn('winerror', dir(OSError))
- @unittest.skip("TODO: RUSTPYTHON")
def test_posix_error(self):
e = OSError(EEXIST, "File already exists", "foo.txt")
self.assertEqual(e.errno, EEXIST)
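The now-unskipped test_posix_error above relies on OSError's multi-argument constructor mapping its arguments onto errno, strerror and filename, the same mapping the testAttributes table in test_exceptions below spells out. A short sketch:

    from errno import EEXIST

    e = OSError(EEXIST, 'File already exists', 'foo.txt')
    print(e.errno == EEXIST)   # True
    print(e.strerror)          # File already exists
    print(e.filename)          # foo.txt
    print(e.args)              # only (errno, strerror) land in args
    print(e)                   # e.g. [Errno 17] File already exists: 'foo.txt'
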
diff --git a/Lib/test/test_exception_variations.py b/Lib/test/test_exception_variations.py
index d874b0e3d1..e103eaf846 100644
--- a/Lib/test/test_exception_variations.py
+++ b/Lib/test/test_exception_variations.py
@@ -1,7 +1,7 @@
import unittest
-class ExceptionTestCase(unittest.TestCase):
+class ExceptTestCases(unittest.TestCase):
def test_try_except_else_finally(self):
hit_except = False
hit_else = False
@@ -172,5 +172,406 @@ def test_nested_else(self):
self.assertTrue(hit_finally)
self.assertTrue(hit_except)
+ def test_nested_exception_in_except(self):
+ hit_else = False
+ hit_finally = False
+ hit_except = False
+ hit_inner_except = False
+ hit_inner_else = False
+
+ try:
+ try:
+ raise Exception('inner exception')
+ except:
+ hit_inner_except = True
+ raise Exception('outer exception')
+ else:
+ hit_inner_else = True
+ except:
+ hit_except = True
+ else:
+ hit_else = True
+ finally:
+ hit_finally = True
+
+ self.assertTrue(hit_inner_except)
+ self.assertFalse(hit_inner_else)
+ self.assertFalse(hit_else)
+ self.assertTrue(hit_finally)
+ self.assertTrue(hit_except)
+
+ def test_nested_exception_in_else(self):
+ hit_else = False
+ hit_finally = False
+ hit_except = False
+ hit_inner_except = False
+ hit_inner_else = False
+
+ try:
+ try:
+ pass
+ except:
+ hit_inner_except = True
+ else:
+ hit_inner_else = True
+ raise Exception('outer exception')
+ except:
+ hit_except = True
+ else:
+ hit_else = True
+ finally:
+ hit_finally = True
+
+ self.assertFalse(hit_inner_except)
+ self.assertTrue(hit_inner_else)
+ self.assertFalse(hit_else)
+ self.assertTrue(hit_finally)
+ self.assertTrue(hit_except)
+
+ def test_nested_exception_in_finally_no_exception(self):
+ hit_else = False
+ hit_finally = False
+ hit_except = False
+ hit_inner_except = False
+ hit_inner_else = False
+ hit_inner_finally = False
+
+ try:
+ try:
+ pass
+ except:
+ hit_inner_except = True
+ else:
+ hit_inner_else = True
+ finally:
+ hit_inner_finally = True
+ raise Exception('outer exception')
+ except:
+ hit_except = True
+ else:
+ hit_else = True
+ finally:
+ hit_finally = True
+
+ self.assertFalse(hit_inner_except)
+ self.assertTrue(hit_inner_else)
+ self.assertTrue(hit_inner_finally)
+ self.assertFalse(hit_else)
+ self.assertTrue(hit_finally)
+ self.assertTrue(hit_except)
+
+ def test_nested_exception_in_finally_with_exception(self):
+ hit_else = False
+ hit_finally = False
+ hit_except = False
+ hit_inner_except = False
+ hit_inner_else = False
+ hit_inner_finally = False
+
+ try:
+ try:
+ raise Exception('inner exception')
+ except:
+ hit_inner_except = True
+ else:
+ hit_inner_else = True
+ finally:
+ hit_inner_finally = True
+ raise Exception('outer exception')
+ except:
+ hit_except = True
+ else:
+ hit_else = True
+ finally:
+ hit_finally = True
+
+
+ self.assertTrue(hit_inner_except)
+ self.assertFalse(hit_inner_else)
+ self.assertTrue(hit_inner_finally)
+ self.assertFalse(hit_else)
+ self.assertTrue(hit_finally)
+ self.assertTrue(hit_except)
+
+
+# TODO: RUSTPYTHON
+'''
+class ExceptStarTestCases(unittest.TestCase):
+ def test_try_except_else_finally(self):
+ hit_except = False
+ hit_else = False
+ hit_finally = False
+
+ try:
+ raise Exception('nyaa!')
+ except* BaseException:
+ hit_except = True
+ else:
+ hit_else = True
+ finally:
+ hit_finally = True
+
+ self.assertTrue(hit_except)
+ self.assertTrue(hit_finally)
+ self.assertFalse(hit_else)
+
+ def test_try_except_else_finally_no_exception(self):
+ hit_except = False
+ hit_else = False
+ hit_finally = False
+
+ try:
+ pass
+ except* BaseException:
+ hit_except = True
+ else:
+ hit_else = True
+ finally:
+ hit_finally = True
+
+ self.assertFalse(hit_except)
+ self.assertTrue(hit_finally)
+ self.assertTrue(hit_else)
+
+ def test_try_except_finally(self):
+ hit_except = False
+ hit_finally = False
+
+ try:
+ raise Exception('yarr!')
+ except* BaseException:
+ hit_except = True
+ finally:
+ hit_finally = True
+
+ self.assertTrue(hit_except)
+ self.assertTrue(hit_finally)
+
+ def test_try_except_finally_no_exception(self):
+ hit_except = False
+ hit_finally = False
+
+ try:
+ pass
+ except* BaseException:
+ hit_except = True
+ finally:
+ hit_finally = True
+
+ self.assertFalse(hit_except)
+ self.assertTrue(hit_finally)
+
+ def test_try_except(self):
+ hit_except = False
+
+ try:
+ raise Exception('ahoy!')
+ except* BaseException:
+ hit_except = True
+
+ self.assertTrue(hit_except)
+
+ def test_try_except_no_exception(self):
+ hit_except = False
+
+ try:
+ pass
+ except* BaseException:
+ hit_except = True
+
+ self.assertFalse(hit_except)
+
+ def test_try_except_else(self):
+ hit_except = False
+ hit_else = False
+
+ try:
+ raise Exception('foo!')
+ except* BaseException:
+ hit_except = True
+ else:
+ hit_else = True
+
+ self.assertFalse(hit_else)
+ self.assertTrue(hit_except)
+
+ def test_try_except_else_no_exception(self):
+ hit_except = False
+ hit_else = False
+
+ try:
+ pass
+ except* BaseException:
+ hit_except = True
+ else:
+ hit_else = True
+
+ self.assertFalse(hit_except)
+ self.assertTrue(hit_else)
+
+ def test_try_finally_no_exception(self):
+ hit_finally = False
+
+ try:
+ pass
+ finally:
+ hit_finally = True
+
+ self.assertTrue(hit_finally)
+
+ def test_nested(self):
+ hit_finally = False
+ hit_inner_except = False
+ hit_inner_finally = False
+
+ try:
+ try:
+ raise Exception('inner exception')
+ except* BaseException:
+ hit_inner_except = True
+ finally:
+ hit_inner_finally = True
+ finally:
+ hit_finally = True
+
+ self.assertTrue(hit_inner_except)
+ self.assertTrue(hit_inner_finally)
+ self.assertTrue(hit_finally)
+
+ def test_nested_else(self):
+ hit_else = False
+ hit_finally = False
+ hit_except = False
+ hit_inner_except = False
+ hit_inner_else = False
+
+ try:
+ try:
+ pass
+ except* BaseException:
+ hit_inner_except = True
+ else:
+ hit_inner_else = True
+
+ raise Exception('outer exception')
+ except* BaseException:
+ hit_except = True
+ else:
+ hit_else = True
+ finally:
+ hit_finally = True
+
+ self.assertFalse(hit_inner_except)
+ self.assertTrue(hit_inner_else)
+ self.assertFalse(hit_else)
+ self.assertTrue(hit_finally)
+ self.assertTrue(hit_except)
+
+ def test_nested_mixed1(self):
+ hit_except = False
+ hit_finally = False
+ hit_inner_except = False
+ hit_inner_finally = False
+
+ try:
+ try:
+ raise Exception('inner exception')
+ except* BaseException:
+ hit_inner_except = True
+ finally:
+ hit_inner_finally = True
+ except:
+ hit_except = True
+ finally:
+ hit_finally = True
+
+ self.assertTrue(hit_inner_except)
+ self.assertTrue(hit_inner_finally)
+ self.assertFalse(hit_except)
+ self.assertTrue(hit_finally)
+
+ def test_nested_mixed2(self):
+ hit_except = False
+ hit_finally = False
+ hit_inner_except = False
+ hit_inner_finally = False
+
+ try:
+ try:
+ raise Exception('inner exception')
+ except:
+ hit_inner_except = True
+ finally:
+ hit_inner_finally = True
+ except* BaseException:
+ hit_except = True
+ finally:
+ hit_finally = True
+
+ self.assertTrue(hit_inner_except)
+ self.assertTrue(hit_inner_finally)
+ self.assertFalse(hit_except)
+ self.assertTrue(hit_finally)
+
+
+ def test_nested_else_mixed1(self):
+ hit_else = False
+ hit_finally = False
+ hit_except = False
+ hit_inner_except = False
+ hit_inner_else = False
+
+ try:
+ try:
+ pass
+ except* BaseException:
+ hit_inner_except = True
+ else:
+ hit_inner_else = True
+
+ raise Exception('outer exception')
+ except:
+ hit_except = True
+ else:
+ hit_else = True
+ finally:
+ hit_finally = True
+
+ self.assertFalse(hit_inner_except)
+ self.assertTrue(hit_inner_else)
+ self.assertFalse(hit_else)
+ self.assertTrue(hit_finally)
+ self.assertTrue(hit_except)
+
+ def test_nested_else_mixed2(self):
+ hit_else = False
+ hit_finally = False
+ hit_except = False
+ hit_inner_except = False
+ hit_inner_else = False
+
+ try:
+ try:
+ pass
+ except:
+ hit_inner_except = True
+ else:
+ hit_inner_else = True
+
+ raise Exception('outer exception')
+ except* BaseException:
+ hit_except = True
+ else:
+ hit_else = True
+ finally:
+ hit_finally = True
+
+ self.assertFalse(hit_inner_except)
+ self.assertTrue(hit_inner_else)
+ self.assertFalse(hit_else)
+ self.assertTrue(hit_finally)
+ self.assertTrue(hit_except)
+'''
+
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py
index 1864e611ab..57afb6ec6f 100644
--- a/Lib/test/test_exceptions.py
+++ b/Lib/test/test_exceptions.py
@@ -1,24 +1,33 @@
# Python test set -- part 5, built-in exceptions
import copy
-import gc
import os
import sys
import unittest
import pickle
import weakref
import errno
+from codecs import BOM_UTF8
+from itertools import product
from textwrap import dedent
from test.support import (captured_stderr, check_impl_detail,
cpython_only, gc_collect,
no_tracing, script_helper,
- SuppressCrashReport)
+ SuppressCrashReport,
+ force_not_colorized)
from test.support.import_helper import import_module
from test.support.os_helper import TESTFN, unlink
from test.support.warnings_helper import check_warnings
from test import support
+try:
+ import _testcapi
+ from _testcapi import INT_MAX
+except ImportError:
+ _testcapi = None
+ INT_MAX = 2**31 - 1
+
class NaiveException(Exception):
def __init__(self, x):
@@ -35,6 +44,7 @@ def __str__(self):
# XXX This is not really enough, each *operation* should be tested!
+
class ExceptionTests(unittest.TestCase):
def raise_catch(self, exc, excname):
@@ -160,6 +170,7 @@ def ckmsg(src, msg):
ckmsg(s, "'continue' not properly in loop")
ckmsg("continue\n", "'continue' not properly in loop")
+ ckmsg("f'{6 0}'", "invalid syntax. Perhaps you forgot a comma?")
# TODO: RUSTPYTHON
@unittest.expectedFailure
@@ -220,7 +231,7 @@ def check(self, src, lineno, offset, end_lineno=None, end_offset=None, encoding=
src = src.decode(encoding, 'replace')
line = src.split('\n')[lineno-1]
self.assertIn(line, cm.exception.text)
-
+
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_error_offset_continuation_characters(self):
@@ -238,7 +249,7 @@ def testSyntaxErrorOffset(self):
check('Python = "\u1e54\xfd\u0163\u0125\xf2\xf1" +', 1, 20)
check(b'# -*- coding: cp1251 -*-\nPython = "\xcf\xb3\xf2\xee\xed" +',
2, 19, encoding='cp1251')
- check(b'Python = "\xcf\xb3\xf2\xee\xed" +', 1, 18)
+ check(b'Python = "\xcf\xb3\xf2\xee\xed" +', 1, 10)
check('x = "a', 1, 5)
check('lambda x: x = 2', 1, 1)
check('f{a + b + c}', 1, 2)
@@ -263,7 +274,7 @@ def testSyntaxErrorOffset(self):
check('try:\n pass\nexcept*:\n pass', 3, 8)
check('try:\n pass\nexcept*:\n pass\nexcept* ValueError:\n pass', 3, 8)
- # Errors thrown by tokenizer.c
+ # Errors thrown by the tokenizer
check('(0x+1)', 1, 3)
check('x = 0xI', 1, 6)
check('0010 + 2', 1, 1)
@@ -305,6 +316,7 @@ def baz():
{
6
0="""''', 5, 13)
+ check('b"fooжжж"'.encode(), 1, 1, 1, 10)
# Errors thrown by symtable.c
check('x = [(yield i) for i in range(3)]', 1, 7)
@@ -317,8 +329,8 @@ def baz():
check('def f():\n global x\n nonlocal x', 2, 3)
# Errors thrown by future.c
- check('from __future__ import doesnt_exist', 1, 1)
- check('from __future__ import braces', 1, 1)
+ check('from __future__ import doesnt_exist', 1, 24)
+ check('from __future__ import braces', 1, 24)
check('x=1\nfrom __future__ import division', 2, 1)
check('foo(1=2)', 1, 5)
check('def f():\n x, y: int', 2, 3)
@@ -328,6 +340,14 @@ def baz():
check('(yield i) = 2', 1, 2)
check('def f(*):\n pass', 1, 7)
+ @unittest.skipIf(INT_MAX >= sys.maxsize, "Downcasting to int is safe for col_offset")
+ @support.requires_resource('cpu')
+ @support.bigmemtest(INT_MAX, memuse=2, dry_run=False)
+ def testMemoryErrorBigSource(self, size):
+ src = b"if True:\n%*s" % (size, b"pass")
+ with self.assertRaisesRegex(OverflowError, "Parser column offset overflow"):
+ compile(src, '', 'exec')
+
@cpython_only
def testSettingException(self):
# test that setting an exception at the C level works even if the
@@ -340,24 +360,23 @@ def __init__(self_):
class InvalidException:
pass
+ @unittest.skipIf(_testcapi is None, "requires _testcapi")
def test_capi1():
- import _testcapi
try:
_testcapi.raise_exception(BadException, 1)
except TypeError as err:
- exc, err, tb = sys.exc_info()
- co = tb.tb_frame.f_code
+ co = err.__traceback__.tb_frame.f_code
self.assertEqual(co.co_name, "test_capi1")
self.assertTrue(co.co_filename.endswith('test_exceptions.py'))
else:
self.fail("Expected exception")
+ @unittest.skipIf(_testcapi is None, "requires _testcapi")
def test_capi2():
- import _testcapi
try:
_testcapi.raise_exception(BadException, 0)
except RuntimeError as err:
- exc, err, tb = sys.exc_info()
+ tb = err.__traceback__.tb_next
co = tb.tb_frame.f_code
self.assertEqual(co.co_name, "__init__")
self.assertTrue(co.co_filename.endswith('test_exceptions.py'))
@@ -366,15 +385,14 @@ def test_capi2():
else:
self.fail("Expected exception")
+ @unittest.skipIf(_testcapi is None, "requires _testcapi")
def test_capi3():
- import _testcapi
self.assertRaises(SystemError, _testcapi.raise_exception,
InvalidException, 1)
- if not sys.platform.startswith('java'):
- test_capi1()
- test_capi2()
- test_capi3()
+ test_capi1()
+ test_capi2()
+ test_capi3()
def test_WindowsError(self):
try:
@@ -425,49 +443,51 @@ def test_windows_message(self):
with self.assertRaisesRegex(OSError, 'Windows Error 0x%x' % code):
ctypes.pythonapi.PyErr_SetFromWindowsErr(code)
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
def testAttributes(self):
# test that exception attributes are happy
exceptionList = [
- (BaseException, (), {'args' : ()}),
- (BaseException, (1, ), {'args' : (1,)}),
- (BaseException, ('foo',),
+ (BaseException, (), {}, {'args' : ()}),
+ (BaseException, (1, ), {}, {'args' : (1,)}),
+ (BaseException, ('foo',), {},
{'args' : ('foo',)}),
- (BaseException, ('foo', 1),
+ (BaseException, ('foo', 1), {},
{'args' : ('foo', 1)}),
- (SystemExit, ('foo',),
+ (SystemExit, ('foo',), {},
{'args' : ('foo',), 'code' : 'foo'}),
- (OSError, ('foo',),
+ (OSError, ('foo',), {},
{'args' : ('foo',), 'filename' : None, 'filename2' : None,
'errno' : None, 'strerror' : None}),
- (OSError, ('foo', 'bar'),
+ (OSError, ('foo', 'bar'), {},
{'args' : ('foo', 'bar'),
'filename' : None, 'filename2' : None,
'errno' : 'foo', 'strerror' : 'bar'}),
- (OSError, ('foo', 'bar', 'baz'),
+ (OSError, ('foo', 'bar', 'baz'), {},
{'args' : ('foo', 'bar'),
'filename' : 'baz', 'filename2' : None,
'errno' : 'foo', 'strerror' : 'bar'}),
- (OSError, ('foo', 'bar', 'baz', None, 'quux'),
+ (OSError, ('foo', 'bar', 'baz', None, 'quux'), {},
{'args' : ('foo', 'bar'), 'filename' : 'baz', 'filename2': 'quux'}),
- (OSError, ('errnoStr', 'strErrorStr', 'filenameStr'),
+ (OSError, ('errnoStr', 'strErrorStr', 'filenameStr'), {},
{'args' : ('errnoStr', 'strErrorStr'),
'strerror' : 'strErrorStr', 'errno' : 'errnoStr',
'filename' : 'filenameStr'}),
- (OSError, (1, 'strErrorStr', 'filenameStr'),
+ (OSError, (1, 'strErrorStr', 'filenameStr'), {},
{'args' : (1, 'strErrorStr'), 'errno' : 1,
'strerror' : 'strErrorStr',
'filename' : 'filenameStr', 'filename2' : None}),
- (SyntaxError, (), {'msg' : None, 'text' : None,
+ (SyntaxError, (), {}, {'msg' : None, 'text' : None,
'filename' : None, 'lineno' : None, 'offset' : None,
'end_offset': None, 'print_file_and_line' : None}),
- (SyntaxError, ('msgStr',),
+ (SyntaxError, ('msgStr',), {},
{'args' : ('msgStr',), 'text' : None,
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None,
'end_offset': None}),
(SyntaxError, ('msgStr', ('filenameStr', 'linenoStr', 'offsetStr',
- 'textStr', 'endLinenoStr', 'endOffsetStr')),
+ 'textStr', 'endLinenoStr', 'endOffsetStr')), {},
{'offset' : 'offsetStr', 'text' : 'textStr',
'args' : ('msgStr', ('filenameStr', 'linenoStr',
'offsetStr', 'textStr',
@@ -477,7 +497,7 @@ def testAttributes(self):
'end_lineno': 'endLinenoStr', 'end_offset': 'endOffsetStr'}),
(SyntaxError, ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'endLinenoStr', 'endOffsetStr',
- 'print_file_and_lineStr'),
+ 'print_file_and_lineStr'), {},
{'text' : None,
'args' : ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'endLinenoStr', 'endOffsetStr',
@@ -485,38 +505,40 @@ def testAttributes(self):
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None,
'end_lineno': None, 'end_offset': None}),
- (UnicodeError, (), {'args' : (),}),
+ (UnicodeError, (), {}, {'args' : (),}),
(UnicodeEncodeError, ('ascii', 'a', 0, 1,
- 'ordinal not in range'),
+ 'ordinal not in range'), {},
{'args' : ('ascii', 'a', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : 'a',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', bytearray(b'\xff'), 0, 1,
- 'ordinal not in range'),
+ 'ordinal not in range'), {},
{'args' : ('ascii', bytearray(b'\xff'), 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : b'\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', b'\xff', 0, 1,
- 'ordinal not in range'),
+ 'ordinal not in range'), {},
{'args' : ('ascii', b'\xff', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : b'\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
- (UnicodeTranslateError, ("\u3042", 0, 1, "ouch"),
+ (UnicodeTranslateError, ("\u3042", 0, 1, "ouch"), {},
{'args' : ('\u3042', 0, 1, 'ouch'),
'object' : '\u3042', 'reason' : 'ouch',
'start' : 0, 'end' : 1}),
- (NaiveException, ('foo',),
+ (NaiveException, ('foo',), {},
{'args': ('foo',), 'x': 'foo'}),
- (SlottedNaiveException, ('foo',),
+ (SlottedNaiveException, ('foo',), {},
{'args': ('foo',), 'x': 'foo'}),
+ (AttributeError, ('foo',), dict(name='name', obj='obj'),
+ dict(args=('foo',), name='name', obj='obj')),
]
try:
# More tests are in test_WindowsError
exceptionList.append(
- (WindowsError, (1, 'strErrorStr', 'filenameStr'),
+ (WindowsError, (1, 'strErrorStr', 'filenameStr'), {},
{'args' : (1, 'strErrorStr'),
'strerror' : 'strErrorStr', 'winerror' : None,
'errno' : 1,
@@ -525,11 +547,11 @@ def testAttributes(self):
except NameError:
pass
- for exc, args, expected in exceptionList:
+ for exc, args, kwargs, expected in exceptionList:
try:
- e = exc(*args)
+ e = exc(*args, **kwargs)
except:
- print("\nexc=%r, args=%r" % (exc, args), file=sys.stderr)
+ print(f"\nexc={exc!r}, args={args!r}", file=sys.stderr)
# raise
else:
# Verify module name
@@ -552,11 +574,39 @@ def testAttributes(self):
new = p.loads(s)
for checkArgName in expected:
got = repr(getattr(new, checkArgName))
- want = repr(expected[checkArgName])
+ if exc == AttributeError and checkArgName == 'obj':
+ # See GH-103352, we're not pickling
+ # obj at this point. So verify it's None.
+ want = repr(None)
+ else:
+ want = repr(expected[checkArgName])
self.assertEqual(got, want,
'pickled "%r", attribute "%s' %
(e, checkArgName))
+ def test_setstate(self):
+ e = Exception(42)
+ e.blah = 53
+ self.assertEqual(e.args, (42,))
+ self.assertEqual(e.blah, 53)
+ self.assertRaises(AttributeError, getattr, e, 'a')
+ self.assertRaises(AttributeError, getattr, e, 'b')
+ e.__setstate__({'a': 1 , 'b': 2})
+ self.assertEqual(e.args, (42,))
+ self.assertEqual(e.blah, 53)
+ self.assertEqual(e.a, 1)
+ self.assertEqual(e.b, 2)
+ e.__setstate__({'a': 11, 'args': (1,2,3), 'blah': 35})
+ self.assertEqual(e.args, (1,2,3))
+ self.assertEqual(e.blah, 35)
+ self.assertEqual(e.a, 11)
+ self.assertEqual(e.b, 2)
+
+ def test_invalid_setstate(self):
+ e = Exception(42)
+ with self.assertRaisesRegex(TypeError, "state is not a dictionary"):
+ e.__setstate__(42)
+
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_notes(self):
@@ -589,8 +639,8 @@ def test_notes(self):
def testWithTraceback(self):
try:
raise IndexError(4)
- except:
- tb = sys.exc_info()[2]
+ except Exception as e:
+ tb = e.__traceback__
e = BaseException().with_traceback(tb)
self.assertIsInstance(e, BaseException)
@@ -615,17 +665,40 @@ def testInvalidTraceback(self):
else:
self.fail("No exception raised")
- def testInvalidAttrs(self):
- self.assertRaises(TypeError, setattr, Exception(), '__cause__', 1)
- self.assertRaises(TypeError, delattr, Exception(), '__cause__')
- self.assertRaises(TypeError, setattr, Exception(), '__context__', 1)
- self.assertRaises(TypeError, delattr, Exception(), '__context__')
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_invalid_setattr(self):
+ TE = TypeError
+ exc = Exception()
+ msg = "'int' object is not iterable"
+ self.assertRaisesRegex(TE, msg, setattr, exc, 'args', 1)
+ msg = "__traceback__ must be a traceback or None"
+ self.assertRaisesRegex(TE, msg, setattr, exc, '__traceback__', 1)
+ msg = "exception cause must be None or derive from BaseException"
+ self.assertRaisesRegex(TE, msg, setattr, exc, '__cause__', 1)
+ msg = "exception context must be None or derive from BaseException"
+ self.assertRaisesRegex(TE, msg, setattr, exc, '__context__', 1)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_invalid_delattr(self):
+ TE = TypeError
+ try:
+ raise IndexError(4)
+ except Exception as e:
+ exc = e
+
+ msg = "may not be deleted"
+ self.assertRaisesRegex(TE, msg, delattr, exc, 'args')
+ self.assertRaisesRegex(TE, msg, delattr, exc, '__traceback__')
+ self.assertRaisesRegex(TE, msg, delattr, exc, '__cause__')
+ self.assertRaisesRegex(TE, msg, delattr, exc, '__context__')
def testNoneClearsTracebackAttr(self):
try:
raise IndexError(4)
- except:
- tb = sys.exc_info()[2]
+ except Exception as e:
+ tb = e.__traceback__
e = Exception()
e.__traceback__ = tb
@@ -860,28 +933,28 @@ def yield_raise():
try:
raise KeyError("caught")
except KeyError:
- yield sys.exc_info()[0]
- yield sys.exc_info()[0]
- yield sys.exc_info()[0]
+ yield sys.exception()
+ yield sys.exception()
+ yield sys.exception()
g = yield_raise()
- self.assertEqual(next(g), KeyError)
- self.assertEqual(sys.exc_info()[0], None)
- self.assertEqual(next(g), KeyError)
- self.assertEqual(sys.exc_info()[0], None)
- self.assertEqual(next(g), None)
+ self.assertIsInstance(next(g), KeyError)
+ self.assertIsNone(sys.exception())
+ self.assertIsInstance(next(g), KeyError)
+ self.assertIsNone(sys.exception())
+ self.assertIsNone(next(g))
# Same test, but inside an exception handler
try:
raise TypeError("foo")
except TypeError:
g = yield_raise()
- self.assertEqual(next(g), KeyError)
- self.assertEqual(sys.exc_info()[0], TypeError)
- self.assertEqual(next(g), KeyError)
- self.assertEqual(sys.exc_info()[0], TypeError)
- self.assertEqual(next(g), TypeError)
+ self.assertIsInstance(next(g), KeyError)
+ self.assertIsInstance(sys.exception(), TypeError)
+ self.assertIsInstance(next(g), KeyError)
+ self.assertIsInstance(sys.exception(), TypeError)
+ self.assertIsInstance(next(g), TypeError)
del g
- self.assertEqual(sys.exc_info()[0], TypeError)
+ self.assertIsInstance(sys.exception(), TypeError)
def test_generator_leaking2(self):
# See issue 12475.
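The generator hunks above and below systematically replace sys.exc_info() lookups with sys.exception(), which returns the exception instance currently being handled (or None) and is equivalent to sys.exc_info()[1]. A minimal sketch of that equivalence, assuming Python 3.11+ where sys.exception() exists:

    import sys

    try:
        raise KeyError('caught')
    except KeyError:
        exc = sys.exception()          # the active exception instance
        typ, val, tb = sys.exc_info()  # the legacy triple
        assert exc is val
        assert type(exc) is typ
        assert exc.__traceback__ is tb

    assert sys.exception() is None     # nothing is being handled out here
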
@@ -896,7 +969,7 @@ def g():
next(it)
except StopIteration:
pass
- self.assertEqual(sys.exc_info(), (None, None, None))
+ self.assertIsNone(sys.exception())
def test_generator_leaking3(self):
# See issue #23353. When gen.throw() is called, the caller's
@@ -905,17 +978,17 @@ def g():
try:
yield
except ZeroDivisionError:
- yield sys.exc_info()[1]
+ yield sys.exception()
it = g()
next(it)
try:
1/0
except ZeroDivisionError as e:
- self.assertIs(sys.exc_info()[1], e)
+ self.assertIs(sys.exception(), e)
gen_exc = it.throw(e)
- self.assertIs(sys.exc_info()[1], e)
+ self.assertIs(sys.exception(), e)
self.assertIs(gen_exc, e)
- self.assertEqual(sys.exc_info(), (None, None, None))
+ self.assertIsNone(sys.exception())
def test_generator_leaking4(self):
# See issue #23353. When an exception is raised by a generator,
@@ -924,7 +997,7 @@ def g():
try:
1/0
except ZeroDivisionError:
- yield sys.exc_info()[0]
+ yield sys.exception()
raise
it = g()
try:
@@ -932,7 +1005,7 @@ def g():
except TypeError:
# The caller's exception state (TypeError) is temporarily
# saved in the generator.
- tp = next(it)
+ tp = type(next(it))
self.assertIs(tp, ZeroDivisionError)
try:
next(it)
@@ -940,15 +1013,15 @@ def g():
# with an exception, it shouldn't have restored the old
# exception state (TypeError).
except ZeroDivisionError as e:
- self.assertIs(sys.exc_info()[1], e)
+ self.assertIs(sys.exception(), e)
# We used to find TypeError here.
- self.assertEqual(sys.exc_info(), (None, None, None))
+ self.assertIsNone(sys.exception())
def test_generator_doesnt_retain_old_exc(self):
def g():
- self.assertIsInstance(sys.exc_info()[1], RuntimeError)
+ self.assertIsInstance(sys.exception(), RuntimeError)
yield
- self.assertEqual(sys.exc_info(), (None, None, None))
+ self.assertIsNone(sys.exception())
it = g()
try:
raise RuntimeError
@@ -956,7 +1029,7 @@ def g():
next(it)
self.assertRaises(StopIteration, next, it)
- def test_generator_finalizing_and_exc_info(self):
+ def test_generator_finalizing_and_sys_exception(self):
# See #7173
def simple_gen():
yield 1
@@ -968,7 +1041,7 @@ def run_gen():
return next(gen)
run_gen()
gc_collect()
- self.assertEqual(sys.exc_info(), (None, None, None))
+ self.assertIsNone(sys.exception())
def _check_generator_cleanup_exc_state(self, testfunc):
# Issue #12791: exception state is cleaned up as soon as a generator
@@ -1039,14 +1112,14 @@ def test_3114(self):
class MyObject:
def __del__(self):
nonlocal e
- e = sys.exc_info()
+ e = sys.exception()
e = ()
try:
raise Exception(MyObject())
except:
pass
gc_collect() # For PyPy or other GCs.
- self.assertEqual(e, (None, None, None))
+ self.assertIsNone(e)
def test_raise_does_not_create_context_chain_cycle(self):
class A(Exception):
@@ -1088,7 +1161,6 @@ class C(Exception):
self.assertIs(c.__context__, b)
self.assertIsNone(b.__context__)
-
# TODO: RUSTPYTHON
@unittest.skip("Infinite loop")
def test_no_hang_on_context_chain_cycle1(self):
@@ -1110,7 +1182,6 @@ def cycle():
self.assertIsInstance(exc.__context__, ValueError)
self.assertIs(exc.__context__.__context__, exc.__context__)
- @unittest.skip("See issue 44895")
def test_no_hang_on_context_chain_cycle2(self):
# See issue 25782. Cycle at head of context chain.
@@ -1291,6 +1362,31 @@ def test_unicode_errors_no_object(self):
for klass in klasses:
self.assertEqual(str(klass.__new__(klass)), "")
+ # TODO: RUSTPYTHON; OverflowError: Python int too large to convert to Rust usize
+ @unittest.expectedFailure
+ def test_unicode_error_str_does_not_crash(self):
+ # Test that str(UnicodeError(...)) does not crash.
+ # See https://github.com/python/cpython/issues/123378.
+
+ for start, end, objlen in product(
+ range(-5, 5),
+ range(-5, 5),
+ range(7),
+ ):
+ obj = 'a' * objlen
+ with self.subTest('encode', objlen=objlen, start=start, end=end):
+ exc = UnicodeEncodeError('utf-8', obj, start, end, '')
+ self.assertIsInstance(str(exc), str)
+
+ with self.subTest('translate', objlen=objlen, start=start, end=end):
+ exc = UnicodeTranslateError(obj, start, end, '')
+ self.assertIsInstance(str(exc), str)
+
+ encoded = obj.encode()
+ with self.subTest('decode', objlen=objlen, start=start, end=end):
+ exc = UnicodeDecodeError('utf-8', encoded, start, end, '')
+ self.assertIsInstance(str(exc), str)
+
@no_tracing
@unittest.skipIf(sys.platform == 'win32', 'TODO: RUSTPYTHON Windows')
def test_badisinstance(self):
@@ -1317,14 +1413,15 @@ class MyException(Exception, metaclass=Meta):
def g():
try:
return g()
- except RecursionError:
- return sys.exc_info()
- e, v, tb = g()
- self.assertIsInstance(v, RecursionError, type(v))
- self.assertIn("maximum recursion depth exceeded", str(v))
+ except RecursionError as e:
+ return e
+ exc = g()
+ self.assertIsInstance(exc, RecursionError, type(exc))
+ self.assertIn("maximum recursion depth exceeded", str(exc))
@cpython_only
+ @support.requires_resource('cpu')
def test_trashcan_recursion(self):
# See bpo-33930
@@ -1340,6 +1437,7 @@ def foo():
@cpython_only
def test_recursion_normalizing_exception(self):
+ import_module("_testinternalcapi")
# Issue #22898.
# Test that a RecursionError is raised when tstate->recursion_depth is
# equal to recursion_limit in PyErr_NormalizeException() and check
@@ -1352,6 +1450,7 @@ def test_recursion_normalizing_exception(self):
code = """if 1:
import sys
from _testinternalcapi import get_recursion_depth
+ from test import support
class MyException(Exception): pass
@@ -1379,13 +1478,8 @@ def gen():
generator = gen()
next(generator)
recursionlimit = sys.getrecursionlimit()
- depth = get_recursion_depth()
try:
- # Upon the last recursive invocation of recurse(),
- # tstate->recursion_depth is equal to (recursion_limit - 1)
- # and is equal to recursion_limit when _gen_throw() calls
- # PyErr_NormalizeException().
- recurse(setrecursionlimit(depth + 2) - depth)
+ recurse(support.exceeds_recursion_limit())
finally:
sys.setrecursionlimit(recursionlimit)
print('Done.')
@@ -1398,10 +1492,12 @@ def gen():
self.assertIn(b'Done.', out)
@cpython_only
+ @unittest.skipIf(_testcapi is None, "requires _testcapi")
+ @force_not_colorized
def test_recursion_normalizing_infinite_exception(self):
# Issue #30697. Test that a RecursionError is raised when
- # PyErr_NormalizeException() maximum recursion depth has been
- # exceeded.
+ # maximum recursion depth has been exceeded when creating
+ # an exception
code = """if 1:
import _testcapi
try:
@@ -1411,8 +1507,8 @@ def test_recursion_normalizing_infinite_exception(self):
"""
rc, out, err = script_helper.assert_python_failure("-c", code)
self.assertEqual(rc, 1)
- self.assertIn(b'RecursionError: maximum recursion depth exceeded '
- b'while normalizing an exception', err)
+ expected = b'RecursionError: maximum recursion depth exceeded'
+ self.assertTrue(expected in err, msg=f"{expected!r} not found in {err[:3_000]!r}... (truncated)")
self.assertIn(b'Done.', out)
@@ -1464,6 +1560,10 @@ def recurse_in_body_and_except():
@cpython_only
+ # Python built with Py_TRACE_REFS fail with a fatal error in
+ # _PyRefchain_Trace() on memory allocation error.
+ @unittest.skipIf(support.Py_TRACE_REFS, 'cannot test Py_TRACE_REFS build')
+ @unittest.skipIf(_testcapi is None, "requires _testcapi")
def test_recursion_normalizing_with_no_memory(self):
# Issue #30697. Test that in the abort that occurs when there is no
# memory left and the size of the Python frames stack is greater than
@@ -1486,6 +1586,7 @@ def recurse(cnt):
self.assertIn(b'MemoryError', err)
@cpython_only
+ @unittest.skipIf(_testcapi is None, "requires _testcapi")
def test_MemoryError(self):
# PyErr_NoMemory always raises the same exception instance.
# Check that the traceback is not doubled.
@@ -1505,8 +1606,8 @@ def raiseMemError():
self.assertEqual(tb1, tb2)
@cpython_only
+ @unittest.skipIf(_testcapi is None, "requires _testcapi")
def test_exception_with_doc(self):
- import _testcapi
doc2 = "This is a test docstring."
doc4 = "This is another test docstring."
@@ -1545,6 +1646,7 @@ class C(object):
self.assertEqual(error5.__doc__, "")
@cpython_only
+ @unittest.skipIf(_testcapi is None, "requires _testcapi")
def test_memory_error_cleanup(self):
# Issue #5437: preallocated MemoryError instances should not keep
# traceback objects alive.
@@ -1567,8 +1669,8 @@ def inner():
gc_collect() # For PyPy or other GCs.
self.assertEqual(wr(), None)
- @no_tracing
@unittest.skipIf(sys.platform == 'win32', 'TODO: RUSTPYTHON Windows')
+ @no_tracing
def test_recursion_error_cleanup(self):
# Same test as above, but with "recursion exceeded" errors
class C:
@@ -1636,6 +1738,10 @@ def test_unhandled(self):
self.assertTrue(report.endswith("\n"))
@cpython_only
+ # Python built with Py_TRACE_REFS fail with a fatal error in
+ # _PyRefchain_Trace() on memory allocation error.
+ @unittest.skipIf(support.Py_TRACE_REFS, 'cannot test Py_TRACE_REFS build')
+ @unittest.skipIf(_testcapi is None, "requires _testcapi")
def test_memory_error_in_PyErr_PrintEx(self):
code = """if 1:
import _testcapi
@@ -1682,7 +1788,7 @@ def g():
raise ValueError
except ValueError:
yield 1
- self.assertEqual(sys.exc_info(), (None, None, None))
+ self.assertIsNone(sys.exception())
yield 2
gen = g()
@@ -1756,7 +1862,21 @@ class TestException(MemoryError):
gc_collect()
-global_for_suggestions = None
+ @unittest.skipIf(_testcapi is None, "requires _testcapi")
+ def test_memory_error_in_subinterp(self):
+ # gh-109894: subinterpreters shouldn't count on last resort memory error
+ # when MemoryError is raised through PyErr_NoMemory() call,
+ # and should preallocate memory errors as does the main interpreter.
+ # interp.static_objects.last_resort_memory_error.args
+ # should be initialized to empty tuple to avoid crash on attempt to print it.
+ code = f"""if 1:
+ import _testcapi
+ _testcapi.run_in_subinterp(\"[0]*{sys.maxsize}\")
+ exit(0)
+ """
+ rc, _, err = script_helper.assert_python_ok("-c", code)
+ self.assertIn(b'MemoryError', err)
+
class NameErrorTests(unittest.TestCase):
def test_name_error_has_name(self):
@@ -1765,272 +1885,6 @@ def test_name_error_has_name(self):
except NameError as exc:
self.assertEqual("bluch", exc.name)
- def test_name_error_suggestions(self):
- def Substitution():
- noise = more_noise = a = bc = None
- blech = None
- print(bluch)
-
- def Elimination():
- noise = more_noise = a = bc = None
- blch = None
- print(bluch)
-
- def Addition():
- noise = more_noise = a = bc = None
- bluchin = None
- print(bluch)
-
- def SubstitutionOverElimination():
- blach = None
- bluc = None
- print(bluch)
-
- def SubstitutionOverAddition():
- blach = None
- bluchi = None
- print(bluch)
-
- def EliminationOverAddition():
- blucha = None
- bluc = None
- print(bluch)
-
- for func, suggestion in [(Substitution, "'blech'?"),
- (Elimination, "'blch'?"),
- (Addition, "'bluchin'?"),
- (EliminationOverAddition, "'blucha'?"),
- (SubstitutionOverElimination, "'blach'?"),
- (SubstitutionOverAddition, "'blach'?")]:
- err = None
- try:
- func()
- except NameError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
- self.assertIn(suggestion, err.getvalue())
-
- def test_name_error_suggestions_from_globals(self):
- def func():
- print(global_for_suggestio)
- try:
- func()
- except NameError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
- self.assertIn("'global_for_suggestions'?", err.getvalue())
-
- def test_name_error_suggestions_from_builtins(self):
- def func():
- print(ZeroDivisionErrrrr)
- try:
- func()
- except NameError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
- self.assertIn("'ZeroDivisionError'?", err.getvalue())
-
- def test_name_error_suggestions_do_not_trigger_for_long_names(self):
- def f():
- somethingverywronghehehehehehe = None
- print(somethingverywronghe)
-
- try:
- f()
- except NameError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertNotIn("somethingverywronghehe", err.getvalue())
-
- def test_name_error_bad_suggestions_do_not_trigger_for_small_names(self):
- vvv = mom = w = id = pytho = None
-
- with self.subTest(name="b"):
- try:
- b
- except NameError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
- self.assertNotIn("you mean", err.getvalue())
- self.assertNotIn("vvv", err.getvalue())
- self.assertNotIn("mom", err.getvalue())
- self.assertNotIn("'id'", err.getvalue())
- self.assertNotIn("'w'", err.getvalue())
- self.assertNotIn("'pytho'", err.getvalue())
-
- with self.subTest(name="v"):
- try:
- v
- except NameError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
- self.assertNotIn("you mean", err.getvalue())
- self.assertNotIn("vvv", err.getvalue())
- self.assertNotIn("mom", err.getvalue())
- self.assertNotIn("'id'", err.getvalue())
- self.assertNotIn("'w'", err.getvalue())
- self.assertNotIn("'pytho'", err.getvalue())
-
- with self.subTest(name="m"):
- try:
- m
- except NameError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
- self.assertNotIn("you mean", err.getvalue())
- self.assertNotIn("vvv", err.getvalue())
- self.assertNotIn("mom", err.getvalue())
- self.assertNotIn("'id'", err.getvalue())
- self.assertNotIn("'w'", err.getvalue())
- self.assertNotIn("'pytho'", err.getvalue())
-
- with self.subTest(name="py"):
- try:
- py
- except NameError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
- self.assertNotIn("you mean", err.getvalue())
- self.assertNotIn("vvv", err.getvalue())
- self.assertNotIn("mom", err.getvalue())
- self.assertNotIn("'id'", err.getvalue())
- self.assertNotIn("'w'", err.getvalue())
- self.assertNotIn("'pytho'", err.getvalue())
-
- def test_name_error_suggestions_do_not_trigger_for_too_many_locals(self):
- def f():
- # Mutating locals() is unreliable, so we need to do it by hand
- a1 = a2 = a3 = a4 = a5 = a6 = a7 = a8 = a9 = a10 = \
- a11 = a12 = a13 = a14 = a15 = a16 = a17 = a18 = a19 = a20 = \
- a21 = a22 = a23 = a24 = a25 = a26 = a27 = a28 = a29 = a30 = \
- a31 = a32 = a33 = a34 = a35 = a36 = a37 = a38 = a39 = a40 = \
- a41 = a42 = a43 = a44 = a45 = a46 = a47 = a48 = a49 = a50 = \
- a51 = a52 = a53 = a54 = a55 = a56 = a57 = a58 = a59 = a60 = \
- a61 = a62 = a63 = a64 = a65 = a66 = a67 = a68 = a69 = a70 = \
- a71 = a72 = a73 = a74 = a75 = a76 = a77 = a78 = a79 = a80 = \
- a81 = a82 = a83 = a84 = a85 = a86 = a87 = a88 = a89 = a90 = \
- a91 = a92 = a93 = a94 = a95 = a96 = a97 = a98 = a99 = a100 = \
- a101 = a102 = a103 = a104 = a105 = a106 = a107 = a108 = a109 = a110 = \
- a111 = a112 = a113 = a114 = a115 = a116 = a117 = a118 = a119 = a120 = \
- a121 = a122 = a123 = a124 = a125 = a126 = a127 = a128 = a129 = a130 = \
- a131 = a132 = a133 = a134 = a135 = a136 = a137 = a138 = a139 = a140 = \
- a141 = a142 = a143 = a144 = a145 = a146 = a147 = a148 = a149 = a150 = \
- a151 = a152 = a153 = a154 = a155 = a156 = a157 = a158 = a159 = a160 = \
- a161 = a162 = a163 = a164 = a165 = a166 = a167 = a168 = a169 = a170 = \
- a171 = a172 = a173 = a174 = a175 = a176 = a177 = a178 = a179 = a180 = \
- a181 = a182 = a183 = a184 = a185 = a186 = a187 = a188 = a189 = a190 = \
- a191 = a192 = a193 = a194 = a195 = a196 = a197 = a198 = a199 = a200 = \
- a201 = a202 = a203 = a204 = a205 = a206 = a207 = a208 = a209 = a210 = \
- a211 = a212 = a213 = a214 = a215 = a216 = a217 = a218 = a219 = a220 = \
- a221 = a222 = a223 = a224 = a225 = a226 = a227 = a228 = a229 = a230 = \
- a231 = a232 = a233 = a234 = a235 = a236 = a237 = a238 = a239 = a240 = \
- a241 = a242 = a243 = a244 = a245 = a246 = a247 = a248 = a249 = a250 = \
- a251 = a252 = a253 = a254 = a255 = a256 = a257 = a258 = a259 = a260 = \
- a261 = a262 = a263 = a264 = a265 = a266 = a267 = a268 = a269 = a270 = \
- a271 = a272 = a273 = a274 = a275 = a276 = a277 = a278 = a279 = a280 = \
- a281 = a282 = a283 = a284 = a285 = a286 = a287 = a288 = a289 = a290 = \
- a291 = a292 = a293 = a294 = a295 = a296 = a297 = a298 = a299 = a300 = \
- a301 = a302 = a303 = a304 = a305 = a306 = a307 = a308 = a309 = a310 = \
- a311 = a312 = a313 = a314 = a315 = a316 = a317 = a318 = a319 = a320 = \
- a321 = a322 = a323 = a324 = a325 = a326 = a327 = a328 = a329 = a330 = \
- a331 = a332 = a333 = a334 = a335 = a336 = a337 = a338 = a339 = a340 = \
- a341 = a342 = a343 = a344 = a345 = a346 = a347 = a348 = a349 = a350 = \
- a351 = a352 = a353 = a354 = a355 = a356 = a357 = a358 = a359 = a360 = \
- a361 = a362 = a363 = a364 = a365 = a366 = a367 = a368 = a369 = a370 = \
- a371 = a372 = a373 = a374 = a375 = a376 = a377 = a378 = a379 = a380 = \
- a381 = a382 = a383 = a384 = a385 = a386 = a387 = a388 = a389 = a390 = \
- a391 = a392 = a393 = a394 = a395 = a396 = a397 = a398 = a399 = a400 = \
- a401 = a402 = a403 = a404 = a405 = a406 = a407 = a408 = a409 = a410 = \
- a411 = a412 = a413 = a414 = a415 = a416 = a417 = a418 = a419 = a420 = \
- a421 = a422 = a423 = a424 = a425 = a426 = a427 = a428 = a429 = a430 = \
- a431 = a432 = a433 = a434 = a435 = a436 = a437 = a438 = a439 = a440 = \
- a441 = a442 = a443 = a444 = a445 = a446 = a447 = a448 = a449 = a450 = \
- a451 = a452 = a453 = a454 = a455 = a456 = a457 = a458 = a459 = a460 = \
- a461 = a462 = a463 = a464 = a465 = a466 = a467 = a468 = a469 = a470 = \
- a471 = a472 = a473 = a474 = a475 = a476 = a477 = a478 = a479 = a480 = \
- a481 = a482 = a483 = a484 = a485 = a486 = a487 = a488 = a489 = a490 = \
- a491 = a492 = a493 = a494 = a495 = a496 = a497 = a498 = a499 = a500 = \
- a501 = a502 = a503 = a504 = a505 = a506 = a507 = a508 = a509 = a510 = \
- a511 = a512 = a513 = a514 = a515 = a516 = a517 = a518 = a519 = a520 = \
- a521 = a522 = a523 = a524 = a525 = a526 = a527 = a528 = a529 = a530 = \
- a531 = a532 = a533 = a534 = a535 = a536 = a537 = a538 = a539 = a540 = \
- a541 = a542 = a543 = a544 = a545 = a546 = a547 = a548 = a549 = a550 = \
- a551 = a552 = a553 = a554 = a555 = a556 = a557 = a558 = a559 = a560 = \
- a561 = a562 = a563 = a564 = a565 = a566 = a567 = a568 = a569 = a570 = \
- a571 = a572 = a573 = a574 = a575 = a576 = a577 = a578 = a579 = a580 = \
- a581 = a582 = a583 = a584 = a585 = a586 = a587 = a588 = a589 = a590 = \
- a591 = a592 = a593 = a594 = a595 = a596 = a597 = a598 = a599 = a600 = \
- a601 = a602 = a603 = a604 = a605 = a606 = a607 = a608 = a609 = a610 = \
- a611 = a612 = a613 = a614 = a615 = a616 = a617 = a618 = a619 = a620 = \
- a621 = a622 = a623 = a624 = a625 = a626 = a627 = a628 = a629 = a630 = \
- a631 = a632 = a633 = a634 = a635 = a636 = a637 = a638 = a639 = a640 = \
- a641 = a642 = a643 = a644 = a645 = a646 = a647 = a648 = a649 = a650 = \
- a651 = a652 = a653 = a654 = a655 = a656 = a657 = a658 = a659 = a660 = \
- a661 = a662 = a663 = a664 = a665 = a666 = a667 = a668 = a669 = a670 = \
- a671 = a672 = a673 = a674 = a675 = a676 = a677 = a678 = a679 = a680 = \
- a681 = a682 = a683 = a684 = a685 = a686 = a687 = a688 = a689 = a690 = \
- a691 = a692 = a693 = a694 = a695 = a696 = a697 = a698 = a699 = a700 = \
- a701 = a702 = a703 = a704 = a705 = a706 = a707 = a708 = a709 = a710 = \
- a711 = a712 = a713 = a714 = a715 = a716 = a717 = a718 = a719 = a720 = \
- a721 = a722 = a723 = a724 = a725 = a726 = a727 = a728 = a729 = a730 = \
- a731 = a732 = a733 = a734 = a735 = a736 = a737 = a738 = a739 = a740 = \
- a741 = a742 = a743 = a744 = a745 = a746 = a747 = a748 = a749 = a750 = \
- a751 = a752 = a753 = a754 = a755 = a756 = a757 = a758 = a759 = a760 = \
- a761 = a762 = a763 = a764 = a765 = a766 = a767 = a768 = a769 = a770 = \
- a771 = a772 = a773 = a774 = a775 = a776 = a777 = a778 = a779 = a780 = \
- a781 = a782 = a783 = a784 = a785 = a786 = a787 = a788 = a789 = a790 = \
- a791 = a792 = a793 = a794 = a795 = a796 = a797 = a798 = a799 = a800 \
- = None
- print(a0)
-
- try:
- f()
- except NameError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertNotRegex(err.getvalue(), r"NameError.*a1")
-
- def test_name_error_with_custom_exceptions(self):
- def f():
- blech = None
- raise NameError()
-
- try:
- f()
- except NameError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertNotIn("blech", err.getvalue())
-
- def f():
- blech = None
- raise NameError
-
- try:
- f()
- except NameError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertNotIn("blech", err.getvalue())
-
- def test_unbound_local_error_doesn_not_match(self):
- def foo():
- something = 3
- print(somethong)
- somethong = 3
-
- try:
- foo()
- except UnboundLocalError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertNotIn("something", err.getvalue())
-
def test_issue45826(self):
# regression test for bpo-45826
def f():
@@ -2042,6 +1896,8 @@ def f():
except self.failureException:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
+ else:
+ self.fail("assertRaisesRegex should have failed.")
self.assertIn("aab", err.getvalue())
@@ -2062,6 +1918,15 @@ def f():
self.assertIn("nonsense", err.getvalue())
self.assertIn("ZeroDivisionError", err.getvalue())
+ def test_gh_111654(self):
+ def f():
+ class TestClass:
+ TestClass
+
+ self.assertRaises(NameError, f)
+
+ # Note: name suggestion tests live in `test_traceback`.
+
class AttributeErrorTests(unittest.TestCase):
def test_attributes(self):
@@ -2103,244 +1968,13 @@ def blech(self):
self.assertEqual("bluch", exc.name)
self.assertEqual(obj, exc.obj)
- def test_getattr_suggestions(self):
- class Substitution:
- noise = more_noise = a = bc = None
- blech = None
+ # Note: name suggestion tests live in `test_traceback`.
- class Elimination:
- noise = more_noise = a = bc = None
- blch = None
- class Addition:
- noise = more_noise = a = bc = None
- bluchin = None
-
- class SubstitutionOverElimination:
- blach = None
- bluc = None
-
- class SubstitutionOverAddition:
- blach = None
- bluchi = None
-
- class EliminationOverAddition:
- blucha = None
- bluc = None
-
- for cls, suggestion in [(Substitution, "'blech'?"),
- (Elimination, "'blch'?"),
- (Addition, "'bluchin'?"),
- (EliminationOverAddition, "'bluc'?"),
- (SubstitutionOverElimination, "'blach'?"),
- (SubstitutionOverAddition, "'blach'?")]:
- try:
- cls().bluch
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertIn(suggestion, err.getvalue())
-
- def test_getattr_suggestions_do_not_trigger_for_long_attributes(self):
- class A:
- blech = None
-
- try:
- A().somethingverywrong
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertNotIn("blech", err.getvalue())
-
- def test_getattr_error_bad_suggestions_do_not_trigger_for_small_names(self):
- class MyClass:
- vvv = mom = w = id = pytho = None
-
- with self.subTest(name="b"):
- try:
- MyClass.b
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
- self.assertNotIn("you mean", err.getvalue())
- self.assertNotIn("vvv", err.getvalue())
- self.assertNotIn("mom", err.getvalue())
- self.assertNotIn("'id'", err.getvalue())
- self.assertNotIn("'w'", err.getvalue())
- self.assertNotIn("'pytho'", err.getvalue())
-
- with self.subTest(name="v"):
- try:
- MyClass.v
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
- self.assertNotIn("you mean", err.getvalue())
- self.assertNotIn("vvv", err.getvalue())
- self.assertNotIn("mom", err.getvalue())
- self.assertNotIn("'id'", err.getvalue())
- self.assertNotIn("'w'", err.getvalue())
- self.assertNotIn("'pytho'", err.getvalue())
-
- with self.subTest(name="m"):
- try:
- MyClass.m
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
- self.assertNotIn("you mean", err.getvalue())
- self.assertNotIn("vvv", err.getvalue())
- self.assertNotIn("mom", err.getvalue())
- self.assertNotIn("'id'", err.getvalue())
- self.assertNotIn("'w'", err.getvalue())
- self.assertNotIn("'pytho'", err.getvalue())
-
- with self.subTest(name="py"):
- try:
- MyClass.py
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
- self.assertNotIn("you mean", err.getvalue())
- self.assertNotIn("vvv", err.getvalue())
- self.assertNotIn("mom", err.getvalue())
- self.assertNotIn("'id'", err.getvalue())
- self.assertNotIn("'w'", err.getvalue())
- self.assertNotIn("'pytho'", err.getvalue())
-
-
- def test_getattr_suggestions_do_not_trigger_for_big_dicts(self):
- class A:
- blech = None
- # A class with a very big __dict__ will not be consider
- # for suggestions.
- for index in range(2000):
- setattr(A, f"index_{index}", None)
-
- try:
- A().bluch
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertNotIn("blech", err.getvalue())
-
- def test_getattr_suggestions_no_args(self):
- class A:
- blech = None
- def __getattr__(self, attr):
- raise AttributeError()
-
- try:
- A().bluch
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertIn("blech", err.getvalue())
-
- class A:
- blech = None
- def __getattr__(self, attr):
- raise AttributeError
-
- try:
- A().bluch
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertIn("blech", err.getvalue())
-
- def test_getattr_suggestions_invalid_args(self):
- class NonStringifyClass:
- __str__ = None
- __repr__ = None
-
- class A:
- blech = None
- def __getattr__(self, attr):
- raise AttributeError(NonStringifyClass())
-
- class B:
- blech = None
- def __getattr__(self, attr):
- raise AttributeError("Error", 23)
-
- class C:
- blech = None
- def __getattr__(self, attr):
- raise AttributeError(23)
-
- for cls in [A, B, C]:
- try:
- cls().bluch
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertIn("blech", err.getvalue())
-
- def test_getattr_suggestions_for_same_name(self):
- class A:
- def __dir__(self):
- return ['blech']
- try:
- A().blech
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertNotIn("Did you mean", err.getvalue())
-
- def test_attribute_error_with_failing_dict(self):
- class T:
- bluch = 1
- def __dir__(self):
- raise AttributeError("oh no!")
-
- try:
- T().blich
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertNotIn("blech", err.getvalue())
- self.assertNotIn("oh no!", err.getvalue())
-
- def test_attribute_error_with_bad_name(self):
- try:
- raise AttributeError(name=12, obj=23)
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertNotIn("?", err.getvalue())
+class ImportErrorTests(unittest.TestCase):
# TODO: RUSTPYTHON
@unittest.expectedFailure
- def test_attribute_error_inside_nested_getattr(self):
- class A:
- bluch = 1
-
- class B:
- def __getattribute__(self, attr):
- a = A()
- return a.blich
-
- try:
- B().something
- except AttributeError as exc:
- with support.captured_stderr() as err:
- sys.__excepthook__(*sys.exc_info())
-
- self.assertIn("Did you mean", err.getvalue())
- self.assertIn("bluch", err.getvalue())
-
-
-class ImportErrorTests(unittest.TestCase):
def test_attributes(self):
# Setting 'name' and 'path' should not be a problem.
exc = ImportError('test')
@@ -2359,7 +1993,7 @@ def test_attributes(self):
self.assertEqual(exc.name, 'somename')
self.assertEqual(exc.path, 'somepath')
- msg = "'invalid' is an invalid keyword argument for ImportError"
+ msg = r"ImportError\(\) got an unexpected keyword argument 'invalid'"
with self.assertRaisesRegex(TypeError, msg):
ImportError('test', invalid='keyword')
@@ -2415,9 +2049,162 @@ def test_copy_pickle(self):
self.assertEqual(exc.name, orig.name)
self.assertEqual(exc.path, orig.path)
+
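+# Shared helper: write `source` to TESTFN (str as UTF-8 text, bytes verbatim),
+# run it in a fresh interpreter with `-Wd -X utf8`, and return the decoded
+# stderr lines so callers can assert on the traceback output.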
+def run_script(source):
+ if isinstance(source, str):
+ with open(TESTFN, 'w', encoding='utf-8') as testfile:
+ testfile.write(dedent(source))
+ else:
+ with open(TESTFN, 'wb') as testfile:
+ testfile.write(source)
+ _rc, _out, err = script_helper.assert_python_failure('-Wd', '-X', 'utf8', TESTFN)
+ return err.decode('utf-8').splitlines()
+
+class AssertionErrorTests(unittest.TestCase):
+ def tearDown(self):
+ unlink(TESTFN)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ @force_not_colorized
+ def test_assertion_error_location(self):
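+ # Each case pairs a script with the final traceback lines it should
+ # produce, including the caret markers under the failing subexpression.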
+ cases = [
+ ('assert None',
+ [
+ ' assert None',
+ ' ^^^^',
+ 'AssertionError',
+ ],
+ ),
+ ('assert 0',
+ [
+ ' assert 0',
+ ' ^',
+ 'AssertionError',
+ ],
+ ),
+ ('assert 1 > 2',
+ [
+ ' assert 1 > 2',
+ ' ^^^^^',
+ 'AssertionError',
+ ],
+ ),
+ ('assert 1 > 2 and 3 > 2',
+ [
+ ' assert 1 > 2 and 3 > 2',
+ ' ^^^^^^^^^^^^^^^',
+ 'AssertionError',
+ ],
+ ),
+ ('assert 1 > 2, "messäge"',
+ [
+ ' assert 1 > 2, "messäge"',
+ ' ^^^^^',
+ 'AssertionError: messäge',
+ ],
+ ),
+ ('assert 1 > 2, "messäge"'.encode(),
+ [
+ ' assert 1 > 2, "messäge"',
+ ' ^^^^^',
+ 'AssertionError: messäge',
+ ],
+ ),
+ ('# coding: latin1\nassert 1 > 2, "messäge"'.encode('latin1'),
+ [
+ ' assert 1 > 2, "messäge"',
+ ' ^^^^^',
+ 'AssertionError: messäge',
+ ],
+ ),
+ (BOM_UTF8 + 'assert 1 > 2, "messäge"'.encode(),
+ [
+ ' assert 1 > 2, "messäge"',
+ ' ^^^^^',
+ 'AssertionError: messäge',
+ ],
+ ),
+
+ # Multiline:
+ ("""
+ assert (
+ 1 > 2)
+ """,
+ [
+ ' 1 > 2)',
+ ' ^^^^^',
+ 'AssertionError',
+ ],
+ ),
+ ("""
+ assert (
+ 1 > 2), "Message"
+ """,
+ [
+ ' 1 > 2), "Message"',
+ ' ^^^^^',
+ 'AssertionError: Message',
+ ],
+ ),
+ ("""
+ assert (
+ 1 > 2), \\
+ "Message"
+ """,
+ [
+ ' 1 > 2), \\',
+ ' ^^^^^',
+ 'AssertionError: Message',
+ ],
+ ),
+ ]
+ for source, expected in cases:
+ with self.subTest(source=source):
+ result = run_script(source)
+ self.assertEqual(result[-3:], expected)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ @force_not_colorized
+ def test_multiline_not_highlighted(self):
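+ # When the failing assert expression spans several physical lines,
+ # no caret markers are expected in the traceback.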
+ cases = [
+ ("""
+ assert (
+ 1 > 2
+ )
+ """,
+ [
+ ' 1 > 2',
+ 'AssertionError',
+ ],
+ ),
+ ("""
+ assert (
+ 1 < 2 and
+ 3 > 4
+ )
+ """,
+ [
+ ' 1 < 2 and',
+ ' 3 > 4',
+ 'AssertionError',
+ ],
+ ),
+ ]
+ for source, expected in cases:
+ with self.subTest(source=source):
+ result = run_script(source)
+ self.assertEqual(result[-len(expected):], expected)
+
+
+@support.force_not_colorized_test_class
class SyntaxErrorTests(unittest.TestCase):
+ maxDiff = None
+
# TODO: RUSTPYTHON
@unittest.expectedFailure
+ @force_not_colorized
def test_range_of_offsets(self):
cases = [
# Basic range from 2->7
@@ -2508,50 +2295,130 @@ def test_range_of_offsets(self):
self.assertIn(expected, err.getvalue())
the_exception = exc
+ def test_subclass(self):
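+ # Subclasses of SyntaxError should get the same location display
+ # (file, line, source text and carets) as SyntaxError itself.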
+ class MySyntaxError(SyntaxError):
+ pass
+
+ try:
+ raise MySyntaxError("bad bad", ("bad.py", 1, 2, "abcdefg", 1, 7))
+ except SyntaxError as exc:
+ with support.captured_stderr() as err:
+ sys.__excepthook__(*sys.exc_info())
+ self.assertIn("""
+ File "bad.py", line 1
+ abcdefg
+ ^^^^^
+""", err.getvalue())
+
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_encodings(self):
+ self.addCleanup(unlink, TESTFN)
source = (
'# -*- coding: cp437 -*-\n'
'"¢¢¢¢¢¢" + f(4, x for x in range(1))\n'
)
- try:
- with open(TESTFN, 'w', encoding='cp437') as testfile:
- testfile.write(source)
- rc, out, err = script_helper.assert_python_failure('-Wd', '-X', 'utf8', TESTFN)
- err = err.decode('utf-8').splitlines()
-
- self.assertEqual(err[-3], ' "¢¢¢¢¢¢" + f(4, x for x in range(1))')
- self.assertEqual(err[-2], ' ^^^^^^^^^^^^^^^^^^^')
- finally:
- unlink(TESTFN)
+ err = run_script(source.encode('cp437'))
+ self.assertEqual(err[-3], ' "¢¢¢¢¢¢" + f(4, x for x in range(1))')
+ self.assertEqual(err[-2], ' ^^^^^^^^^^^^^^^^^^^')
# Check backwards tokenizer errors
source = '# -*- coding: ascii -*-\n\n(\n'
- try:
- with open(TESTFN, 'w', encoding='ascii') as testfile:
- testfile.write(source)
- rc, out, err = script_helper.assert_python_failure('-Wd', '-X', 'utf8', TESTFN)
- err = err.decode('utf-8').splitlines()
-
- self.assertEqual(err[-3], ' (')
- self.assertEqual(err[-2], ' ^')
- finally:
- unlink(TESTFN)
+ err = run_script(source)
+ self.assertEqual(err[-3], ' (')
+ self.assertEqual(err[-2], ' ^')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_non_utf8(self):
# Check non utf-8 characters
- try:
- with open(TESTFN, 'bw') as testfile:
- testfile.write(b"\x89")
- rc, out, err = script_helper.assert_python_failure('-Wd', '-X', 'utf8', TESTFN)
- err = err.decode('utf-8').splitlines()
+ self.addCleanup(unlink, TESTFN)
+ err = run_script(b"\x89")
+ self.assertIn("SyntaxError: Non-UTF-8 code starting with '\\x89' in file", err[-1])
- self.assertIn("SyntaxError: Non-UTF-8 code starting with '\\x89' in file", err[-1])
- finally:
- unlink(TESTFN)
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_string_source(self):
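+ # compile() should report the same message and offsets for str and bytes
+ # sources, with or without a BOM or an explicit source-encoding comment.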
+ def try_compile(source):
+ with self.assertRaises(SyntaxError) as cm:
+ compile(source, '<string>', 'exec')
+ return cm.exception
+
+ exc = try_compile('return "ä"')
+ self.assertEqual(str(exc), "'return' outside function (<string>, line 1)")
+ self.assertIsNone(exc.text)
+ self.assertEqual(exc.offset, 1)
+ self.assertEqual(exc.end_offset, 12)
+
+ exc = try_compile('return "ä"'.encode())
+ self.assertEqual(str(exc), "'return' outside function (<string>, line 1)")
+ self.assertIsNone(exc.text)
+ self.assertEqual(exc.offset, 1)
+ self.assertEqual(exc.end_offset, 12)
+
+ exc = try_compile(BOM_UTF8 + 'return "ä"'.encode())
+ self.assertEqual(str(exc), "'return' outside function (<string>, line 1)")
+ self.assertIsNone(exc.text)
+ self.assertEqual(exc.offset, 1)
+ self.assertEqual(exc.end_offset, 12)
+
+ exc = try_compile('# coding: latin1\nreturn "ä"'.encode('latin1'))
+ self.assertEqual(str(exc), "'return' outside function (<string>, line 2)")
+ self.assertIsNone(exc.text)
+ self.assertEqual(exc.offset, 1)
+ self.assertEqual(exc.end_offset, 12)
+
+ exc = try_compile('return "ä" #' + 'ä'*1000)
+ self.assertEqual(str(exc), "'return' outside function (<string>, line 1)")
+ self.assertIsNone(exc.text)
+ self.assertEqual(exc.offset, 1)
+ self.assertEqual(exc.end_offset, 12)
+
+ exc = try_compile('return "ä" # ' + 'ä'*1000)
+ self.assertEqual(str(exc), "'return' outside function (<string>, line 1)")
+ self.assertIsNone(exc.text)
+ self.assertEqual(exc.offset, 1)
+ self.assertEqual(exc.end_offset, 12)
+
+ # TODO: RUSTPYTHON
+ @unittest.expectedFailure
+ def test_file_source(self):
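+ # Same sources as test_string_source, but executed from a file so the
+ # traceback includes the decoded source line and caret markers.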
+ self.addCleanup(unlink, TESTFN)
+ err = run_script('return "ä"')
+ self.assertEqual(err[-3:], [
+ ' return "ä"',
+ ' ^^^^^^^^^^',
+ "SyntaxError: 'return' outside function"])
+
+ err = run_script('return "ä"'.encode())
+ self.assertEqual(err[-3:], [
+ ' return "ä"',
+ ' ^^^^^^^^^^',
+ "SyntaxError: 'return' outside function"])
+
+ err = run_script(BOM_UTF8 + 'return "ä"'.encode())
+ self.assertEqual(err[-3:], [
+ ' return "ä"',
+ ' ^^^^^^^^^^',
+ "SyntaxError: 'return' outside function"])
+
+ err = run_script('# coding: latin1\nreturn "ä"'.encode('latin1'))
+ self.assertEqual(err[-3:], [
+ ' return "ä"',
+ ' ^^^^^^^^^^',
+ "SyntaxError: 'return' outside function"])
+
+ err = run_script('return "ä" #' + 'ä'*1000)
+ self.assertEqual(err[-2:], [
+ ' ^^^^^^^^^^^',
+ "SyntaxError: 'return' outside function"])
+ self.assertEqual(err[-3][:100], ' return "ä" #' + 'ä'*84)
+
+ err = run_script('return "ä" # ' + 'ä'*1000)
+ self.assertEqual(err[-2:], [
+ ' ^^^^^^^^^^^',
+ "SyntaxError: 'return' outside function"])
+ self.assertEqual(err[-3][:100], ' return "ä" # ' + 'ä'*83)
def test_attributes_new_constructor(self):
args = ("bad.py", 1, 2, "abcdefg", 1, 100)
diff --git a/Lib/test/test_faulthandler.py b/Lib/test/test_faulthandler.py
index c9838cb714..d7e2c6a1de 100644
--- a/Lib/test/test_faulthandler.py
+++ b/Lib/test/test_faulthandler.py
@@ -7,9 +7,7 @@
import subprocess
import sys
from test import support
-from test.support import os_helper
-from test.support import script_helper, is_android
-from test.support import skip_if_sanitizer
+from test.support import os_helper, script_helper, is_android, MS_WINDOWS, threading_helper
import tempfile
import unittest
from textwrap import dedent
@@ -23,7 +21,6 @@
raise unittest.SkipTest("test module requires subprocess")
TIMEOUT = 0.5
-MS_WINDOWS = (os.name == 'nt')
def expected_traceback(lineno1, lineno2, header, min_count=1):
@@ -36,7 +33,7 @@ def expected_traceback(lineno1, lineno2, header, min_count=1):
return '^' + regex + '$'
def skip_segfault_on_android(test):
- # Issue #32138: Raising SIGSEGV on Android may not cause a crash.
+ # gh-76319: Raising SIGSEGV on Android may not cause a crash.
return unittest.skipIf(is_android,
'raising SIGSEGV on Android is unreliable')(test)
@@ -64,8 +61,16 @@ def get_output(self, code, filename=None, fd=None):
pass_fds = []
if fd is not None:
pass_fds.append(fd)
+ env = dict(os.environ)
+
+ # Sanitizers must not handle SIGSEGV (ex: for test_enable_fd())
+ option = 'handle_segv=0'
+ support.set_sanitizer_env_var(env, option)
+
with support.SuppressCrashReport():
- process = script_helper.spawn_python('-c', code, pass_fds=pass_fds)
+ process = script_helper.spawn_python('-c', code,
+ pass_fds=pass_fds,
+ env=env)
with process:
output, stderr = process.communicate()
exitcode = process.wait()
@@ -243,7 +248,7 @@ def test_sigfpe(self):
faulthandler._sigfpe()
""",
3,
- 'Floating point exception')
+ 'Floating-point exception')
@unittest.skipIf(_testcapi is None, 'need _testcapi')
@unittest.skipUnless(hasattr(signal, 'SIGBUS'), 'need signal.SIGBUS')
@@ -273,6 +278,7 @@ def test_sigill(self):
5,
'Illegal instruction')
+ @unittest.skipIf(_testcapi is None, 'need _testcapi')
def check_fatal_error_func(self, release_gil):
# Test that Py_FatalError() dumps a traceback
with support.SuppressCrashReport():
@@ -282,7 +288,7 @@ def check_fatal_error_func(self, release_gil):
""",
2,
'xyz',
- func='test_fatal_error',
+ func='_testcapi_fatal_error_impl',
py_fatal_error=True)
# TODO: RUSTPYTHON
@@ -324,8 +330,6 @@ def test_gil_released(self):
# TODO: RUSTPYTHON
@unittest.expectedFailure
- @skip_if_sanitizer(memory=True, ub=True, reason="sanitizer "
- "builds change crashing process output.")
@skip_segfault_on_android
def test_enable_file(self):
with temporary_filename() as filename:
@@ -343,8 +347,6 @@ def test_enable_file(self):
@unittest.expectedFailure
@unittest.skipIf(sys.platform == "win32",
"subprocess doesn't support pass_fds on Windows")
- @skip_if_sanitizer(memory=True, ub=True, reason="sanitizer "
- "builds change crashing process output.")
@skip_segfault_on_android
def test_enable_fd(self):
with tempfile.TemporaryFile('wb+') as fp:
@@ -616,10 +618,12 @@ def run(self):
lineno = 8
else:
lineno = 10
+ # When the traceback is dumped, the waiter thread may be in the
+ # `self.running.set()` call or in `self.stop.wait()`.
regex = r"""
^Thread 0x[0-9a-f]+ \(most recent call first\):
(?: File ".*threading.py", line [0-9]+ in [_a-z]+
- ){{1,3}} File "<string>", line 23 in run
+ ){{1,3}} File "<string>", line (?:22|23) in run
File ".*threading.py", line [0-9]+ in _bootstrap_inner
File ".*threading.py", line [0-9]+ in _bootstrap
@@ -735,6 +739,7 @@ def test_dump_traceback_later_fd(self):
# TODO: RUSTPYTHON
@unittest.expectedFailure
+ @support.requires_resource('walltime')
def test_dump_traceback_later_twice(self):
self.check_dump_traceback_later(loops=2)
@@ -974,6 +979,34 @@ def test_cancel_later_without_dump_traceback_later(self):
self.assertEqual(output, [])
self.assertEqual(exitcode, 0)
+ @threading_helper.requires_working_threading()
+ @unittest.skipUnless(support.Py_GIL_DISABLED, "only meaningful if the GIL is disabled")
+ def test_free_threaded_dump_traceback(self):
+ # gh-128400: Other threads need to be paused to invoke faulthandler
+ code = dedent("""
+ import faulthandler
+ from threading import Thread, Event
+
+ class Waiter(Thread):
+ def __init__(self):
+ Thread.__init__(self)
+ self.running = Event()
+ self.stop = Event()
+
+ def run(self):
+ self.running.set()
+ self.stop.wait()
+
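+ # Repeatedly start a thread, wait until it is running, then dump all
+ # tracebacks while it is alive to exercise the thread-pausing path.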
+ for _ in range(100):
+ waiter = Waiter()
+ waiter.start()
+ waiter.running.wait()
+ faulthandler.dump_traceback(all_threads=True)
+ waiter.stop.set()
+ waiter.join()
+ """)
+ _, exitcode = self.get_output(code)
+ self.assertEqual(exitcode, 0)
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_funcattrs.py b/Lib/test/test_funcattrs.py
index 5fd268fd90..3d5378092b 100644
--- a/Lib/test/test_funcattrs.py
+++ b/Lib/test/test_funcattrs.py
@@ -176,8 +176,6 @@ def test___name__(self):
self.assertEqual(self.fi.a.__name__, 'a')
self.cannot_set_attr(self.fi.a, "__name__", 'a', AttributeError)
- # TODO: RUSTPYTHON
- @unittest.expectedFailure
def test___qualname__(self):
# PEP 3155
self.assertEqual(self.b.__qualname__, 'FuncAttrsTest.setUp.<locals>.b')
diff --git a/Lib/test/test_htmlparser.py b/Lib/test/test_htmlparser.py
index 12917755a5..61fa24fab5 100644
--- a/Lib/test/test_htmlparser.py
+++ b/Lib/test/test_htmlparser.py
@@ -4,6 +4,8 @@
import pprint
import unittest
+from unittest.mock import patch
+
class EventCollector(html.parser.HTMLParser):
@@ -315,6 +317,16 @@ def get_events(self):
("endtag", element_lower)],
collector=Collector(convert_charrefs=False))
+ def test_EOF_in_cdata(self):
+ content = """<!-- not a comment --> &not-an-entity-ref;
+
+ ''"""
+ s = f'