From 387f88cac1e911672f6321f4fbb0a3af9710626d Mon Sep 17 00:00:00 2001 From: Savannah Ostrowski Date: Thu, 11 Dec 2025 09:42:36 -0800 Subject: [PATCH 01/10] Use subcommand over sub-command in argparse docs (#142488) --- Doc/library/argparse.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst index 71c4f094886546..d50ec34e54d710 100644 --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -1679,7 +1679,7 @@ The Namespace object Other utilities --------------- -Sub-commands +Subcommands ^^^^^^^^^^^^ .. method:: ArgumentParser.add_subparsers(*, [title], [description], [prog], \ @@ -1708,7 +1708,7 @@ Sub-commands * *description* - description for the sub-parser group in help output, by default ``None`` - * *prog* - usage information that will be displayed with sub-command help, + * *prog* - usage information that will be displayed with subcommand help, by default the name of the program and any positional arguments before the subparser argument @@ -1718,7 +1718,7 @@ Sub-commands * action_ - the basic type of action to be taken when this argument is encountered at the command line - * dest_ - name of the attribute under which sub-command name will be + * dest_ - name of the attribute under which subcommand name will be stored; by default ``None`` and no value is stored * required_ - Whether or not a subcommand must be provided, by default From dac4589726952be873df13f41bea24cc6f9da6b1 Mon Sep 17 00:00:00 2001 From: Brett Cannon Date: Thu, 11 Dec 2025 09:55:47 -0800 Subject: [PATCH 02/10] GH-142203: Remove the `debug_override` parameter from `packaging.util.cache_from_source()` (GH-142204) --- Doc/library/importlib.rst | 11 +++--- Lib/importlib/_bootstrap_external.py | 13 +------ Lib/test/test_importlib/test_util.py | 35 ------------------- ...-12-02-14-52-51.gh-issue-142203.ofWOvV.rst | 3 ++ 4 files changed, 8 insertions(+), 54 deletions(-) create mode 100644 
Misc/NEWS.d/next/Library/2025-12-02-14-52-51.gh-issue-142203.ofWOvV.rst diff --git a/Doc/library/importlib.rst b/Doc/library/importlib.rst index 34130f9be67e7e..b851b929b7e2fb 100644 --- a/Doc/library/importlib.rst +++ b/Doc/library/importlib.rst @@ -1300,7 +1300,7 @@ an :term:`importer`. .. versionadded:: 3.4 -.. function:: cache_from_source(path, debug_override=None, *, optimization=None) +.. function:: cache_from_source(path, *, optimization=None) Return the :pep:`3147`/:pep:`488` path to the byte-compiled file associated with the source *path*. For example, if *path* is ``/foo/bar/baz.py`` the return @@ -1319,12 +1319,6 @@ an :term:`importer`. ``/foo/bar/__pycache__/baz.cpython-32.opt-2.pyc``. The string representation of *optimization* can only be alphanumeric, else :exc:`ValueError` is raised. - The *debug_override* parameter is deprecated and can be used to override - the system's value for ``__debug__``. A ``True`` value is the equivalent of - setting *optimization* to the empty string. A ``False`` value is the same as - setting *optimization* to ``1``. If both *debug_override* an *optimization* - are not ``None`` then :exc:`TypeError` is raised. - .. versionadded:: 3.4 .. versionchanged:: 3.5 @@ -1334,6 +1328,9 @@ an :term:`importer`. .. versionchanged:: 3.6 Accepts a :term:`path-like object`. + .. versionchanged:: 3.15 + The *debug_override* parameter was removed. + .. function:: source_from_cache(path) diff --git a/Lib/importlib/_bootstrap_external.py b/Lib/importlib/_bootstrap_external.py index 332dc1c5a4fc8f..9d289674357b44 100644 --- a/Lib/importlib/_bootstrap_external.py +++ b/Lib/importlib/_bootstrap_external.py @@ -236,7 +236,7 @@ def _write_atomic(path, data, mode=0o666): # Deprecated. DEBUG_BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES = BYTECODE_SUFFIXES -def cache_from_source(path, debug_override=None, *, optimization=None): +def cache_from_source(path, *, optimization=None): """Given the path to a .py file, return the path to its .pyc file. 
The .py file does not need to exist; this simply returns the path to the @@ -247,20 +247,9 @@ def cache_from_source(path, debug_override=None, *, optimization=None): of the argument is taken and verified to be alphanumeric (else ValueError is raised). - The debug_override parameter is deprecated. If debug_override is not None, - a True value is the same as setting 'optimization' to the empty string - while a False value is equivalent to setting 'optimization' to '1'. - If sys.implementation.cache_tag is None then NotImplementedError is raised. """ - if debug_override is not None: - _warnings.warn('the debug_override parameter is deprecated; use ' - "'optimization' instead", DeprecationWarning) - if optimization is not None: - message = 'debug_override or optimization must be set to None' - raise TypeError(message) - optimization = '' if debug_override else 1 path = _os.fspath(path) head, tail = _path_split(path) base, sep, rest = tail.rpartition('.') diff --git a/Lib/test/test_importlib/test_util.py b/Lib/test/test_importlib/test_util.py index 0adab8d14e0452..a49e360d10fb7c 100644 --- a/Lib/test/test_importlib/test_util.py +++ b/Lib/test/test_importlib/test_util.py @@ -359,47 +359,12 @@ def test_cache_from_source_no_dot(self): self.assertEqual(self.util.cache_from_source(path, optimization=''), expect) - def test_cache_from_source_debug_override(self): - # Given the path to a .py file, return the path to its PEP 3147/PEP 488 - # defined .pyc file (i.e. under __pycache__). 
- path = os.path.join('foo', 'bar', 'baz', 'qux.py') - with warnings.catch_warnings(): - warnings.simplefilter('ignore') - self.assertEqual(self.util.cache_from_source(path, False), - self.util.cache_from_source(path, optimization=1)) - self.assertEqual(self.util.cache_from_source(path, True), - self.util.cache_from_source(path, optimization='')) - with warnings.catch_warnings(): - warnings.simplefilter('error') - with self.assertRaises(DeprecationWarning): - self.util.cache_from_source(path, False) - with self.assertRaises(DeprecationWarning): - self.util.cache_from_source(path, True) - def test_cache_from_source_cwd(self): path = 'foo.py' expect = os.path.join('__pycache__', 'foo.{}.pyc'.format(self.tag)) self.assertEqual(self.util.cache_from_source(path, optimization=''), expect) - def test_cache_from_source_override(self): - # When debug_override is not None, it can be any true-ish or false-ish - # value. - path = os.path.join('foo', 'bar', 'baz.py') - # However if the bool-ishness can't be determined, the exception - # propagates. - class Bearish: - def __bool__(self): raise RuntimeError - with warnings.catch_warnings(): - warnings.simplefilter('ignore') - self.assertEqual(self.util.cache_from_source(path, []), - self.util.cache_from_source(path, optimization=1)) - self.assertEqual(self.util.cache_from_source(path, [17]), - self.util.cache_from_source(path, optimization='')) - with self.assertRaises(RuntimeError): - self.util.cache_from_source('/foo/bar/baz.py', Bearish()) - - def test_cache_from_source_optimization_empty_string(self): # Setting 'optimization' to '' leads to no optimization tag (PEP 488). 
path = 'foo.py' diff --git a/Misc/NEWS.d/next/Library/2025-12-02-14-52-51.gh-issue-142203.ofWOvV.rst b/Misc/NEWS.d/next/Library/2025-12-02-14-52-51.gh-issue-142203.ofWOvV.rst new file mode 100644 index 00000000000000..87e5870ddd6389 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2025-12-02-14-52-51.gh-issue-142203.ofWOvV.rst @@ -0,0 +1,3 @@ +Remove the *debug_override* parameter from +:func:`importlib.util.cache_from_source` which has been deprecated since +Python 3.5. From a26c831bc486b6e607cee6a5923bad52b97c2341 Mon Sep 17 00:00:00 2001 From: Sam Gross Date: Thu, 11 Dec 2025 14:41:03 -0500 Subject: [PATCH 03/10] gh-142589: Fix PyUnstable_Object_IsUniqueReferencedTemporary (gh-142593) PyUnstable_Object_IsUniqueReferencedTemporary wasn't handling tagged ints on the evaluation stack properly. --- Lib/test/test_capi/test_object.py | 7 +++++++ .../2025-12-11-13-01-49.gh-issue-142589.nNAqgw.rst | 2 ++ Modules/_testcapi/object.c | 10 ++++++++++ Objects/object.c | 8 ++++++-- 4 files changed, 25 insertions(+), 2 deletions(-) create mode 100644 Misc/NEWS.d/next/C_API/2025-12-11-13-01-49.gh-issue-142589.nNAqgw.rst diff --git a/Lib/test/test_capi/test_object.py b/Lib/test/test_capi/test_object.py index c5040913e9e1f1..67572ab1ba268d 100644 --- a/Lib/test/test_capi/test_object.py +++ b/Lib/test/test_capi/test_object.py @@ -251,6 +251,13 @@ def func(x): func(object()) + # Test that a newly created object in C is not considered + # a uniquely referenced temporary, because it's not on the stack. + # gh-142586: do the test in a loop over a list to test for handling + # tagged ints on the stack. 
+ for i in [0, 1, 2]: + self.assertFalse(_testcapi.pyobject_is_unique_temporary_new_object()) + def pyobject_dump(self, obj, release_gil=False): pyobject_dump = _testcapi.pyobject_dump diff --git a/Misc/NEWS.d/next/C_API/2025-12-11-13-01-49.gh-issue-142589.nNAqgw.rst b/Misc/NEWS.d/next/C_API/2025-12-11-13-01-49.gh-issue-142589.nNAqgw.rst new file mode 100644 index 00000000000000..529277b951ada3 --- /dev/null +++ b/Misc/NEWS.d/next/C_API/2025-12-11-13-01-49.gh-issue-142589.nNAqgw.rst @@ -0,0 +1,2 @@ +Fix :c:func:`PyUnstable_Object_IsUniqueReferencedTemporary()` handling of +tagged ints on the interpreter stack. diff --git a/Modules/_testcapi/object.c b/Modules/_testcapi/object.c index 4c9632c07a99f4..a4f76c409c6f78 100644 --- a/Modules/_testcapi/object.c +++ b/Modules/_testcapi/object.c @@ -138,6 +138,15 @@ pyobject_is_unique_temporary(PyObject *self, PyObject *obj) return PyLong_FromLong(result); } +static PyObject * +pyobject_is_unique_temporary_new_object(PyObject *self, PyObject *unused) +{ + PyObject *obj = PyList_New(0); + int result = PyUnstable_Object_IsUniqueReferencedTemporary(obj); + Py_DECREF(obj); + return PyLong_FromLong(result); +} + static int MyObject_dealloc_called = 0; static void @@ -517,6 +526,7 @@ static PyMethodDef test_methods[] = { {"pyobject_clear_weakrefs_no_callbacks", pyobject_clear_weakrefs_no_callbacks, METH_O}, {"pyobject_enable_deferred_refcount", pyobject_enable_deferred_refcount, METH_O}, {"pyobject_is_unique_temporary", pyobject_is_unique_temporary, METH_O}, + {"pyobject_is_unique_temporary_new_object", pyobject_is_unique_temporary_new_object, METH_NOARGS}, {"test_py_try_inc_ref", test_py_try_inc_ref, METH_NOARGS}, {"test_xincref_doesnt_leak",test_xincref_doesnt_leak, METH_NOARGS}, {"test_incref_doesnt_leak", test_incref_doesnt_leak, METH_NOARGS}, diff --git a/Objects/object.c b/Objects/object.c index fcea3503de8213..36a37bb0bbea4d 100644 --- a/Objects/object.c +++ b/Objects/object.c @@ -2759,8 +2759,12 @@ 
PyUnstable_Object_IsUniqueReferencedTemporary(PyObject *op) _PyStackRef *stackpointer = frame->stackpointer; while (stackpointer > base) { stackpointer--; - if (op == PyStackRef_AsPyObjectBorrow(*stackpointer)) { - return PyStackRef_IsHeapSafe(*stackpointer); + _PyStackRef ref = *stackpointer; + if (PyStackRef_IsTaggedInt(ref)) { + continue; + } + if (op == PyStackRef_AsPyObjectBorrow(ref)) { + return PyStackRef_IsHeapSafe(ref); } } return 0; From af185727b2a4aeb39ba0c323588be495d8cc2e19 Mon Sep 17 00:00:00 2001 From: Brett Cannon Date: Thu, 11 Dec 2025 11:44:46 -0800 Subject: [PATCH 04/10] GH-65961: Stop setting `__cached__` on modules (GH-142165) --- Doc/c-api/import.rst | 19 +++-- Doc/deprecations/pending-removal-in-3.15.rst | 4 +- Doc/howto/gdb_helpers.rst | 2 +- Doc/library/functions.rst | 2 +- Doc/library/importlib.rst | 3 +- Doc/library/runpy.rst | 28 +++++--- Doc/reference/datamodel.rst | 38 +++------- Doc/whatsnew/3.12.rst | 2 +- Doc/whatsnew/3.2.rst | 2 +- Lib/importlib/_bootstrap.py | 17 +---- Lib/importlib/_bootstrap_external.py | 1 - Lib/inspect.py | 12 ++-- Lib/profile.py | 1 - Lib/profiling/tracing/__init__.py | 1 - Lib/pydoc.py | 12 ++-- Lib/runpy.py | 2 - Lib/site.py | 8 +-- Lib/test/test_cmd_line_script.py | 3 - Lib/test/test_import/__init__.py | 72 ------------------- .../test_import/data/unwritable/__init__.py | 6 +- .../test_importlib/import_/test_helpers.py | 7 +- Lib/test/test_importlib/test_abc.py | 10 +-- Lib/test/test_importlib/test_api.py | 3 - Lib/test/test_importlib/test_spec.py | 1 - Lib/test/test_importlib/test_util.py | 6 -- Lib/test/test_inspect/test_inspect.py | 3 +- Lib/test/test_pkg.py | 34 +++++---- Lib/test/test_pyrepl/test_pyrepl.py | 4 +- Lib/test/test_runpy.py | 6 -- Lib/test/test_site.py | 11 --- Lib/trace.py | 2 - ...5-12-01-15-22-54.gh-issue-65961.hCJvRB.rst | 1 + Python/pythonrun.c | 6 -- 33 files changed, 93 insertions(+), 236 deletions(-) create mode 100644 
Misc/NEWS.d/next/Core_and_Builtins/2025-12-01-15-22-54.gh-issue-65961.hCJvRB.rst diff --git a/Doc/c-api/import.rst b/Doc/c-api/import.rst index 1786ac6b503895..a28c0713dd3b2f 100644 --- a/Doc/c-api/import.rst +++ b/Doc/c-api/import.rst @@ -129,8 +129,7 @@ Importing Modules of :class:`~importlib.machinery.SourceFileLoader` otherwise. The module's :attr:`~module.__file__` attribute will be set to the code - object's :attr:`~codeobject.co_filename`. If applicable, - :attr:`~module.__cached__` will also be set. + object's :attr:`~codeobject.co_filename`. This function will reload the module if it was already imported. See :c:func:`PyImport_ReloadModule` for the intended way to reload a module. @@ -142,10 +141,13 @@ Importing Modules :c:func:`PyImport_ExecCodeModuleWithPathnames`. .. versionchanged:: 3.12 - The setting of :attr:`~module.__cached__` and :attr:`~module.__loader__` + The setting of ``__cached__`` and :attr:`~module.__loader__` is deprecated. See :class:`~importlib.machinery.ModuleSpec` for alternatives. + .. versionchanged:: 3.15 + ``__cached__`` is no longer set. + .. c:function:: PyObject* PyImport_ExecCodeModuleEx(const char *name, PyObject *co, const char *pathname) @@ -157,16 +159,19 @@ Importing Modules .. c:function:: PyObject* PyImport_ExecCodeModuleObject(PyObject *name, PyObject *co, PyObject *pathname, PyObject *cpathname) - Like :c:func:`PyImport_ExecCodeModuleEx`, but the :attr:`~module.__cached__` - attribute of the module object is set to *cpathname* if it is - non-``NULL``. Of the three functions, this is the preferred one to use. + Like :c:func:`PyImport_ExecCodeModuleEx`, but the path to any compiled file + via *cpathname* is used appropriately when non-``NULL``. Of the three + functions, this is the preferred one to use. .. versionadded:: 3.3 .. versionchanged:: 3.12 - Setting :attr:`~module.__cached__` is deprecated. See + Setting ``__cached__`` is deprecated. See :class:`~importlib.machinery.ModuleSpec` for alternatives. + .. 
versionchanged:: 3.15 + ``__cached__`` no longer set. + .. c:function:: PyObject* PyImport_ExecCodeModuleWithPathnames(const char *name, PyObject *co, const char *pathname, const char *cpathname) diff --git a/Doc/deprecations/pending-removal-in-3.15.rst b/Doc/deprecations/pending-removal-in-3.15.rst index 09cbd6f01a0580..3b9cf892fe913d 100644 --- a/Doc/deprecations/pending-removal-in-3.15.rst +++ b/Doc/deprecations/pending-removal-in-3.15.rst @@ -3,9 +3,9 @@ Pending removal in Python 3.15 * The import system: - * Setting :attr:`~module.__cached__` on a module while + * Setting ``__cached__`` on a module while failing to set :attr:`__spec__.cached ` - is deprecated. In Python 3.15, :attr:`!__cached__` will cease to be set or + is deprecated. In Python 3.15, ``__cached__`` will cease to be set or take into consideration by the import system or standard library. (:gh:`97879`) * Setting :attr:`~module.__package__` on a module while diff --git a/Doc/howto/gdb_helpers.rst b/Doc/howto/gdb_helpers.rst index 98ce813ca4ab02..33d1fbf8cd9e9e 100644 --- a/Doc/howto/gdb_helpers.rst +++ b/Doc/howto/gdb_helpers.rst @@ -136,7 +136,7 @@ enabled:: at Objects/unicodeobject.c:551 #7 0x0000000000440d94 in PyUnicodeUCS2_FromString (u=0x5c2b8d "__lltrace__") at Objects/unicodeobject.c:569 #8 0x0000000000584abd in PyDict_GetItemString (v= - {'Yuck': , '__builtins__': , '__file__': 'Lib/test/crashers/nasty_eq_vs_dict.py', '__package__': None, 'y': , 'dict': {0: 0, 1: 1, 2: 2, 3: 3}, '__cached__': None, '__name__': '__main__', 'z': , '__doc__': None}, key= + {'Yuck': , '__builtins__': , '__file__': 'Lib/test/crashers/nasty_eq_vs_dict.py', '__package__': None, 'y': , 'dict': {0: 0, 1: 1, 2: 2, 3: 3}, '__name__': '__main__', 'z': , '__doc__': None}, key= 0x5c2b8d "__lltrace__") at Objects/dictobject.c:2171 Notice how the dictionary argument to ``PyDict_GetItemString`` is displayed diff --git a/Doc/library/functions.rst b/Doc/library/functions.rst index 8314fed80fa512..601745a75780fc 100644 --- 
a/Doc/library/functions.rst +++ b/Doc/library/functions.rst @@ -526,7 +526,7 @@ are always available. They are listed here in alphabetical order. >>> dir() # show the names in the module namespace # doctest: +SKIP ['__builtins__', '__name__', 'struct'] >>> dir(struct) # show the names in the struct module # doctest: +SKIP - ['Struct', '__all__', '__builtins__', '__cached__', '__doc__', '__file__', + ['Struct', '__all__', '__builtins__', '__doc__', '__file__', '__initializing__', '__loader__', '__name__', '__package__', '_clearcache', 'calcsize', 'error', 'pack', 'pack_into', 'unpack', 'unpack_from'] diff --git a/Doc/library/importlib.rst b/Doc/library/importlib.rst index b851b929b7e2fb..c5ea78c1683761 100644 --- a/Doc/library/importlib.rst +++ b/Doc/library/importlib.rst @@ -1197,8 +1197,7 @@ find and load modules. .. attribute:: cached - The filename of a compiled version of the module's code - (see :attr:`module.__cached__`). + The filename of a compiled version of the module's code. The :term:`finder` should always set this attribute but it may be ``None`` for modules that do not need compiled code stored. diff --git a/Doc/library/runpy.rst b/Doc/library/runpy.rst index b07ec6e93f80ab..64735b5a109e66 100644 --- a/Doc/library/runpy.rst +++ b/Doc/library/runpy.rst @@ -50,10 +50,10 @@ The :mod:`runpy` module provides two functions: overridden by :func:`run_module`. The special global variables ``__name__``, ``__spec__``, ``__file__``, - ``__cached__``, ``__loader__`` and ``__package__`` are set in the globals - dictionary before the module code is executed. (Note that this is a - minimal set of variables - other variables may be set implicitly as an - interpreter implementation detail.) + ``__loader__`` and ``__package__`` are set in the globals dictionary before + the module code is executed. (Note that this is a minimal set of variables - + other variables may be set implicitly as an interpreter implementation + detail.) 
``__name__`` is set to *run_name* if this optional argument is not :const:`None`, to ``mod_name + '.__main__'`` if the named module is a @@ -63,7 +63,7 @@ The :mod:`runpy` module provides two functions: module (that is, ``__spec__.name`` will always be *mod_name* or ``mod_name + '.__main__'``, never *run_name*). - ``__file__``, ``__cached__``, ``__loader__`` and ``__package__`` are + ``__file__``, ``__loader__`` and ``__package__`` are :ref:`set as normal ` based on the module spec. If the argument *alter_sys* is supplied and evaluates to :const:`True`, @@ -98,6 +98,9 @@ The :mod:`runpy` module provides two functions: ``__package__`` are deprecated. See :class:`~importlib.machinery.ModuleSpec` for alternatives. + .. versionchanged:: 3.15 + ``__cached__`` is no longer set. + .. function:: run_path(path_name, init_globals=None, run_name=None) .. index:: @@ -125,23 +128,23 @@ The :mod:`runpy` module provides two functions: overridden by :func:`run_path`. The special global variables ``__name__``, ``__spec__``, ``__file__``, - ``__cached__``, ``__loader__`` and ``__package__`` are set in the globals - dictionary before the module code is executed. (Note that this is a - minimal set of variables - other variables may be set implicitly as an - interpreter implementation detail.) + ``__loader__`` and ``__package__`` are set in the globals dictionary before + the module code is executed. (Note that this is a minimal set of variables - + other variables may be set implicitly as an interpreter implementation + detail.) ``__name__`` is set to *run_name* if this optional argument is not :const:`None` and to ``''`` otherwise. If *file_path* directly references a script file (whether as source or as precompiled byte code), then ``__file__`` will be set to - *file_path*, and ``__spec__``, ``__cached__``, ``__loader__`` and + *file_path*, and ``__spec__``, ``__loader__`` and ``__package__`` will all be set to :const:`None`. 
If *file_path* is a reference to a valid :data:`sys.path` entry, then ``__spec__`` will be set appropriately for the imported :mod:`__main__` module (that is, ``__spec__.name`` will always be ``__main__``). - ``__file__``, ``__cached__``, ``__loader__`` and ``__package__`` will be + ``__file__``, ``__loader__`` and ``__package__`` will be :ref:`set as normal ` based on the module spec. A number of alterations are also made to the :mod:`sys` module. Firstly, @@ -173,6 +176,9 @@ The :mod:`runpy` module provides two functions: The setting of ``__cached__``, ``__loader__``, and ``__package__`` are deprecated. + .. versionchanged:: 3.15 + ``__cached__`` is no longer set. + .. seealso:: :pep:`338` -- Executing modules as scripts diff --git a/Doc/reference/datamodel.rst b/Doc/reference/datamodel.rst index 5f79c6fe8f50ff..97f55acf00d521 100644 --- a/Doc/reference/datamodel.rst +++ b/Doc/reference/datamodel.rst @@ -895,7 +895,6 @@ Attribute assignment updates the module's namespace dictionary, e.g., single: __loader__ (module attribute) single: __path__ (module attribute) single: __file__ (module attribute) - single: __cached__ (module attribute) single: __doc__ (module attribute) single: __annotations__ (module attribute) single: __annotate__ (module attribute) @@ -1044,43 +1043,28 @@ this approach. instead of :attr:`!module.__path__`. .. attribute:: module.__file__ -.. attribute:: module.__cached__ - :attr:`!__file__` and :attr:`!__cached__` are both optional attributes that + :attr:`!__file__` is an optional attribute that may or may not be set. Both attributes should be a :class:`str` when they are available. - :attr:`!__file__` indicates the pathname of the file from which the module - was loaded (if loaded from a file), or the pathname of the shared library - file for extension modules loaded dynamically from a shared library. 
- It might be missing for certain types of modules, such as C modules that are - statically linked into the interpreter, and the + An optional attribute, :attr:`!__file__` indicates the pathname of the file + from which the module was loaded (if loaded from a file), or the pathname of + the shared library file for extension modules loaded dynamically from a + shared library. It might be missing for certain types of modules, such as C + modules that are statically linked into the interpreter, and the :ref:`import system ` may opt to leave it unset if it has no semantic meaning (for example, a module loaded from a database). - If :attr:`!__file__` is set then the :attr:`!__cached__` attribute might - also be set, which is the path to any compiled version of - the code (for example, a byte-compiled file). The file does not need to exist - to set this attribute; the path can simply point to where the - compiled file *would* exist (see :pep:`3147`). - - Note that :attr:`!__cached__` may be set even if :attr:`!__file__` is not - set. However, that scenario is quite atypical. Ultimately, the - :term:`loader` is what makes use of the module spec provided by the - :term:`finder` (from which :attr:`!__file__` and :attr:`!__cached__` are - derived). So if a loader can load from a cached module but otherwise does - not load from a file, that atypical scenario may be appropriate. - - It is **strongly** recommended that you use - :attr:`module.__spec__.cached ` - instead of :attr:`!module.__cached__`. - .. deprecated-removed:: 3.13 3.15 - Setting :attr:`!__cached__` on a module while failing to set + Setting ``__cached__`` on a module while failing to set :attr:`!__spec__.cached` is deprecated. In Python 3.15, - :attr:`!__cached__` will cease to be set or taken into consideration by + ``__cached__`` will cease to be set or taken into consideration by the import system or standard library. + .. versionchanged:: 3.15 + ``__cached__`` is no longer set. 
+ Other writable attributes on module objects ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/Doc/whatsnew/3.12.rst b/Doc/whatsnew/3.12.rst index 8badfe9a6b49b9..221956f3dd3819 100644 --- a/Doc/whatsnew/3.12.rst +++ b/Doc/whatsnew/3.12.rst @@ -1337,7 +1337,7 @@ Deprecated it was :exc:`ImportWarning`). (Contributed by Brett Cannon in :gh:`65961`.) -* Setting :attr:`~module.__package__` or :attr:`~module.__cached__` on a +* Setting :attr:`~module.__package__` or ``__cached__`` on a module is deprecated, and will cease to be set or taken into consideration by the import system in Python 3.14. (Contributed by Brett Cannon in :gh:`65961`.) diff --git a/Doc/whatsnew/3.2.rst b/Doc/whatsnew/3.2.rst index 47c4d9acbc870e..3b13d90f7692cd 100644 --- a/Doc/whatsnew/3.2.rst +++ b/Doc/whatsnew/3.2.rst @@ -312,7 +312,7 @@ cluttering source directories, the *pyc* files are now collected in a Aside from the filenames and target directories, the new scheme has a few aspects that are visible to the programmer: -* Imported modules now have a :attr:`~module.__cached__` attribute which stores +* Imported modules now have a ``__cached__`` attribute which stores the name of the actual file that was imported: >>> import collections diff --git a/Lib/importlib/_bootstrap.py b/Lib/importlib/_bootstrap.py index 8cee9fda935050..07d938b18fe727 100644 --- a/Lib/importlib/_bootstrap.py +++ b/Lib/importlib/_bootstrap.py @@ -565,8 +565,7 @@ class ModuleSpec: `has_location` indicates that a spec's "origin" reflects a location. When this is True, `__file__` attribute of the module is set. - `cached` is the location of the cached bytecode file, if any. It - corresponds to the `__cached__` attribute. + `cached` is the location of the cached bytecode file, if any. `submodule_search_locations` is the sequence of path entries to search when importing submodules. 
If set, is_package should be @@ -699,10 +698,6 @@ def _spec_from_module(module, loader=None, origin=None): origin = getattr(loader, '_ORIGIN', None) if not origin and location is not None: origin = location - try: - cached = module.__cached__ - except AttributeError: - cached = None try: submodule_search_locations = list(module.__path__) except AttributeError: @@ -710,7 +705,7 @@ def _spec_from_module(module, loader=None, origin=None): spec = ModuleSpec(name, loader, origin=origin) spec._set_fileattr = False if location is None else (origin == location) - spec.cached = cached + spec.cached = None spec.submodule_search_locations = submodule_search_locations return spec @@ -770,7 +765,7 @@ def _init_module_attrs(spec, module, *, override=False): module.__path__ = spec.submodule_search_locations except AttributeError: pass - # __file__/__cached__ + # __file__ if spec.has_location: if override or getattr(module, '__file__', None) is None: try: @@ -778,12 +773,6 @@ def _init_module_attrs(spec, module, *, override=False): except AttributeError: pass - if override or getattr(module, '__cached__', None) is None: - if spec.cached is not None: - try: - module.__cached__ = spec.cached - except AttributeError: - pass return module diff --git a/Lib/importlib/_bootstrap_external.py b/Lib/importlib/_bootstrap_external.py index 9d289674357b44..b576ceb1ce9f6e 100644 --- a/Lib/importlib/_bootstrap_external.py +++ b/Lib/importlib/_bootstrap_external.py @@ -1503,7 +1503,6 @@ def _fix_up_module(ns, name, pathname, cpathname=None): ns['__spec__'] = spec ns['__loader__'] = loader ns['__file__'] = pathname - ns['__cached__'] = cpathname except Exception: # Not important enough to report. 
pass diff --git a/Lib/inspect.py b/Lib/inspect.py index ff462750888c88..07c4e28f0d9952 100644 --- a/Lib/inspect.py +++ b/Lib/inspect.py @@ -3407,20 +3407,20 @@ def _main(): sys.exit(1) if args.details: - print('Target: {}'.format(target)) - print('Origin: {}'.format(getsourcefile(module))) - print('Cached: {}'.format(module.__cached__)) + print(f'Target: {target}') + print(f'Origin: {getsourcefile(module)}') + print(f'Cached: {module.__spec__.cached}') if obj is module: - print('Loader: {}'.format(repr(module.__loader__))) + print(f'Loader: {module.__loader__!r}') if hasattr(module, '__path__'): - print('Submodule search path: {}'.format(module.__path__)) + print(f'Submodule search path: {module.__path__}') else: try: __, lineno = findsource(obj) except Exception: pass else: - print('Line: {}'.format(lineno)) + print(f'Line: {lineno}') print() else: diff --git a/Lib/profile.py b/Lib/profile.py index 20c500d28bc5b9..304284da421163 100644 --- a/Lib/profile.py +++ b/Lib/profile.py @@ -607,7 +607,6 @@ def main(): '__file__': spec.origin, '__name__': spec.name, '__package__': None, - '__cached__': None, } try: runctx(code, globs, None, options.outfile, options.sort) diff --git a/Lib/profiling/tracing/__init__.py b/Lib/profiling/tracing/__init__.py index a6b8edf721611f..bd3cbf299aab3b 100644 --- a/Lib/profiling/tracing/__init__.py +++ b/Lib/profiling/tracing/__init__.py @@ -201,7 +201,6 @@ def main(): '__file__': spec.origin, '__name__': spec.name, '__package__': None, - '__cached__': None, }) try: diff --git a/Lib/pydoc.py b/Lib/pydoc.py index 45ff5fca308c14..ee4457d9d3a932 100644 --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -241,12 +241,12 @@ def visiblename(name, all=None, obj=None): """Decide whether to show documentation on a variable.""" # Certain special names are redundant or internal. # XXX Remove __initializing__? 
- if name in {'__author__', '__builtins__', '__cached__', '__credits__', - '__date__', '__doc__', '__file__', '__spec__', - '__loader__', '__module__', '__name__', '__package__', - '__path__', '__qualname__', '__slots__', '__version__', - '__static_attributes__', '__firstlineno__', - '__annotate_func__', '__annotations_cache__'}: + if name in {'__author__', '__builtins__', '__credits__', '__date__', + '__doc__', '__file__', '__spec__', '__loader__', '__module__', + '__name__', '__package__', '__path__', '__qualname__', + '__slots__', '__version__', '__static_attributes__', + '__firstlineno__', '__annotate_func__', + '__annotations_cache__'}: return 0 # Private names are hidden, but special names are displayed. if name.startswith('__') and name.endswith('__'): return 1 diff --git a/Lib/runpy.py b/Lib/runpy.py index f072498f6cb405..9f62d20e9a2322 100644 --- a/Lib/runpy.py +++ b/Lib/runpy.py @@ -80,7 +80,6 @@ def _run_code(code, run_globals, init_globals=None, pkg_name = mod_spec.parent run_globals.update(__name__ = mod_name, __file__ = fname, - __cached__ = cached, __doc__ = None, __loader__ = loader, __package__ = pkg_name, @@ -180,7 +179,6 @@ def _run_module_as_main(mod_name, alter_argv=True): At the very least, these variables in __main__ will be overwritten: __name__ __file__ - __cached__ __loader__ __package__ """ diff --git a/Lib/site.py b/Lib/site.py index 7c6810792cfa7e..1b7a656551b853 100644 --- a/Lib/site.py +++ b/Lib/site.py @@ -111,7 +111,7 @@ def makepath(*paths): def abs_paths(): - """Set all module __file__ and __cached__ attributes to an absolute path""" + """Set __file__ to an absolute path.""" for m in set(sys.modules.values()): loader_module = None try: @@ -127,10 +127,6 @@ def abs_paths(): m.__file__ = os.path.abspath(m.__file__) except (AttributeError, OSError, TypeError): pass - try: - m.__cached__ = os.path.abspath(m.__cached__) - except (AttributeError, OSError, TypeError): - pass def removeduppaths(): @@ -699,7 +695,7 @@ def main(): 
known_paths = removeduppaths() if orig_path != sys.path: # removeduppaths() might make sys.path absolute. - # fix __file__ and __cached__ of already imported modules too. + # Fix __file__ of already imported modules too. abs_paths() known_paths = venv(known_paths) diff --git a/Lib/test/test_cmd_line_script.py b/Lib/test/test_cmd_line_script.py index cc1a625a5097d8..8695df9eb0c294 100644 --- a/Lib/test/test_cmd_line_script.py +++ b/Lib/test/test_cmd_line_script.py @@ -44,7 +44,6 @@ def f(): _loader = __loader__ if __loader__ is BuiltinImporter else type(__loader__) print('__loader__==%a' % _loader) print('__file__==%a' % __file__) -print('__cached__==%a' % __cached__) print('__package__==%r' % __package__) # Check PEP 451 details import os.path @@ -58,8 +57,6 @@ def f(): assertEqual(__spec__.parent, __package__) assertIdentical(__spec__.submodule_search_locations, None) assertEqual(__spec__.origin, __file__) - if __spec__.cached is not None: - assertEqual(__spec__.cached, __cached__) # Check the sys module import sys assertIdentical(globals(), sys.modules[__name__].__dict__) diff --git a/Lib/test/test_import/__init__.py b/Lib/test/test_import/__init__.py index bc61ddc4f03675..c5cabc6477c8e6 100644 --- a/Lib/test/test_import/__init__.py +++ b/Lib/test/test_import/__init__.py @@ -1713,78 +1713,6 @@ def test_missing_source_legacy(self): finally: os.remove(pyc_file) - def test___cached__(self): - # Modules now also have an __cached__ that points to the pyc file. - m = __import__(TESTFN) - pyc_file = importlib.util.cache_from_source(TESTFN + '.py') - self.assertEqual(m.__cached__, os.path.join(os.getcwd(), pyc_file)) - - @skip_if_dont_write_bytecode - def test___cached___legacy_pyc(self): - # Like test___cached__() except that for backward compatibility, - # when the pyc file lives where the py file would have been (and named - # without the tag), it is importable. The __cached__ of the imported - # module is the pyc location. 
- __import__(TESTFN) - # pyc_file gets removed in _clean() via tearDown(). - pyc_file = make_legacy_pyc(self.source) - os.remove(self.source) - unload(TESTFN) - importlib.invalidate_caches() - m = __import__(TESTFN) - self.assertEqual(m.__cached__, - os.path.join(os.getcwd(), os.path.relpath(pyc_file))) - - @skip_if_dont_write_bytecode - def test_package___cached__(self): - # Like test___cached__ but for packages. - def cleanup(): - rmtree('pep3147') - unload('pep3147.foo') - unload('pep3147') - os.mkdir('pep3147') - self.addCleanup(cleanup) - # Touch the __init__.py - with open(os.path.join('pep3147', '__init__.py'), 'wb'): - pass - with open(os.path.join('pep3147', 'foo.py'), 'wb'): - pass - importlib.invalidate_caches() - m = __import__('pep3147.foo') - init_pyc = importlib.util.cache_from_source( - os.path.join('pep3147', '__init__.py')) - self.assertEqual(m.__cached__, os.path.join(os.getcwd(), init_pyc)) - foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py')) - self.assertEqual(sys.modules['pep3147.foo'].__cached__, - os.path.join(os.getcwd(), foo_pyc)) - - def test_package___cached___from_pyc(self): - # Like test___cached__ but ensuring __cached__ when imported from a - # PEP 3147 pyc file. 
- def cleanup(): - rmtree('pep3147') - unload('pep3147.foo') - unload('pep3147') - os.mkdir('pep3147') - self.addCleanup(cleanup) - # Touch the __init__.py - with open(os.path.join('pep3147', '__init__.py'), 'wb'): - pass - with open(os.path.join('pep3147', 'foo.py'), 'wb'): - pass - importlib.invalidate_caches() - m = __import__('pep3147.foo') - unload('pep3147.foo') - unload('pep3147') - importlib.invalidate_caches() - m = __import__('pep3147.foo') - init_pyc = importlib.util.cache_from_source( - os.path.join('pep3147', '__init__.py')) - self.assertEqual(m.__cached__, os.path.join(os.getcwd(), init_pyc)) - foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py')) - self.assertEqual(sys.modules['pep3147.foo'].__cached__, - os.path.join(os.getcwd(), foo_pyc)) - def test_recompute_pyc_same_second(self): # Even when the source file doesn't change timestamp, a change in # source size is enough to trigger recomputation of the pyc file. diff --git a/Lib/test/test_import/data/unwritable/__init__.py b/Lib/test/test_import/data/unwritable/__init__.py index da4ddb3d027c34..1d61ff348d8d15 100644 --- a/Lib/test/test_import/data/unwritable/__init__.py +++ b/Lib/test/test_import/data/unwritable/__init__.py @@ -1,9 +1,9 @@ import sys class MyMod(object): - __slots__ = ['__builtins__', '__cached__', '__doc__', - '__file__', '__loader__', '__name__', - '__package__', '__path__', '__spec__'] + __slots__ = ['__builtins__', '__doc__', '__file__', + '__loader__', '__name__', '__package__', + '__path__', '__spec__'] def __init__(self): for attr in self.__slots__: setattr(self, attr, globals()[attr]) diff --git a/Lib/test/test_importlib/import_/test_helpers.py b/Lib/test/test_importlib/import_/test_helpers.py index 550f88d1d7a651..7587276a41e953 100644 --- a/Lib/test/test_importlib/import_/test_helpers.py +++ b/Lib/test/test_importlib/import_/test_helpers.py @@ -19,8 +19,7 @@ def test_no_loader_but_spec(self): ns = {"__spec__": spec} 
_bootstrap_external._fix_up_module(ns, name, path) - expected = {"__spec__": spec, "__loader__": loader, "__file__": path, - "__cached__": None} + expected = {"__spec__": spec, "__loader__": loader, "__file__": path} self.assertEqual(ns, expected) def test_no_loader_no_spec_but_sourceless(self): @@ -29,7 +28,7 @@ def test_no_loader_no_spec_but_sourceless(self): ns = {} _bootstrap_external._fix_up_module(ns, name, path, path) - expected = {"__file__": path, "__cached__": path} + expected = {"__file__": path} for key, val in expected.items(): with self.subTest(f"{key}: {val}"): @@ -51,7 +50,7 @@ def test_no_loader_no_spec_but_source(self): ns = {} _bootstrap_external._fix_up_module(ns, name, path) - expected = {"__file__": path, "__cached__": None} + expected = {"__file__": path} for key, val in expected.items(): with self.subTest(f"{key}: {val}"): diff --git a/Lib/test/test_importlib/test_abc.py b/Lib/test/test_importlib/test_abc.py index 8132a69d8f4e89..7c146ea853b0d9 100644 --- a/Lib/test/test_importlib/test_abc.py +++ b/Lib/test/test_importlib/test_abc.py @@ -510,8 +510,7 @@ def test_get_code_no_path(self): class SourceOnlyLoader: # Globals that should be defined for all modules. 
- source = (b"_ = '::'.join([__name__, __file__, __cached__, __package__, " - b"repr(__loader__)])") + source = (b"_ = '::'.join([__name__, __file__, __package__, repr(__loader__)])") def __init__(self, path): self.path = path @@ -586,20 +585,17 @@ def setUp(self, *, is_package=True, **kwargs): def verify_module(self, module): self.assertEqual(module.__name__, self.name) self.assertEqual(module.__file__, self.path) - self.assertEqual(module.__cached__, self.cached) self.assertEqual(module.__package__, self.package) self.assertEqual(module.__loader__, self.loader) values = module._.split('::') self.assertEqual(values[0], self.name) self.assertEqual(values[1], self.path) - self.assertEqual(values[2], self.cached) - self.assertEqual(values[3], self.package) - self.assertEqual(values[4], repr(self.loader)) + self.assertEqual(values[2], self.package) + self.assertEqual(values[3], repr(self.loader)) def verify_code(self, code_object): module = types.ModuleType(self.name) module.__file__ = self.path - module.__cached__ = self.cached module.__package__ = self.package module.__loader__ = self.loader module.__path__ = [] diff --git a/Lib/test/test_importlib/test_api.py b/Lib/test/test_importlib/test_api.py index 1bc531a2fe34e7..4de0cf029a81e0 100644 --- a/Lib/test/test_importlib/test_api.py +++ b/Lib/test/test_importlib/test_api.py @@ -235,7 +235,6 @@ def test_reload_location_changed(self): expected = {'__name__': name, '__package__': '', '__file__': path, - '__cached__': cached, '__doc__': None, } os_helper.create_empty_file(path) @@ -256,7 +255,6 @@ def test_reload_location_changed(self): expected = {'__name__': name, '__package__': name, '__file__': init_path, - '__cached__': cached, '__path__': [os.path.dirname(init_path)], '__doc__': None, } @@ -316,7 +314,6 @@ def test_reload_namespace_changed(self): expected = {'__name__': name, '__package__': name, '__file__': init_path, - '__cached__': cached, '__path__': [os.path.dirname(init_path)], '__doc__': None, 'eggs': None, 
diff --git a/Lib/test/test_importlib/test_spec.py b/Lib/test/test_importlib/test_spec.py index fef0fda101e46d..b48d0a101ca9e7 100644 --- a/Lib/test/test_importlib/test_spec.py +++ b/Lib/test/test_importlib/test_spec.py @@ -336,7 +336,6 @@ def test_reload_init_module_attrs(self): self.assertIs(loaded.__spec__, self.spec) self.assertNotHasAttr(loaded, '__path__') self.assertNotHasAttr(loaded, '__file__') - self.assertNotHasAttr(loaded, '__cached__') (Frozen_ModuleSpecMethodsTests, diff --git a/Lib/test/test_importlib/test_util.py b/Lib/test/test_importlib/test_util.py index a49e360d10fb7c..17a211f10fa0ac 100644 --- a/Lib/test/test_importlib/test_util.py +++ b/Lib/test/test_importlib/test_util.py @@ -124,12 +124,6 @@ def test___file__(self): module = self.util.module_from_spec(spec) self.assertEqual(module.__file__, spec.origin) - def test___cached__(self): - spec = self.machinery.ModuleSpec('test', object()) - spec.cached = 'some/path' - spec.has_location = True - module = self.util.module_from_spec(spec) - self.assertEqual(module.__cached__, spec.cached) (Frozen_ModuleFromSpecTests, Source_ModuleFromSpecTests diff --git a/Lib/test/test_inspect/test_inspect.py b/Lib/test/test_inspect/test_inspect.py index dd3b7d9c5b4b5b..075e1802bebc3e 100644 --- a/Lib/test/test_inspect/test_inspect.py +++ b/Lib/test/test_inspect/test_inspect.py @@ -6494,13 +6494,12 @@ def test_details(self): rc, out, err = assert_python_ok(*args, '-m', 'inspect', 'unittest', '--details') output = out.decode() - # Just a quick sanity check on the output + # Just a quick safety check on the output self.assertIn(module.__spec__.name, output) self.assertIn(module.__name__, output) self.assertIn(module.__spec__.origin, output) self.assertIn(module.__file__, output) self.assertIn(module.__spec__.cached, output) - self.assertIn(module.__cached__, output) self.assertEqual(err, b'') diff --git a/Lib/test/test_pkg.py b/Lib/test/test_pkg.py index d2b724db40d3e9..0a366e2a5bb2d1 100644 --- a/Lib/test/test_pkg.py 
+++ b/Lib/test/test_pkg.py @@ -198,15 +198,15 @@ def test_5(self): import t5 self.assertEqual(fixdir(dir(t5)), - ['__cached__', '__doc__', '__file__', '__loader__', - '__name__', '__package__', '__path__', '__spec__', - 'foo', 'string', 't5']) + ['__doc__', '__file__', '__loader__', '__name__', + '__package__', '__path__', '__spec__', 'foo', + 'string', 't5']) self.assertEqual(fixdir(dir(t5.foo)), - ['__cached__', '__doc__', '__file__', '__loader__', - '__name__', '__package__', '__spec__', 'string']) + ['__doc__', '__file__', '__loader__', '__name__', + '__package__', '__spec__', 'string']) self.assertEqual(fixdir(dir(t5.string)), - ['__cached__', '__doc__', '__file__', '__loader__', - '__name__', '__package__', '__spec__', 'spam']) + ['__doc__', '__file__', '__loader__', '__name__', + '__package__', '__spec__', 'spam']) def test_6(self): hier = [ @@ -221,14 +221,13 @@ def test_6(self): import t6 self.assertEqual(fixdir(dir(t6)), - ['__all__', '__cached__', '__doc__', '__file__', - '__loader__', '__name__', '__package__', '__path__', - '__spec__']) + ['__all__', '__doc__', '__file__', '__loader__', + '__name__', '__package__', '__path__', '__spec__']) s = """ import t6 from t6 import * self.assertEqual(fixdir(dir(t6)), - ['__all__', '__cached__', '__doc__', '__file__', + ['__all__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__', 'eggs', 'ham', 'spam']) self.assertEqual(dir(), ['eggs', 'ham', 'self', 'spam', 't6']) @@ -256,20 +255,19 @@ def test_7(self): t7, sub, subsub = None, None, None import t7 as tas self.assertEqual(fixdir(dir(tas)), - ['__cached__', '__doc__', '__file__', '__loader__', - '__name__', '__package__', '__path__', '__spec__']) + ['__doc__', '__file__', '__loader__', '__name__', + '__package__', '__path__', '__spec__']) self.assertFalse(t7) from t7 import sub as subpar self.assertEqual(fixdir(dir(subpar)), - ['__cached__', '__doc__', '__file__', '__loader__', - '__name__', '__package__', '__path__', 
'__spec__']) + ['__doc__', '__file__', '__loader__', '__name__', + '__package__', '__path__', '__spec__']) self.assertFalse(t7) self.assertFalse(sub) from t7.sub import subsub as subsubsub self.assertEqual(fixdir(dir(subsubsub)), - ['__cached__', '__doc__', '__file__', '__loader__', - '__name__', '__package__', '__path__', '__spec__', - 'spam']) + ['__doc__', '__file__', '__loader__', '__name__', + '__package__', '__path__', '__spec__', 'spam']) self.assertFalse(t7) self.assertFalse(sub) self.assertFalse(subsub) diff --git a/Lib/test/test_pyrepl/test_pyrepl.py b/Lib/test/test_pyrepl/test_pyrepl.py index e298b2add52c3e..ddcaafc9b7dbe8 100644 --- a/Lib/test/test_pyrepl/test_pyrepl.py +++ b/Lib/test/test_pyrepl/test_pyrepl.py @@ -1443,10 +1443,10 @@ def test_exposed_globals_in_repl(self): case2 = f"{pre}, '__doc__', '__file__', {post}" in output # if `__main__` is a cached .pyc file and the .py source exists - case3 = f"{pre}, '__cached__', '__doc__', '__file__', {post}" in output + case3 = f"{pre}, '__doc__', '__file__', {post}" in output # if `__main__` is a cached .pyc file but there's no .py source file - case4 = f"{pre}, '__cached__', '__doc__', {post}" in output + case4 = f"{pre}, '__doc__', {post}" in output self.assertTrue(case1 or case2 or case3 or case4, output) diff --git a/Lib/test/test_runpy.py b/Lib/test/test_runpy.py index cc76b72b9639eb..254a009a69718b 100644 --- a/Lib/test/test_runpy.py +++ b/Lib/test/test_runpy.py @@ -57,7 +57,6 @@ def f(): implicit_namespace = { "__name__": None, "__file__": None, - "__cached__": None, "__package__": None, "__doc__": None, "__spec__": None @@ -286,7 +285,6 @@ def _del_pkg(self, top): def _fix_ns_for_legacy_pyc(self, ns, alter_sys): char_to_add = "c" ns["__file__"] += char_to_add - ns["__cached__"] = ns["__file__"] spec = ns["__spec__"] new_spec = importlib.util.spec_from_file_location(spec.name, ns["__file__"]) @@ -306,7 +304,6 @@ def _check_module(self, depth, alter_sys=False, expected_ns.update({ "__name__": 
mod_name, "__file__": mod_fname, - "__cached__": mod_spec.cached, "__package__": mod_name.rpartition(".")[0], "__spec__": mod_spec, }) @@ -347,7 +344,6 @@ def _check_package(self, depth, alter_sys=False, expected_ns.update({ "__name__": mod_name, "__file__": mod_fname, - "__cached__": importlib.util.cache_from_source(mod_fname), "__package__": pkg_name, "__spec__": mod_spec, }) @@ -552,7 +548,6 @@ def test_run_name(self): expected_ns.update({ "__name__": run_name, "__file__": mod_fname, - "__cached__": importlib.util.cache_from_source(mod_fname), "__package__": mod_name.rpartition(".")[0], "__spec__": mod_spec, }) @@ -632,7 +627,6 @@ def create_ns(init_globals): expected_ns.update({ "__name__": expected_name, "__file__": expected_file, - "__cached__": mod_cached, "__package__": "", "__spec__": mod_spec, "run_argv0": expected_argv0, diff --git a/Lib/test/test_site.py b/Lib/test/test_site.py index 27ae3539b554ef..e7dc5e2611c2de 100644 --- a/Lib/test/test_site.py +++ b/Lib/test/test_site.py @@ -466,17 +466,6 @@ def tearDown(self): """Restore sys.path""" sys.path[:] = self.sys_path - def test_abs_paths_cached_None(self): - """Test for __cached__ is None. - - Regarding to PEP 3147, __cached__ can be None. 
- - See also: https://bugs.python.org/issue30167 - """ - sys.modules['test'].__cached__ = None - site.abs_paths() - self.assertIsNone(sys.modules['test'].__cached__) - def test_no_duplicate_paths(self): # No duplicate paths should exist in sys.path # Handled by removeduppaths() diff --git a/Lib/trace.py b/Lib/trace.py index cf8817f4383fc1..cd3a6d30661da3 100644 --- a/Lib/trace.py +++ b/Lib/trace.py @@ -721,7 +721,6 @@ def parse_ignore_dir(s): '__package__': mod_spec.parent, '__loader__': mod_spec.loader, '__spec__': mod_spec, - '__cached__': None, } else: sys.argv = [opts.progname, *opts.arguments] @@ -734,7 +733,6 @@ def parse_ignore_dir(s): '__file__': opts.progname, '__name__': '__main__', '__package__': None, - '__cached__': None, } t.runctx(code, globs, globs) except OSError as err: diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-12-01-15-22-54.gh-issue-65961.hCJvRB.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-12-01-15-22-54.gh-issue-65961.hCJvRB.rst new file mode 100644 index 00000000000000..59ab00ac8321f6 --- /dev/null +++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-12-01-15-22-54.gh-issue-65961.hCJvRB.rst @@ -0,0 +1 @@ +Stop setting ``__cached__`` on modules. 
diff --git a/Python/pythonrun.c b/Python/pythonrun.c index 272be504a68fa1..f2c402eb1a03b5 100644 --- a/Python/pythonrun.c +++ b/Python/pythonrun.c @@ -478,9 +478,6 @@ _PyRun_SimpleFileObject(FILE *fp, PyObject *filename, int closeit, if (PyDict_SetItemString(dict, "__file__", filename) < 0) { goto done; } - if (PyDict_SetItemString(dict, "__cached__", Py_None) < 0) { - goto done; - } set_file_name = 1; } @@ -535,9 +532,6 @@ _PyRun_SimpleFileObject(FILE *fp, PyObject *filename, int closeit, if (PyDict_PopString(dict, "__file__", NULL) < 0) { PyErr_Print(); } - if (PyDict_PopString(dict, "__cached__", NULL) < 0) { - PyErr_Print(); - } } Py_XDECREF(main_module); return ret; From e38967ed60778146050cc88dd16b70bfd867fde7 Mon Sep 17 00:00:00 2001 From: Neil Schemenauer Date: Thu, 11 Dec 2025 12:30:56 -0800 Subject: [PATCH 05/10] gh-142531: Fix free-threaded GC performance regression (gh-142562) If there are many untracked tuples, the GC will run too often, resulting in poor performance. The fix is to include untracked tuples in the "long lived" object count. The number of frozen objects is also now included since the free-threaded GC must scan those too. 
--- Lib/test/test_gc.py | 18 ++++++++++ ...-12-10-23-03-10.gh-issue-142531.NUEa1T.rst | 5 +++ Modules/_testinternalcapi.c | 8 +++++ Python/gc_free_threading.c | 36 +++++++++++++++---- 4 files changed, 61 insertions(+), 6 deletions(-) create mode 100644 Misc/NEWS.d/next/Core_and_Builtins/2025-12-10-23-03-10.gh-issue-142531.NUEa1T.rst diff --git a/Lib/test/test_gc.py b/Lib/test/test_gc.py index ec5df4d20e7085..6aa6361d5d0b92 100644 --- a/Lib/test/test_gc.py +++ b/Lib/test/test_gc.py @@ -1231,6 +1231,24 @@ def test(): assert_python_ok("-c", code_inside_function) + @unittest.skipUnless(Py_GIL_DISABLED, "requires free-threaded GC") + @unittest.skipIf(_testinternalcapi is None, "requires _testinternalcapi") + def test_tuple_untrack_counts(self): + # This ensures that the free-threaded GC is counting untracked tuples + # in the "long_lived_total" count. This is required to avoid + # performance issues from running the GC too frequently. See + # GH-142531 as an example. + gc.collect() + count = _testinternalcapi.get_long_lived_total() + n = 20_000 + tuples = [(x,) for x in range(n)] + gc.collect() + new_count = _testinternalcapi.get_long_lived_total() + self.assertFalse(gc.is_tracked(tuples[0])) + # Use n // 2 just in case some other objects were collected. + self.assertTrue(new_count - count > (n // 2)) + + class IncrementalGCTests(unittest.TestCase): @unittest.skipIf(_testinternalcapi is None, "requires _testinternalcapi") @requires_gil_enabled("Free threading does not support incremental GC") diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-12-10-23-03-10.gh-issue-142531.NUEa1T.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-12-10-23-03-10.gh-issue-142531.NUEa1T.rst new file mode 100644 index 00000000000000..15e03c1b9dd03f --- /dev/null +++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-12-10-23-03-10.gh-issue-142531.NUEa1T.rst @@ -0,0 +1,5 @@ +Fix a free-threaded GC performance regression. 
If there are many untracked +tuples, the GC will run too often, resulting in poor performance. The fix +is to include untracked tuples in the "long lived" object count. The number +of frozen objects is also now included since the free-threaded GC must +scan those too. diff --git a/Modules/_testinternalcapi.c b/Modules/_testinternalcapi.c index 89e558b0fe8933..4140cd23ded95e 100644 --- a/Modules/_testinternalcapi.c +++ b/Modules/_testinternalcapi.c @@ -2250,6 +2250,13 @@ get_tlbc_id(PyObject *Py_UNUSED(module), PyObject *obj) } return PyLong_FromVoidPtr(bc); } + +static PyObject * +get_long_lived_total(PyObject *self, PyObject *Py_UNUSED(ignored)) +{ + return PyLong_FromInt64(PyInterpreterState_Get()->gc.long_lived_total); +} + #endif static PyObject * @@ -2590,6 +2597,7 @@ static PyMethodDef module_functions[] = { {"py_thread_id", get_py_thread_id, METH_NOARGS}, {"get_tlbc", get_tlbc, METH_O, NULL}, {"get_tlbc_id", get_tlbc_id, METH_O, NULL}, + {"get_long_lived_total", get_long_lived_total, METH_NOARGS}, #endif #ifdef _Py_TIER2 {"uop_symbols_test", _Py_uop_symbols_test, METH_NOARGS}, diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c index e672e870db2f27..7ba94d5381b72e 100644 --- a/Python/gc_free_threading.c +++ b/Python/gc_free_threading.c @@ -375,6 +375,19 @@ op_from_block(void *block, void *arg, bool include_frozen) return op; } +// As above but returns untracked and frozen objects as well. 
+static PyObject * +op_from_block_all_gc(void *block, void *arg) +{ + struct visitor_args *a = arg; + if (block == NULL) { + return NULL; + } + PyObject *op = (PyObject *)((char*)block + a->offset); + assert(PyObject_IS_GC(op)); + return op; +} + static int gc_visit_heaps_lock_held(PyInterpreterState *interp, mi_block_visit_fun *visitor, struct visitor_args *arg) @@ -1186,12 +1199,20 @@ static bool scan_heap_visitor(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *args) { - PyObject *op = op_from_block(block, args, false); + PyObject *op = op_from_block_all_gc(block, args); if (op == NULL) { return true; } - struct collection_state *state = (struct collection_state *)args; + // The free-threaded GC cost is proportional to the number of objects in + // the mimalloc GC heap and so we should include the counts for untracked + // and frozen objects as well. This is especially important if many + // tuples have been untracked. + state->long_lived_total++; + if (!_PyObject_GC_IS_TRACKED(op) || gc_is_frozen(op)) { + return true; + } + if (gc_is_unreachable(op)) { // Disable deferred refcounting for unreachable objects so that they // are collected immediately after finalization. @@ -1209,6 +1230,9 @@ scan_heap_visitor(const mi_heap_t *heap, const mi_heap_area_t *area, else { worklist_push(&state->unreachable, op); } + // It is possible this object will be resurrected but + // for now we assume it will be deallocated. 
+ state->long_lived_total--; return true; } @@ -1222,7 +1246,6 @@ scan_heap_visitor(const mi_heap_t *heap, const mi_heap_area_t *area, // object is reachable, restore `ob_tid`; we're done with these objects gc_restore_tid(op); gc_clear_alive(op); - state->long_lived_total++; return true; } @@ -1891,6 +1914,7 @@ handle_resurrected_objects(struct collection_state *state) _PyObject_ASSERT(op, Py_REFCNT(op) > 1); worklist_remove(&iter); merge_refcount(op, -1); // remove worklist reference + state->long_lived_total++; } } } @@ -2303,9 +2327,6 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state, } } - // Record the number of live GC objects - interp->gc.long_lived_total = state->long_lived_total; - // Find weakref callbacks we will honor (but do not call them). find_weakref_callbacks(state); _PyEval_StartTheWorld(interp); @@ -2326,8 +2347,11 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state, if (err == 0) { clear_weakrefs(state); } + // Record the number of live GC objects + interp->gc.long_lived_total = state->long_lived_total; _PyEval_StartTheWorld(interp); + if (err < 0) { cleanup_worklist(&state->unreachable); cleanup_worklist(&state->legacy_finalizers); From 6a0135a392992a38dc5199045748bc10a4e451e8 Mon Sep 17 00:00:00 2001 From: Pablo Galindo Salgado Date: Thu, 11 Dec 2025 20:46:34 +0000 Subject: [PATCH 06/10] gh-138122: Add exception profiling mode to the sampling profiler (#142561) --- Doc/library/profiling.sampling.rst | 71 ++- Doc/whatsnew/3.15.rst | 6 + Include/internal/pycore_debug_offsets.h | 12 + .../sampling/_flamegraph_assets/flamegraph.js | 4 + .../flamegraph_template.html | 4 + Lib/profiling/sampling/cli.py | 8 +- Lib/profiling/sampling/collector.py | 9 +- Lib/profiling/sampling/constants.py | 3 + Lib/profiling/sampling/gecko_collector.py | 16 +- .../sampling/live_collector/collector.py | 8 + .../sampling/live_collector/widgets.py | 12 + Lib/profiling/sampling/sample.py | 7 +- 
Lib/profiling/sampling/stack_collector.py | 9 +- Lib/test/test_external_inspection.py | 408 ++++++++++++++++++ .../test_sampling_profiler/test_modes.py | 191 ++++++++ ...-12-11-04-18-49.gh-issue-138122.m3EF9E.rst | 3 + Modules/_remote_debugging/_remote_debugging.h | 12 +- Modules/_remote_debugging/module.c | 6 +- Modules/_remote_debugging/threads.c | 30 ++ 19 files changed, 802 insertions(+), 17 deletions(-) create mode 100644 Misc/NEWS.d/next/Library/2025-12-11-04-18-49.gh-issue-138122.m3EF9E.rst diff --git a/Doc/library/profiling.sampling.rst b/Doc/library/profiling.sampling.rst index 23e9173a815d22..2bad2a8d1ab353 100644 --- a/Doc/library/profiling.sampling.rst +++ b/Doc/library/profiling.sampling.rst @@ -470,9 +470,10 @@ which you can use to judge whether the data is sufficient for your analysis. Profiling modes =============== -The sampling profiler supports three modes that control which samples are +The sampling profiler supports four modes that control which samples are recorded. The mode determines what the profile measures: total elapsed time, -CPU execution time, or time spent holding the global interpreter lock. +CPU execution time, time spent holding the global interpreter lock, or +exception handling. Wall-clock mode @@ -553,6 +554,67 @@ single-threaded programs to distinguish Python execution time from time spent in C extensions or I/O. +Exception mode +-------------- + +Exception mode (``--mode=exception``) records samples only when a thread has +an active exception:: + + python -m profiling.sampling run --mode=exception script.py + +Samples are recorded in two situations: when an exception is being propagated +up the call stack (after ``raise`` but before being caught), or when code is +executing inside an ``except`` block where exception information is still +present in the thread state. + +The following example illustrates which code regions are captured: + +.. 
code-block:: python + + def example(): + try: + raise ValueError("error") # Captured: exception being raised + except ValueError: + process_error() # Captured: inside except block + finally: + cleanup() # NOT captured: exception already handled + + def example_propagating(): + try: + try: + raise ValueError("error") + finally: + cleanup() # Captured: exception propagating through + except ValueError: + pass + + def example_no_exception(): + try: + do_work() + finally: + cleanup() # NOT captured: no exception involved + +Note that ``finally`` blocks are only captured when an exception is actively +propagating through them. Once an ``except`` block finishes executing, Python +clears the exception information before running any subsequent ``finally`` +block. Similarly, ``finally`` blocks that run during normal execution (when no +exception was raised) are not captured because no exception state is present. + +This mode is useful for understanding where your program spends time handling +errors. Exception handling can be a significant source of overhead in code +that uses exceptions for flow control (such as ``StopIteration`` in iterators) +or in applications that process many error conditions (such as network servers +handling connection failures). + +Exception mode helps answer questions like "how much time is spent handling +exceptions?" and "which exception handlers are the most expensive?" It can +reveal hidden performance costs in code that catches and processes many +exceptions, even when those exceptions are handled gracefully. For example, +if a parsing library uses exceptions internally to signal format errors, this +mode will capture time spent in those handlers even if the calling code never +sees the exceptions. + + Output formats ============== @@ -1006,8 +1068,9 @@ Mode options .. option:: --mode - Sampling mode: ``wall`` (default), ``cpu``, or ``gil``. - The ``cpu`` and ``gil`` modes are incompatible with ``--async-aware``. 
+ Sampling mode: ``wall`` (default), ``cpu``, ``gil``, or ``exception``. + The ``cpu``, ``gil``, and ``exception`` modes are incompatible with + ``--async-aware``. .. option:: --async-mode diff --git a/Doc/whatsnew/3.15.rst b/Doc/whatsnew/3.15.rst index 853c47d4402f20..9d4686f982a99a 100644 --- a/Doc/whatsnew/3.15.rst +++ b/Doc/whatsnew/3.15.rst @@ -146,6 +146,8 @@ Key features include: and blocking. Use this to identify CPU-bound bottlenecks and optimize computational work. * **GIL-holding time** (``--mode gil``): Measures time spent holding Python's Global Interpreter Lock. Use this to identify which threads dominate GIL usage in multi-threaded applications. + * **Exception handling time** (``--mode exception``): Captures samples only from threads with + an active exception. Use this to analyze exception handling overhead. * **Thread-aware profiling**: Option to profile all threads (``-a``) or just the main thread, essential for understanding multi-threaded application behavior. @@ -175,6 +177,10 @@ Key features include: (``--async-aware``). See which coroutines are consuming time, with options to show only running tasks or all tasks including those waiting. +* **Opcode-level profiling**: Gather bytecode opcode information for instruction-level + profiling (``--opcodes``). Shows which bytecode instructions are executing, including + specializations from the adaptive interpreter. + See :mod:`profiling.sampling` for the complete documentation, including all available output formats, profiling modes, and configuration options. 
diff --git a/Include/internal/pycore_debug_offsets.h b/Include/internal/pycore_debug_offsets.h index 1cdc4449b173e8..66f14e69f33f44 100644 --- a/Include/internal/pycore_debug_offsets.h +++ b/Include/internal/pycore_debug_offsets.h @@ -110,8 +110,15 @@ typedef struct _Py_DebugOffsets { uint64_t status; uint64_t holds_gil; uint64_t gil_requested; + uint64_t current_exception; + uint64_t exc_state; } thread_state; + // Exception stack item offset + struct { + uint64_t exc_value; + } err_stackitem; + // InterpreterFrame offset; struct _interpreter_frame { uint64_t size; @@ -282,6 +289,11 @@ typedef struct _Py_DebugOffsets { .status = offsetof(PyThreadState, _status), \ .holds_gil = offsetof(PyThreadState, holds_gil), \ .gil_requested = offsetof(PyThreadState, gil_requested), \ + .current_exception = offsetof(PyThreadState, current_exception), \ + .exc_state = offsetof(PyThreadState, exc_state), \ + }, \ + .err_stackitem = { \ + .exc_value = offsetof(_PyErr_StackItem, exc_value), \ }, \ .interpreter_frame = { \ .size = sizeof(_PyInterpreterFrame), \ diff --git a/Lib/profiling/sampling/_flamegraph_assets/flamegraph.js b/Lib/profiling/sampling/_flamegraph_assets/flamegraph.js index 3076edd1d68cba..0370c18a25049f 100644 --- a/Lib/profiling/sampling/_flamegraph_assets/flamegraph.js +++ b/Lib/profiling/sampling/_flamegraph_assets/flamegraph.js @@ -717,6 +717,10 @@ function populateThreadStats(data, selectedThreadId = null) { const gcPctElem = document.getElementById('gc-pct'); if (gcPctElem) gcPctElem.textContent = `${(threadStats.gc_pct || 0).toFixed(1)}%`; + + // Exception stats + const excPctElem = document.getElementById('exc-pct'); + if (excPctElem) excPctElem.textContent = `${(threadStats.has_exception_pct || 0).toFixed(1)}%`; } // ============================================================================ diff --git a/Lib/profiling/sampling/_flamegraph_assets/flamegraph_template.html b/Lib/profiling/sampling/_flamegraph_assets/flamegraph_template.html index 
82102c229e7af9..29e5fdd3f35069 100644 --- a/Lib/profiling/sampling/_flamegraph_assets/flamegraph_template.html +++ b/Lib/profiling/sampling/_flamegraph_assets/flamegraph_template.html @@ -161,6 +161,10 @@

Runtime Stats

--
GC
+
+
--
+
Exception
+
diff --git a/Lib/profiling/sampling/cli.py b/Lib/profiling/sampling/cli.py index 22bfce8c2ead99..3a0444db4c3636 100644 --- a/Lib/profiling/sampling/cli.py +++ b/Lib/profiling/sampling/cli.py @@ -16,6 +16,7 @@ PROFILING_MODE_WALL, PROFILING_MODE_CPU, PROFILING_MODE_GIL, + PROFILING_MODE_EXCEPTION, SORT_MODE_NSAMPLES, SORT_MODE_TOTTIME, SORT_MODE_CUMTIME, @@ -90,6 +91,7 @@ def _parse_mode(mode_string): "wall": PROFILING_MODE_WALL, "cpu": PROFILING_MODE_CPU, "gil": PROFILING_MODE_GIL, + "exception": PROFILING_MODE_EXCEPTION, } return mode_map[mode_string] @@ -213,10 +215,12 @@ def _add_mode_options(parser): mode_group = parser.add_argument_group("Mode options") mode_group.add_argument( "--mode", - choices=["wall", "cpu", "gil"], + choices=["wall", "cpu", "gil", "exception"], default="wall", help="Sampling mode: wall (all samples), cpu (only samples when thread is on CPU), " - "gil (only samples when thread holds the GIL). Incompatible with --async-aware", + "gil (only samples when thread holds the GIL), " + "exception (only samples when thread has an active exception). " + "Incompatible with --async-aware", ) mode_group.add_argument( "--async-mode", diff --git a/Lib/profiling/sampling/collector.py b/Lib/profiling/sampling/collector.py index 22055cf84007b6..a1f6ec190f6556 100644 --- a/Lib/profiling/sampling/collector.py +++ b/Lib/profiling/sampling/collector.py @@ -5,6 +5,7 @@ THREAD_STATUS_ON_CPU, THREAD_STATUS_GIL_REQUESTED, THREAD_STATUS_UNKNOWN, + THREAD_STATUS_HAS_EXCEPTION, ) try: @@ -170,7 +171,7 @@ def _collect_thread_status_stats(self, stack_frames): Returns: tuple: (aggregate_status_counts, has_gc_frame, per_thread_stats) - - aggregate_status_counts: dict with has_gil, on_cpu, etc. + - aggregate_status_counts: dict with has_gil, on_cpu, has_exception, etc. 
- has_gc_frame: bool indicating if any thread has GC frames - per_thread_stats: dict mapping thread_id to per-thread counts """ @@ -179,6 +180,7 @@ def _collect_thread_status_stats(self, stack_frames): "on_cpu": 0, "gil_requested": 0, "unknown": 0, + "has_exception": 0, "total": 0, } has_gc_frame = False @@ -200,6 +202,8 @@ def _collect_thread_status_stats(self, stack_frames): status_counts["gil_requested"] += 1 if status_flags & THREAD_STATUS_UNKNOWN: status_counts["unknown"] += 1 + if status_flags & THREAD_STATUS_HAS_EXCEPTION: + status_counts["has_exception"] += 1 # Track per-thread statistics thread_id = getattr(thread_info, "thread_id", None) @@ -210,6 +214,7 @@ def _collect_thread_status_stats(self, stack_frames): "on_cpu": 0, "gil_requested": 0, "unknown": 0, + "has_exception": 0, "total": 0, "gc_samples": 0, } @@ -225,6 +230,8 @@ def _collect_thread_status_stats(self, stack_frames): thread_stats["gil_requested"] += 1 if status_flags & THREAD_STATUS_UNKNOWN: thread_stats["unknown"] += 1 + if status_flags & THREAD_STATUS_HAS_EXCEPTION: + thread_stats["has_exception"] += 1 # Check for GC frames in this thread frames = getattr(thread_info, "frame_info", None) diff --git a/Lib/profiling/sampling/constants.py b/Lib/profiling/sampling/constants.py index b05f1703c8505f..34b85ba4b3c61d 100644 --- a/Lib/profiling/sampling/constants.py +++ b/Lib/profiling/sampling/constants.py @@ -5,6 +5,7 @@ PROFILING_MODE_CPU = 1 PROFILING_MODE_GIL = 2 PROFILING_MODE_ALL = 3 # Combines GIL + CPU checks +PROFILING_MODE_EXCEPTION = 4 # Only samples when thread has an active exception # Sort mode constants SORT_MODE_NSAMPLES = 0 @@ -25,6 +26,7 @@ THREAD_STATUS_ON_CPU, THREAD_STATUS_UNKNOWN, THREAD_STATUS_GIL_REQUESTED, + THREAD_STATUS_HAS_EXCEPTION, ) except ImportError: # Fallback for tests or when module is not available @@ -32,3 +34,4 @@ THREAD_STATUS_ON_CPU = (1 << 1) THREAD_STATUS_UNKNOWN = (1 << 2) THREAD_STATUS_GIL_REQUESTED = (1 << 3) + THREAD_STATUS_HAS_EXCEPTION = (1 << 4) 
diff --git a/Lib/profiling/sampling/gecko_collector.py b/Lib/profiling/sampling/gecko_collector.py index b25ee079dd6ce9..608a15da483729 100644 --- a/Lib/profiling/sampling/gecko_collector.py +++ b/Lib/profiling/sampling/gecko_collector.py @@ -9,13 +9,14 @@ from .collector import Collector from .opcode_utils import get_opcode_info, format_opcode try: - from _remote_debugging import THREAD_STATUS_HAS_GIL, THREAD_STATUS_ON_CPU, THREAD_STATUS_UNKNOWN, THREAD_STATUS_GIL_REQUESTED + from _remote_debugging import THREAD_STATUS_HAS_GIL, THREAD_STATUS_ON_CPU, THREAD_STATUS_UNKNOWN, THREAD_STATUS_GIL_REQUESTED, THREAD_STATUS_HAS_EXCEPTION except ImportError: # Fallback if module not available (shouldn't happen in normal use) THREAD_STATUS_HAS_GIL = (1 << 0) THREAD_STATUS_ON_CPU = (1 << 1) THREAD_STATUS_UNKNOWN = (1 << 2) THREAD_STATUS_GIL_REQUESTED = (1 << 3) + THREAD_STATUS_HAS_EXCEPTION = (1 << 4) # Categories matching Firefox Profiler expectations @@ -28,6 +29,7 @@ {"name": "CPU", "color": "purple", "subcategories": ["Other"]}, {"name": "Code Type", "color": "red", "subcategories": ["Other"]}, {"name": "Opcodes", "color": "magenta", "subcategories": ["Other"]}, + {"name": "Exception", "color": "lightblue", "subcategories": ["Other"]}, ] # Category indices @@ -39,6 +41,7 @@ CATEGORY_CPU = 5 CATEGORY_CODE_TYPE = 6 CATEGORY_OPCODES = 7 +CATEGORY_EXCEPTION = 8 # Subcategory indices DEFAULT_SUBCATEGORY = 0 @@ -88,6 +91,8 @@ def __init__(self, sample_interval_usec, *, skip_idle=False, opcodes=False): self.python_code_start = {} # Thread running Python code (has GIL) self.native_code_start = {} # Thread running native code (on CPU without GIL) self.gil_wait_start = {} # Thread waiting for GIL + self.exception_start = {} # Thread has an exception set + self.no_exception_start = {} # Thread has no exception set # GC event tracking: track GC start time per thread self.gc_start_per_thread = {} # tid -> start_time @@ -204,6 +209,13 @@ def collect(self, stack_frames): 
self._add_marker(tid, "Waiting for GIL", self.gil_wait_start.pop(tid), current_time, CATEGORY_GIL) + # Track exception state (Has Exception / No Exception) + has_exception = bool(status_flags & THREAD_STATUS_HAS_EXCEPTION) + self._track_state_transition( + tid, has_exception, self.exception_start, self.no_exception_start, + "Has Exception", "No Exception", CATEGORY_EXCEPTION, current_time + ) + # Track GC events by detecting frames in the stack trace # This leverages the improved GC frame tracking from commit 336366fd7ca # which precisely identifies the thread that initiated GC collection @@ -622,6 +634,8 @@ def _finalize_markers(self): (self.native_code_start, "Native Code", CATEGORY_CODE_TYPE), (self.gil_wait_start, "Waiting for GIL", CATEGORY_GIL), (self.gc_start_per_thread, "GC Collecting", CATEGORY_GC), + (self.exception_start, "Has Exception", CATEGORY_EXCEPTION), + (self.no_exception_start, "No Exception", CATEGORY_EXCEPTION), ] for state_dict, marker_name, category in marker_states: diff --git a/Lib/profiling/sampling/live_collector/collector.py b/Lib/profiling/sampling/live_collector/collector.py index 3d25b5969835c0..1652089ad3f52d 100644 --- a/Lib/profiling/sampling/live_collector/collector.py +++ b/Lib/profiling/sampling/live_collector/collector.py @@ -17,6 +17,7 @@ THREAD_STATUS_ON_CPU, THREAD_STATUS_UNKNOWN, THREAD_STATUS_GIL_REQUESTED, + THREAD_STATUS_HAS_EXCEPTION, PROFILING_MODE_CPU, PROFILING_MODE_GIL, PROFILING_MODE_WALL, @@ -61,6 +62,7 @@ class ThreadData: on_cpu: int = 0 gil_requested: int = 0 unknown: int = 0 + has_exception: int = 0 total: int = 0 # Total status samples for this thread # Sample counts @@ -82,6 +84,8 @@ def increment_status_flag(self, status_flags): self.gil_requested += 1 if status_flags & THREAD_STATUS_UNKNOWN: self.unknown += 1 + if status_flags & THREAD_STATUS_HAS_EXCEPTION: + self.has_exception += 1 self.total += 1 def as_status_dict(self): @@ -91,6 +95,7 @@ def as_status_dict(self): "on_cpu": self.on_cpu, 
"gil_requested": self.gil_requested, "unknown": self.unknown, + "has_exception": self.has_exception, "total": self.total, } @@ -160,6 +165,7 @@ def __init__( "on_cpu": 0, "gil_requested": 0, "unknown": 0, + "has_exception": 0, "total": 0, # Total thread count across all samples } self.gc_frame_samples = 0 # Track samples with GC frames @@ -359,6 +365,7 @@ def collect(self, stack_frames): thread_data.on_cpu += stats.get("on_cpu", 0) thread_data.gil_requested += stats.get("gil_requested", 0) thread_data.unknown += stats.get("unknown", 0) + thread_data.has_exception += stats.get("has_exception", 0) thread_data.total += stats.get("total", 0) if stats.get("gc_samples", 0): thread_data.gc_frame_samples += stats["gc_samples"] @@ -723,6 +730,7 @@ def reset_stats(self): "on_cpu": 0, "gil_requested": 0, "unknown": 0, + "has_exception": 0, "total": 0, } self.gc_frame_samples = 0 diff --git a/Lib/profiling/sampling/live_collector/widgets.py b/Lib/profiling/sampling/live_collector/widgets.py index 869405671ffeed..8f72f69b057628 100644 --- a/Lib/profiling/sampling/live_collector/widgets.py +++ b/Lib/profiling/sampling/live_collector/widgets.py @@ -389,6 +389,7 @@ def draw_thread_status(self, line, width): pct_on_gil = (status_counts["has_gil"] / total_threads) * 100 pct_off_gil = 100.0 - pct_on_gil pct_gil_requested = (status_counts["gil_requested"] / total_threads) * 100 + pct_exception = (status_counts.get("has_exception", 0) / total_threads) * 100 # Get GC percentage based on view mode if thread_data: @@ -427,6 +428,17 @@ def draw_thread_status(self, line, width): add_separator=True, ) + # Show exception stats + if col < width - 15: + col = self._add_percentage_stat( + line, + col, + pct_exception, + "exc", + self.colors["red"], + add_separator=(col > 11), + ) + # Always show GC stats if col < width - 15: col = self._add_percentage_stat( diff --git a/Lib/profiling/sampling/sample.py b/Lib/profiling/sampling/sample.py index d5b8e21134ca18..294ec3003fc6bc 100644 --- 
a/Lib/profiling/sampling/sample.py +++ b/Lib/profiling/sampling/sample.py @@ -17,6 +17,7 @@ PROFILING_MODE_CPU, PROFILING_MODE_GIL, PROFILING_MODE_ALL, + PROFILING_MODE_EXCEPTION, ) try: from .live_collector import LiveStatsCollector @@ -300,7 +301,8 @@ def sample( all_threads: Whether to sample all threads realtime_stats: Whether to print real-time sampling statistics mode: Profiling mode - WALL (all samples), CPU (only when on CPU), - GIL (only when holding GIL), ALL (includes GIL and CPU status) + GIL (only when holding GIL), ALL (includes GIL and CPU status), + EXCEPTION (only when thread has an active exception) native: Whether to include native frames gc: Whether to include GC frames opcodes: Whether to include opcode information @@ -360,7 +362,8 @@ def sample_live( all_threads: Whether to sample all threads realtime_stats: Whether to print real-time sampling statistics mode: Profiling mode - WALL (all samples), CPU (only when on CPU), - GIL (only when holding GIL), ALL (includes GIL and CPU status) + GIL (only when holding GIL), ALL (includes GIL and CPU status), + EXCEPTION (only when thread has an active exception) native: Whether to include native frames gc: Whether to include GC frames opcodes: Whether to include opcode information diff --git a/Lib/profiling/sampling/stack_collector.py b/Lib/profiling/sampling/stack_collector.py index e5b86719f00b01..b7aa7f5ff82da3 100644 --- a/Lib/profiling/sampling/stack_collector.py +++ b/Lib/profiling/sampling/stack_collector.py @@ -87,12 +87,13 @@ def __init__(self, *args, **kwargs): "on_cpu": 0, "gil_requested": 0, "unknown": 0, + "has_exception": 0, "total": 0, } self.samples_with_gc_frames = 0 # Per-thread statistics - self.per_thread_stats = {} # {thread_id: {has_gil, on_cpu, gil_requested, unknown, total, gc_samples}} + self.per_thread_stats = {} # {thread_id: {has_gil, on_cpu, gil_requested, unknown, has_exception, total, gc_samples}} def collect(self, stack_frames, skip_idle=False): """Override to track 
thread status statistics before processing frames.""" @@ -118,6 +119,7 @@ def collect(self, stack_frames, skip_idle=False): "on_cpu": 0, "gil_requested": 0, "unknown": 0, + "has_exception": 0, "total": 0, "gc_samples": 0, } @@ -247,12 +249,16 @@ def convert_children(children, min_samples): } # Calculate thread status percentages for display + import sysconfig + is_free_threaded = bool(sysconfig.get_config_var("Py_GIL_DISABLED")) total_threads = max(1, self.thread_status_counts["total"]) thread_stats = { "has_gil_pct": (self.thread_status_counts["has_gil"] / total_threads) * 100, "on_cpu_pct": (self.thread_status_counts["on_cpu"] / total_threads) * 100, "gil_requested_pct": (self.thread_status_counts["gil_requested"] / total_threads) * 100, + "has_exception_pct": (self.thread_status_counts["has_exception"] / total_threads) * 100, "gc_pct": (self.samples_with_gc_frames / max(1, self._sample_count)) * 100, + "free_threaded": is_free_threaded, **self.thread_status_counts } @@ -265,6 +271,7 @@ def convert_children(children, min_samples): "has_gil_pct": (stats["has_gil"] / total) * 100, "on_cpu_pct": (stats["on_cpu"] / total) * 100, "gil_requested_pct": (stats["gil_requested"] / total) * 100, + "has_exception_pct": (stats["has_exception"] / total) * 100, "gc_pct": (stats["gc_samples"] / total_samples_denominator) * 100, **stats } diff --git a/Lib/test/test_external_inspection.py b/Lib/test/test_external_inspection.py index 365beec49497a8..4f3beb15f53b33 100644 --- a/Lib/test/test_external_inspection.py +++ b/Lib/test/test_external_inspection.py @@ -26,11 +26,13 @@ PROFILING_MODE_CPU = 1 PROFILING_MODE_GIL = 2 PROFILING_MODE_ALL = 3 +PROFILING_MODE_EXCEPTION = 4 # Thread status flags THREAD_STATUS_HAS_GIL = 1 << 0 THREAD_STATUS_ON_CPU = 1 << 1 THREAD_STATUS_UNKNOWN = 1 << 2 +THREAD_STATUS_HAS_EXCEPTION = 1 << 4 # Maximum number of retry attempts for operations that may fail transiently MAX_TRIES = 10 @@ -2260,6 +2262,412 @@ def busy_thread(): finally: 
_cleanup_sockets(*client_sockets, server_socket) + def _make_exception_test_script(self, port): + """Create script with exception and normal threads for testing.""" + return textwrap.dedent( + f"""\ + import socket + import threading + import time + + def exception_thread(): + conn = socket.create_connection(("localhost", {port})) + conn.sendall(b"exception:" + str(threading.get_native_id()).encode()) + try: + raise ValueError("test exception") + except ValueError: + while True: + time.sleep(0.01) + + def normal_thread(): + conn = socket.create_connection(("localhost", {port})) + conn.sendall(b"normal:" + str(threading.get_native_id()).encode()) + while True: + sum(range(1000)) + + t1 = threading.Thread(target=exception_thread) + t2 = threading.Thread(target=normal_thread) + t1.start() + t2.start() + t1.join() + t2.join() + """ + ) + + @contextmanager + def _run_exception_test_process(self): + """Context manager to run exception test script and yield thread IDs and process.""" + port = find_unused_port() + script = self._make_exception_test_script(port) + + with os_helper.temp_dir() as tmp_dir: + script_file = make_script(tmp_dir, "script", script) + server_socket = _create_server_socket(port, backlog=2) + client_sockets = [] + + try: + with _managed_subprocess([sys.executable, script_file]) as p: + exception_tid = None + normal_tid = None + + for _ in range(2): + client_socket, _ = server_socket.accept() + client_sockets.append(client_socket) + line = client_socket.recv(1024) + if line: + if line.startswith(b"exception:"): + try: + exception_tid = int(line.split(b":")[-1]) + except (ValueError, IndexError): + pass + elif line.startswith(b"normal:"): + try: + normal_tid = int(line.split(b":")[-1]) + except (ValueError, IndexError): + pass + + server_socket.close() + server_socket = None + + yield p, exception_tid, normal_tid + finally: + _cleanup_sockets(*client_sockets, server_socket) + + @unittest.skipIf( + sys.platform not in ("linux", "darwin", "win32"), + 
"Test only runs on supported platforms (Linux, macOS, or Windows)", + ) + @unittest.skipIf( + sys.platform == "android", "Android raises Linux-specific exception" + ) + def test_thread_status_exception_detection(self): + """Test that THREAD_STATUS_HAS_EXCEPTION is set when thread has an active exception.""" + with self._run_exception_test_process() as (p, exception_tid, normal_tid): + self.assertIsNotNone(exception_tid, "Exception thread id not received") + self.assertIsNotNone(normal_tid, "Normal thread id not received") + + statuses = {} + try: + unwinder = RemoteUnwinder( + p.pid, + all_threads=True, + mode=PROFILING_MODE_ALL, + skip_non_matching_threads=False, + ) + for _ in range(MAX_TRIES): + traces = unwinder.get_stack_trace() + statuses = self._get_thread_statuses(traces) + + if ( + exception_tid in statuses + and normal_tid in statuses + and (statuses[exception_tid] & THREAD_STATUS_HAS_EXCEPTION) + and not (statuses[normal_tid] & THREAD_STATUS_HAS_EXCEPTION) + ): + break + time.sleep(0.5) + except PermissionError: + self.skipTest("Insufficient permissions to read the stack trace") + + self.assertIn(exception_tid, statuses) + self.assertIn(normal_tid, statuses) + self.assertTrue( + statuses[exception_tid] & THREAD_STATUS_HAS_EXCEPTION, + "Exception thread should have HAS_EXCEPTION flag", + ) + self.assertFalse( + statuses[normal_tid] & THREAD_STATUS_HAS_EXCEPTION, + "Normal thread should not have HAS_EXCEPTION flag", + ) + + @unittest.skipIf( + sys.platform not in ("linux", "darwin", "win32"), + "Test only runs on supported platforms (Linux, macOS, or Windows)", + ) + @unittest.skipIf( + sys.platform == "android", "Android raises Linux-specific exception" + ) + def test_thread_status_exception_mode_filtering(self): + """Test that PROFILING_MODE_EXCEPTION correctly filters threads.""" + with self._run_exception_test_process() as (p, exception_tid, normal_tid): + self.assertIsNotNone(exception_tid, "Exception thread id not received") + 
self.assertIsNotNone(normal_tid, "Normal thread id not received") + + try: + unwinder = RemoteUnwinder( + p.pid, + all_threads=True, + mode=PROFILING_MODE_EXCEPTION, + skip_non_matching_threads=True, + ) + for _ in range(MAX_TRIES): + traces = unwinder.get_stack_trace() + statuses = self._get_thread_statuses(traces) + + if exception_tid in statuses: + self.assertNotIn( + normal_tid, + statuses, + "Normal thread should be filtered out in exception mode", + ) + return + time.sleep(0.5) + except PermissionError: + self.skipTest("Insufficient permissions to read the stack trace") + + self.fail("Never found exception thread in exception mode") + +class TestExceptionDetectionScenarios(RemoteInspectionTestBase): + """Test exception detection across all scenarios. + + This class verifies the exact conditions under which THREAD_STATUS_HAS_EXCEPTION + is set. Each test covers a specific scenario: + + 1. except_block: Thread inside except block + -> SHOULD have HAS_EXCEPTION (exc_info->exc_value is set) + + 2. finally_propagating: Exception propagating through finally block + -> SHOULD have HAS_EXCEPTION (current_exception is set) + + 3. finally_after_except: Finally block after except handled exception + -> Should NOT have HAS_EXCEPTION (exc_info cleared after except) + + 4. 
finally_no_exception: Finally block with no exception raised + -> Should NOT have HAS_EXCEPTION (no exception state) + """ + + def _make_single_scenario_script(self, port, scenario): + """Create script for a single exception scenario.""" + scenarios = { + "except_block": f"""\ +import socket +import threading +import time + +def target_thread(): + '''Inside except block - exception info is present''' + conn = socket.create_connection(("localhost", {port})) + conn.sendall(b"ready:" + str(threading.get_native_id()).encode()) + try: + raise ValueError("test") + except ValueError: + while True: + time.sleep(0.01) + +t = threading.Thread(target=target_thread) +t.start() +t.join() +""", + "finally_propagating": f"""\ +import socket +import threading +import time + +def target_thread(): + '''Exception propagating through finally - current_exception is set''' + conn = socket.create_connection(("localhost", {port})) + conn.sendall(b"ready:" + str(threading.get_native_id()).encode()) + try: + try: + raise ValueError("propagating") + finally: + # Exception is propagating through here + while True: + time.sleep(0.01) + except: + pass # Never reached due to infinite loop + +t = threading.Thread(target=target_thread) +t.start() +t.join() +""", + "finally_after_except": f"""\ +import socket +import threading +import time + +def target_thread(): + '''Finally runs after except handled - exc_info is cleared''' + conn = socket.create_connection(("localhost", {port})) + conn.sendall(b"ready:" + str(threading.get_native_id()).encode()) + try: + raise ValueError("test") + except ValueError: + pass # Exception caught and handled + finally: + while True: + time.sleep(0.01) + +t = threading.Thread(target=target_thread) +t.start() +t.join() +""", + "finally_no_exception": f"""\ +import socket +import threading +import time + +def target_thread(): + '''Finally with no exception at all''' + conn = socket.create_connection(("localhost", {port})) + conn.sendall(b"ready:" + 
str(threading.get_native_id()).encode()) + try: + pass # No exception + finally: + while True: + time.sleep(0.01) + +t = threading.Thread(target=target_thread) +t.start() +t.join() +""", + } + + return scenarios[scenario] + + @contextmanager + def _run_scenario_process(self, scenario): + """Context manager to run a single scenario and yield thread ID and process.""" + port = find_unused_port() + script = self._make_single_scenario_script(port, scenario) + + with os_helper.temp_dir() as tmp_dir: + script_file = make_script(tmp_dir, "script", script) + server_socket = _create_server_socket(port, backlog=1) + client_socket = None + + try: + with _managed_subprocess([sys.executable, script_file]) as p: + thread_tid = None + + client_socket, _ = server_socket.accept() + line = client_socket.recv(1024) + if line and line.startswith(b"ready:"): + try: + thread_tid = int(line.split(b":")[-1]) + except (ValueError, IndexError): + pass + + server_socket.close() + server_socket = None + + yield p, thread_tid + finally: + _cleanup_sockets(client_socket, server_socket) + + def _check_exception_status(self, p, thread_tid, expect_exception): + """Helper to check if thread has expected exception status.""" + try: + unwinder = RemoteUnwinder( + p.pid, + all_threads=True, + mode=PROFILING_MODE_ALL, + skip_non_matching_threads=False, + ) + + # Collect multiple samples for reliability + results = [] + for _ in range(MAX_TRIES): + traces = unwinder.get_stack_trace() + statuses = self._get_thread_statuses(traces) + + if thread_tid in statuses: + has_exc = bool(statuses[thread_tid] & THREAD_STATUS_HAS_EXCEPTION) + results.append(has_exc) + + if len(results) >= 3: + break + + time.sleep(0.2) + + # Check majority of samples match expected + if not results: + self.fail("Never found target thread in stack traces") + + majority = sum(results) > len(results) // 2 + if expect_exception: + self.assertTrue( + majority, + f"Thread should have HAS_EXCEPTION flag, got {results}" + ) + else: + 
self.assertFalse( + majority, + f"Thread should NOT have HAS_EXCEPTION flag, got {results}" + ) + + except PermissionError: + self.skipTest("Insufficient permissions to read the stack trace") + + @unittest.skipIf( + sys.platform not in ("linux", "darwin", "win32"), + "Test only runs on supported platforms (Linux, macOS, or Windows)", + ) + @unittest.skipIf( + sys.platform == "android", "Android raises Linux-specific exception" + ) + def test_except_block_has_exception(self): + """Test that thread inside except block has HAS_EXCEPTION flag. + + When a thread is executing inside an except block, exc_info->exc_value + is set, so THREAD_STATUS_HAS_EXCEPTION should be True. + """ + with self._run_scenario_process("except_block") as (p, thread_tid): + self.assertIsNotNone(thread_tid, "Thread ID not received") + self._check_exception_status(p, thread_tid, expect_exception=True) + + @unittest.skipIf( + sys.platform not in ("linux", "darwin", "win32"), + "Test only runs on supported platforms (Linux, macOS, or Windows)", + ) + @unittest.skipIf( + sys.platform == "android", "Android raises Linux-specific exception" + ) + def test_finally_propagating_has_exception(self): + """Test that finally block with propagating exception has HAS_EXCEPTION flag. + + When an exception is propagating through a finally block (not yet caught), + current_exception is set, so THREAD_STATUS_HAS_EXCEPTION should be True. + """ + with self._run_scenario_process("finally_propagating") as (p, thread_tid): + self.assertIsNotNone(thread_tid, "Thread ID not received") + self._check_exception_status(p, thread_tid, expect_exception=True) + + @unittest.skipIf( + sys.platform not in ("linux", "darwin", "win32"), + "Test only runs on supported platforms (Linux, macOS, or Windows)", + ) + @unittest.skipIf( + sys.platform == "android", "Android raises Linux-specific exception" + ) + def test_finally_after_except_no_exception(self): + """Test that finally block after except has NO HAS_EXCEPTION flag. 
+ + When a finally block runs after an except block has handled the exception, + Python clears exc_info before entering finally, so THREAD_STATUS_HAS_EXCEPTION + should be False. + """ + with self._run_scenario_process("finally_after_except") as (p, thread_tid): + self.assertIsNotNone(thread_tid, "Thread ID not received") + self._check_exception_status(p, thread_tid, expect_exception=False) + + @unittest.skipIf( + sys.platform not in ("linux", "darwin", "win32"), + "Test only runs on supported platforms (Linux, macOS, or Windows)", + ) + @unittest.skipIf( + sys.platform == "android", "Android raises Linux-specific exception" + ) + def test_finally_no_exception_no_flag(self): + """Test that finally block with no exception has NO HAS_EXCEPTION flag. + + When a finally block runs during normal execution (no exception raised), + there is no exception state, so THREAD_STATUS_HAS_EXCEPTION should be False. + """ + with self._run_scenario_process("finally_no_exception") as (p, thread_tid): + self.assertIsNotNone(thread_tid, "Thread ID not received") + self._check_exception_status(p, thread_tid, expect_exception=False) + class TestFrameCaching(RemoteInspectionTestBase): """Test that frame caching produces correct results. 
diff --git a/Lib/test/test_profiling/test_sampling_profiler/test_modes.py b/Lib/test/test_profiling/test_sampling_profiler/test_modes.py index c0457ee7eb8357..c086fbb572b256 100644 --- a/Lib/test/test_profiling/test_sampling_profiler/test_modes.py +++ b/Lib/test/test_profiling/test_sampling_profiler/test_modes.py @@ -427,7 +427,198 @@ def test_parse_mode_function(self): self.assertEqual(_parse_mode("wall"), 0) self.assertEqual(_parse_mode("cpu"), 1) self.assertEqual(_parse_mode("gil"), 2) + self.assertEqual(_parse_mode("exception"), 4) # Test invalid mode raises KeyError with self.assertRaises(KeyError): _parse_mode("invalid") + + +class TestExceptionModeFiltering(unittest.TestCase): + """Test exception mode filtering functionality (--mode=exception).""" + + def test_exception_mode_validation(self): + """Test that CLI accepts exception mode choice correctly.""" + from profiling.sampling.cli import main + + test_args = [ + "profiling.sampling.cli", + "attach", + "12345", + "--mode", + "exception", + ] + + with ( + mock.patch("sys.argv", test_args), + mock.patch("profiling.sampling.cli.sample") as mock_sample, + ): + try: + main() + except (SystemExit, OSError, RuntimeError): + pass # Expected due to invalid PID + + # Should have attempted to call sample with mode=4 (exception mode) + mock_sample.assert_called_once() + call_args = mock_sample.call_args + # Check the mode parameter (should be in kwargs) + self.assertEqual(call_args.kwargs.get("mode"), 4) # PROFILING_MODE_EXCEPTION + + def test_exception_mode_sample_function_call(self): + """Test that sample() function correctly uses exception mode.""" + with ( + mock.patch( + "profiling.sampling.sample.SampleProfiler" + ) as mock_profiler, + ): + # Mock the profiler instance + mock_instance = mock.Mock() + mock_profiler.return_value = mock_instance + + # Create a real collector instance + collector = PstatsCollector(sample_interval_usec=1000, skip_idle=True) + + # Call sample with exception mode + 
profiling.sampling.sample.sample( + 12345, + collector, + mode=4, # PROFILING_MODE_EXCEPTION + duration_sec=1, + ) + + # Verify SampleProfiler was created with correct mode + mock_profiler.assert_called_once() + call_args = mock_profiler.call_args + self.assertEqual(call_args[1]["mode"], 4) # mode parameter + + # Verify profiler.sample was called + mock_instance.sample.assert_called_once() + + def test_exception_mode_cli_argument_parsing(self): + """Test CLI argument parsing for exception mode with various options.""" + from profiling.sampling.cli import main + + test_args = [ + "profiling.sampling.cli", + "attach", + "12345", + "--mode", + "exception", + "-i", + "500", + "-d", + "5", + ] + + with ( + mock.patch("sys.argv", test_args), + mock.patch("profiling.sampling.cli.sample") as mock_sample, + ): + try: + main() + except (SystemExit, OSError, RuntimeError): + pass # Expected due to invalid PID + + # Verify all arguments were parsed correctly + mock_sample.assert_called_once() + call_args = mock_sample.call_args + self.assertEqual(call_args.kwargs.get("mode"), 4) # exception mode + self.assertEqual(call_args.kwargs.get("duration_sec"), 5) + + def test_exception_mode_constants_are_defined(self): + """Test that exception mode constant is properly defined.""" + from profiling.sampling.constants import PROFILING_MODE_EXCEPTION + self.assertEqual(PROFILING_MODE_EXCEPTION, 4) + + @requires_subprocess() + def test_exception_mode_integration_filtering(self): + """Integration test: Exception mode should only capture threads with active exceptions.""" + # Script with one thread handling an exception and one normal thread + exception_vs_normal_script = """ +import time +import threading + +exception_ready = threading.Event() + +def normal_worker(): + x = 0 + while True: + x += 1 + +def exception_handling_worker(): + try: + raise ValueError("test exception") + except ValueError: + # Signal AFTER entering except block, then do CPU work + exception_ready.set() + x = 0 + 
while True: + x += 1 + +normal_thread = threading.Thread(target=normal_worker) +exception_thread = threading.Thread(target=exception_handling_worker) +normal_thread.start() +exception_thread.start() +exception_ready.wait() +_test_sock.sendall(b"working") +normal_thread.join() +exception_thread.join() +""" + with test_subprocess(exception_vs_normal_script, wait_for_working=True) as subproc: + + with ( + io.StringIO() as captured_output, + mock.patch("sys.stdout", captured_output), + ): + try: + collector = PstatsCollector(sample_interval_usec=5000, skip_idle=True) + profiling.sampling.sample.sample( + subproc.process.pid, + collector, + duration_sec=2.0, + mode=4, # Exception mode + all_threads=True, + ) + collector.print_stats(show_summary=False, mode=4) + except (PermissionError, RuntimeError) as e: + self.skipTest( + "Insufficient permissions for remote profiling" + ) + + exception_mode_output = captured_output.getvalue() + + # Test wall-clock mode (mode=0) - should capture both functions + with ( + io.StringIO() as captured_output, + mock.patch("sys.stdout", captured_output), + ): + try: + collector = PstatsCollector(sample_interval_usec=5000, skip_idle=False) + profiling.sampling.sample.sample( + subproc.process.pid, + collector, + duration_sec=2.0, + mode=0, # Wall-clock mode + all_threads=True, + ) + collector.print_stats(show_summary=False) + except (PermissionError, RuntimeError) as e: + self.skipTest( + "Insufficient permissions for remote profiling" + ) + + wall_mode_output = captured_output.getvalue() + + # Verify both modes captured samples + self.assertIn("Captured", exception_mode_output) + self.assertIn("samples", exception_mode_output) + self.assertIn("Captured", wall_mode_output) + self.assertIn("samples", wall_mode_output) + + # Exception mode should strongly favor exception_handling_worker over normal_worker + self.assertIn("exception_handling_worker", exception_mode_output) + self.assertNotIn("normal_worker", exception_mode_output) + + # 
Wall-clock mode should capture both types of work + self.assertIn("exception_handling_worker", wall_mode_output) + self.assertIn("normal_worker", wall_mode_output) diff --git a/Misc/NEWS.d/next/Library/2025-12-11-04-18-49.gh-issue-138122.m3EF9E.rst b/Misc/NEWS.d/next/Library/2025-12-11-04-18-49.gh-issue-138122.m3EF9E.rst new file mode 100644 index 00000000000000..9c471ee438df15 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2025-12-11-04-18-49.gh-issue-138122.m3EF9E.rst @@ -0,0 +1,3 @@ +Add ``--mode=exception`` to the sampling profiler to capture samples only from +threads with an active exception, useful for analyzing exception handling +overhead. Patch by Pablo Galindo. diff --git a/Modules/_remote_debugging/_remote_debugging.h b/Modules/_remote_debugging/_remote_debugging.h index 0aa98349296b8a..fcb75b841b742e 100644 --- a/Modules/_remote_debugging/_remote_debugging.h +++ b/Modules/_remote_debugging/_remote_debugging.h @@ -109,10 +109,11 @@ typedef enum _WIN32_THREADSTATE { #define MAX_TLBC_SIZE 2048 /* Thread status flags */ -#define THREAD_STATUS_HAS_GIL (1 << 0) -#define THREAD_STATUS_ON_CPU (1 << 1) -#define THREAD_STATUS_UNKNOWN (1 << 2) -#define THREAD_STATUS_GIL_REQUESTED (1 << 3) +#define THREAD_STATUS_HAS_GIL (1 << 0) +#define THREAD_STATUS_ON_CPU (1 << 1) +#define THREAD_STATUS_UNKNOWN (1 << 2) +#define THREAD_STATUS_GIL_REQUESTED (1 << 3) +#define THREAD_STATUS_HAS_EXCEPTION (1 << 4) /* Exception cause macro */ #define set_exception_cause(unwinder, exc_type, message) \ @@ -209,7 +210,8 @@ enum _ProfilingMode { PROFILING_MODE_WALL = 0, PROFILING_MODE_CPU = 1, PROFILING_MODE_GIL = 2, - PROFILING_MODE_ALL = 3 + PROFILING_MODE_ALL = 3, + PROFILING_MODE_EXCEPTION = 4 }; typedef struct { diff --git a/Modules/_remote_debugging/module.c b/Modules/_remote_debugging/module.c index 9b05b911658190..a194d88c3c3ca0 100644 --- a/Modules/_remote_debugging/module.c +++ b/Modules/_remote_debugging/module.c @@ -568,7 +568,8 @@ 
_remote_debugging_RemoteUnwinder_get_stack_trace_impl(RemoteUnwinderObject *self gc_frame); if (!frame_info) { // Check if this was an intentional skip due to mode-based filtering - if ((self->mode == PROFILING_MODE_CPU || self->mode == PROFILING_MODE_GIL) && !PyErr_Occurred()) { + if ((self->mode == PROFILING_MODE_CPU || self->mode == PROFILING_MODE_GIL || + self->mode == PROFILING_MODE_EXCEPTION) && !PyErr_Occurred()) { // Thread was skipped due to mode filtering, continue to next thread continue; } @@ -1068,6 +1069,9 @@ _remote_debugging_exec(PyObject *m) if (PyModule_AddIntConstant(m, "THREAD_STATUS_GIL_REQUESTED", THREAD_STATUS_GIL_REQUESTED) < 0) { return -1; } + if (PyModule_AddIntConstant(m, "THREAD_STATUS_HAS_EXCEPTION", THREAD_STATUS_HAS_EXCEPTION) < 0) { + return -1; + } if (RemoteDebugging_InitState(st) < 0) { return -1; diff --git a/Modules/_remote_debugging/threads.c b/Modules/_remote_debugging/threads.c index f564e3a7256fa7..81c13ea48e3c49 100644 --- a/Modules/_remote_debugging/threads.c +++ b/Modules/_remote_debugging/threads.c @@ -344,6 +344,33 @@ unwind_stack_for_thread( gil_requested = 0; } + // Check exception state (both raised and handled exceptions) + int has_exception = 0; + + // Check current_exception (exception being raised/propagated) + uintptr_t current_exception = GET_MEMBER(uintptr_t, ts, + unwinder->debug_offsets.thread_state.current_exception); + if (current_exception != 0) { + has_exception = 1; + } + + // Check exc_state.exc_value (exception being handled in except block) + // exc_state is embedded in PyThreadState, so we read it directly from + // the thread state buffer. This catches most cases; nested exception + // handlers where exc_info points elsewhere are rare. 
+ if (!has_exception) { + uintptr_t exc_value = GET_MEMBER(uintptr_t, ts, + unwinder->debug_offsets.thread_state.exc_state + + unwinder->debug_offsets.err_stackitem.exc_value); + if (exc_value != 0) { + has_exception = 1; + } + } + + if (has_exception) { + status_flags |= THREAD_STATUS_HAS_EXCEPTION; + } + // Check CPU status long pthread_id = GET_MEMBER(long, ts, unwinder->debug_offsets.thread_state.thread_id); @@ -368,6 +395,9 @@ unwind_stack_for_thread( } else if (unwinder->mode == PROFILING_MODE_GIL) { // Skip if doesn't have GIL should_skip = !(status_flags & THREAD_STATUS_HAS_GIL); + } else if (unwinder->mode == PROFILING_MODE_EXCEPTION) { + // Skip if thread doesn't have an exception active + should_skip = !(status_flags & THREAD_STATUS_HAS_EXCEPTION); } // PROFILING_MODE_WALL and PROFILING_MODE_ALL never skip } From 9fe6e3ed365f40d89a47c2a255e11f0363e9aa78 Mon Sep 17 00:00:00 2001 From: AZero13 Date: Thu, 11 Dec 2025 16:18:52 -0500 Subject: [PATCH 07/10] gh-142571: Check for errors before calling each syscall in `PyUnstable_CopyPerfMapFile()` (#142460) Co-authored-by: Stan Ulbrych <89152624+StanFromIreland@users.noreply.github.com> Co-authored-by: Victor Stinner Co-authored-by: Pablo Galindo Salgado --- ...-12-11-09-06-36.gh-issue-142571.Csdxnn.rst | 1 + Python/sysmodule.c | 23 ++++++++++++++----- 2 files changed, 18 insertions(+), 6 deletions(-) create mode 100644 Misc/NEWS.d/next/C_API/2025-12-11-09-06-36.gh-issue-142571.Csdxnn.rst diff --git a/Misc/NEWS.d/next/C_API/2025-12-11-09-06-36.gh-issue-142571.Csdxnn.rst b/Misc/NEWS.d/next/C_API/2025-12-11-09-06-36.gh-issue-142571.Csdxnn.rst new file mode 100644 index 00000000000000..ea419b4fe1d6b0 --- /dev/null +++ b/Misc/NEWS.d/next/C_API/2025-12-11-09-06-36.gh-issue-142571.Csdxnn.rst @@ -0,0 +1 @@ +:c:func:`!PyUnstable_CopyPerfMapFile` now checks that opening the file succeeded before flushing. 
diff --git a/Python/sysmodule.c b/Python/sysmodule.c index b4b441bf4d9519..94eb3164ecad58 100644 --- a/Python/sysmodule.c +++ b/Python/sysmodule.c @@ -2753,20 +2753,31 @@ PyAPI_FUNC(int) PyUnstable_CopyPerfMapFile(const char* parent_filename) { } char buf[4096]; PyThread_acquire_lock(perf_map_state.map_lock, 1); - int fflush_result = 0, result = 0; + int result = 0; while (1) { size_t bytes_read = fread(buf, 1, sizeof(buf), from); + if (bytes_read == 0) { + if (ferror(from)) { + result = -1; + } + break; + } + size_t bytes_written = fwrite(buf, 1, bytes_read, perf_map_state.perf_map); - fflush_result = fflush(perf_map_state.perf_map); - if (fflush_result != 0 || bytes_read == 0 || bytes_written < bytes_read) { + if (bytes_written < bytes_read) { result = -1; - goto close_and_release; + break; } + + if (fflush(perf_map_state.perf_map) != 0) { + result = -1; + break; + } + if (bytes_read < sizeof(buf) && feof(from)) { - goto close_and_release; + break; } } -close_and_release: fclose(from); PyThread_release_lock(perf_map_state.map_lock); return result; From 0a62f8277e9a0dd9f34b0b070adb83994e81b2a8 Mon Sep 17 00:00:00 2001 From: Sam Gross Date: Thu, 11 Dec 2025 16:23:19 -0500 Subject: [PATCH 08/10] gh-142534: Avoid TSan warnings in dictobject.c (gh-142544) There are places we use "relaxed" loads where C11 requires "consume" or stronger. Unfortunately, compilers don't really implement "consume" so fake it for our use in a way that avoids upsetting TSan. 
--- Include/cpython/pyatomic.h | 11 +++++++++++ Include/internal/pycore_pyatomic_ft_wrappers.h | 3 +++ Objects/dictobject.c | 12 ++++++------ 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/Include/cpython/pyatomic.h b/Include/cpython/pyatomic.h index 2a0c11e7b3ad66..790640309f1e03 100644 --- a/Include/cpython/pyatomic.h +++ b/Include/cpython/pyatomic.h @@ -591,6 +591,17 @@ static inline void _Py_atomic_fence_release(void); // --- aliases --------------------------------------------------------------- +// Compilers don't really support "consume" semantics, so we fake it. Use +// "acquire" with TSan to avoid false positives. Use "relaxed" otherwise, +// because CPUs on all platforms we support respect address dependencies without +// extra barriers. +// See 2.6.7 in https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf +#if defined(_Py_THREAD_SANITIZER) +# define _Py_atomic_load_ptr_consume _Py_atomic_load_ptr_acquire +#else +# define _Py_atomic_load_ptr_consume _Py_atomic_load_ptr_relaxed +#endif + #if SIZEOF_LONG == 8 # define _Py_atomic_load_ulong(p) \ _Py_atomic_load_uint64((uint64_t *)p) diff --git a/Include/internal/pycore_pyatomic_ft_wrappers.h b/Include/internal/pycore_pyatomic_ft_wrappers.h index 2ae0185226f847..817c0763bf899b 100644 --- a/Include/internal/pycore_pyatomic_ft_wrappers.h +++ b/Include/internal/pycore_pyatomic_ft_wrappers.h @@ -31,6 +31,8 @@ extern "C" { _Py_atomic_store_ptr(&value, new_value) #define FT_ATOMIC_LOAD_PTR_ACQUIRE(value) \ _Py_atomic_load_ptr_acquire(&value) +#define FT_ATOMIC_LOAD_PTR_CONSUME(value) \ + _Py_atomic_load_ptr_consume(&value) #define FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(value) \ _Py_atomic_load_uintptr_acquire(&value) #define FT_ATOMIC_LOAD_PTR_RELAXED(value) \ @@ -125,6 +127,7 @@ extern "C" { #define FT_ATOMIC_LOAD_SSIZE_ACQUIRE(value) value #define FT_ATOMIC_LOAD_SSIZE_RELAXED(value) value #define FT_ATOMIC_LOAD_PTR_ACQUIRE(value) value +#define FT_ATOMIC_LOAD_PTR_CONSUME(value) value 
#define FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(value) value #define FT_ATOMIC_LOAD_PTR_RELAXED(value) value #define FT_ATOMIC_LOAD_UINT8(value) value diff --git a/Objects/dictobject.c b/Objects/dictobject.c index e0eef7b46df4b2..ac4a46dab107e8 100644 --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -1078,7 +1078,7 @@ compare_unicode_unicode(PyDictObject *mp, PyDictKeysObject *dk, void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash) { PyDictUnicodeEntry *ep = &((PyDictUnicodeEntry *)ep0)[ix]; - PyObject *ep_key = FT_ATOMIC_LOAD_PTR_RELAXED(ep->me_key); + PyObject *ep_key = FT_ATOMIC_LOAD_PTR_CONSUME(ep->me_key); assert(ep_key != NULL); assert(PyUnicode_CheckExact(ep_key)); if (ep_key == key || @@ -1371,7 +1371,7 @@ compare_unicode_generic_threadsafe(PyDictObject *mp, PyDictKeysObject *dk, void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash) { PyDictUnicodeEntry *ep = &((PyDictUnicodeEntry *)ep0)[ix]; - PyObject *startkey = _Py_atomic_load_ptr_relaxed(&ep->me_key); + PyObject *startkey = _Py_atomic_load_ptr_consume(&ep->me_key); assert(startkey == NULL || PyUnicode_CheckExact(ep->me_key)); assert(!PyUnicode_CheckExact(key)); @@ -1414,7 +1414,7 @@ compare_unicode_unicode_threadsafe(PyDictObject *mp, PyDictKeysObject *dk, void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash) { PyDictUnicodeEntry *ep = &((PyDictUnicodeEntry *)ep0)[ix]; - PyObject *startkey = _Py_atomic_load_ptr_relaxed(&ep->me_key); + PyObject *startkey = _Py_atomic_load_ptr_consume(&ep->me_key); if (startkey == key) { assert(PyUnicode_CheckExact(startkey)); return 1; @@ -1450,7 +1450,7 @@ compare_generic_threadsafe(PyDictObject *mp, PyDictKeysObject *dk, void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash) { PyDictKeyEntry *ep = &((PyDictKeyEntry *)ep0)[ix]; - PyObject *startkey = _Py_atomic_load_ptr_relaxed(&ep->me_key); + PyObject *startkey = _Py_atomic_load_ptr_consume(&ep->me_key); if (startkey == key) { return 1; } @@ -5526,7 +5526,7 @@ dictiter_iternext_threadsafe(PyDictObject *d, 
PyObject *self, k = _Py_atomic_load_ptr_acquire(&d->ma_keys); assert(i >= 0); if (_PyDict_HasSplitTable(d)) { - PyDictValues *values = _Py_atomic_load_ptr_relaxed(&d->ma_values); + PyDictValues *values = _Py_atomic_load_ptr_consume(&d->ma_values); if (values == NULL) { goto concurrent_modification; } @@ -7114,7 +7114,7 @@ _PyObject_TryGetInstanceAttribute(PyObject *obj, PyObject *name, PyObject **attr Py_BEGIN_CRITICAL_SECTION(dict); if (dict->ma_values == values && FT_ATOMIC_LOAD_UINT8(values->valid)) { - value = _Py_atomic_load_ptr_relaxed(&values->values[ix]); + value = _Py_atomic_load_ptr_consume(&values->values[ix]); *attr = _Py_XNewRefWithLock(value); success = true; } else { From 2eca80ffab5a5fd616a71757a4bf84908bce3a8d Mon Sep 17 00:00:00 2001 From: Stan Ulbrych <89152624+StanFromIreland@users.noreply.github.com> Date: Thu, 11 Dec 2025 21:28:42 +0000 Subject: [PATCH 09/10] gh-138122: Make Tachyon flamegraph and heatmap output more similar (#142590) --- .../_flamegraph_assets/flamegraph.css | 34 ++++++++++++------- .../sampling/_flamegraph_assets/flamegraph.js | 32 +++++++++++++++++ .../flamegraph_template.html | 11 ++++-- .../sampling/_heatmap_assets/heatmap.css | 18 ---------- .../heatmap_pyfile_template.html | 2 +- .../sampling/_shared_assets/base.css | 4 ++- 6 files changed, 67 insertions(+), 34 deletions(-) diff --git a/Lib/profiling/sampling/_flamegraph_assets/flamegraph.css b/Lib/profiling/sampling/_flamegraph_assets/flamegraph.css index ee699f2982616a..c3b1d955f7f526 100644 --- a/Lib/profiling/sampling/_flamegraph_assets/flamegraph.css +++ b/Lib/profiling/sampling/_flamegraph_assets/flamegraph.css @@ -329,34 +329,44 @@ body.resizing-sidebar { gap: 8px; padding: 8px 10px; background: var(--bg-primary); - border: 1px solid var(--border); + border: 2px solid var(--border); border-radius: 8px; transition: all var(--transition-fast); animation: slideUp 0.4s ease-out backwards; - animation-delay: calc(var(--i, 0) * 0.05s); + animation-delay: calc(var(--i, 
0) * 0.08s); overflow: hidden; + position: relative; } -.summary-card:nth-child(1) { --i: 0; } -.summary-card:nth-child(2) { --i: 1; } -.summary-card:nth-child(3) { --i: 2; } -.summary-card:nth-child(4) { --i: 3; } +.summary-card:nth-child(1) { --i: 0; --card-color: 55, 118, 171; } +.summary-card:nth-child(2) { --i: 1; --card-color: 40, 167, 69; } +.summary-card:nth-child(3) { --i: 2; --card-color: 255, 193, 7; } +.summary-card:nth-child(4) { --i: 3; --card-color: 111, 66, 193; } .summary-card:hover { - border-color: var(--accent); - background: var(--accent-glow); + border-color: rgba(var(--card-color), 0.6); + background: linear-gradient(135deg, rgba(var(--card-color), 0.08) 0%, var(--bg-primary) 100%); + transform: translateY(-2px); + box-shadow: 0 4px 12px rgba(var(--card-color), 0.15); } .summary-icon { - font-size: 16px; + font-size: 14px; width: 28px; height: 28px; display: flex; align-items: center; justify-content: center; - background: var(--bg-tertiary); + background: linear-gradient(135deg, rgba(var(--card-color), 0.15) 0%, rgba(var(--card-color), 0.05) 100%); + border: 1px solid rgba(var(--card-color), 0.2); border-radius: 6px; flex-shrink: 0; + transition: all var(--transition-fast); +} + +.summary-card:hover .summary-icon { + transform: scale(1.05); + background: linear-gradient(135deg, rgba(var(--card-color), 0.25) 0%, rgba(var(--card-color), 0.1) 100%); } .summary-data { @@ -368,8 +378,8 @@ body.resizing-sidebar { .summary-value { font-family: var(--font-mono); font-size: 13px; - font-weight: 700; - color: var(--accent); + font-weight: 800; + color: rgb(var(--card-color)); line-height: 1.2; white-space: nowrap; overflow: hidden; diff --git a/Lib/profiling/sampling/_flamegraph_assets/flamegraph.js b/Lib/profiling/sampling/_flamegraph_assets/flamegraph.js index 0370c18a25049f..dc7bfed602f32a 100644 --- a/Lib/profiling/sampling/_flamegraph_assets/flamegraph.js +++ b/Lib/profiling/sampling/_flamegraph_assets/flamegraph.js @@ -187,6 +187,27 @@ function 
restoreUIState() { } } +// ============================================================================ +// Logo/Favicon Setup +// ============================================================================ + +function setupLogos() { + const logo = document.querySelector('.sidebar-logo-img img'); + if (!logo) return; + + const navbarLogoContainer = document.getElementById('navbar-logo'); + if (navbarLogoContainer) { + const navbarLogo = logo.cloneNode(true); + navbarLogoContainer.appendChild(navbarLogo); + } + + const favicon = document.createElement('link'); + favicon.rel = 'icon'; + favicon.type = 'image/png'; + favicon.href = logo.src; + document.head.appendChild(favicon); +} + // ============================================================================ // Status Bar // ============================================================================ @@ -198,6 +219,11 @@ function updateStatusBar(nodeData, rootValue) { const timeMs = (nodeData.value / 1000).toFixed(2); const percent = rootValue > 0 ? 
((nodeData.value / rootValue) * 100).toFixed(1) : "0.0"; + const brandEl = document.getElementById('status-brand'); + const taglineEl = document.getElementById('status-tagline'); + if (brandEl) brandEl.style.display = 'none'; + if (taglineEl) taglineEl.style.display = 'none'; + const locationEl = document.getElementById('status-location'); const funcItem = document.getElementById('status-func-item'); const timeItem = document.getElementById('status-time-item'); @@ -230,6 +256,11 @@ function clearStatusBar() { const el = document.getElementById(id); if (el) el.style.display = 'none'; }); + + const brandEl = document.getElementById('status-brand'); + const taglineEl = document.getElementById('status-tagline'); + if (brandEl) brandEl.style.display = 'flex'; + if (taglineEl) taglineEl.style.display = 'flex'; } // ============================================================================ @@ -1065,6 +1096,7 @@ function exportSVG() { function initFlamegraph() { ensureLibraryLoaded(); restoreUIState(); + setupLogos(); let processedData = EMBEDDED_DATA; if (EMBEDDED_DATA.strings) { diff --git a/Lib/profiling/sampling/_flamegraph_assets/flamegraph_template.html b/Lib/profiling/sampling/_flamegraph_assets/flamegraph_template.html index 29e5fdd3f35069..05277fb225c86f 100644 --- a/Lib/profiling/sampling/_flamegraph_assets/flamegraph_template.html +++ b/Lib/profiling/sampling/_flamegraph_assets/flamegraph_template.html @@ -3,7 +3,7 @@ - Tachyon Profiler - Flamegraph + Tachyon Profiler - Flamegraph Report @@ -15,9 +15,10 @@
+ Tachyon - Profiler + Flamegraph Report
Heat Map
+ + Tachyon Profiler + + + Python Sampling Profiler +
- Back to Index +
-
+
Self Time
Total Time
-
+
Show All
Hot Only
-
+
Heat
Specialization diff --git a/Lib/profiling/sampling/_shared_assets/base.css b/Lib/profiling/sampling/_shared_assets/base.css index 46916709f19f54..c88cf58eef9260 100644 --- a/Lib/profiling/sampling/_shared_assets/base.css +++ b/Lib/profiling/sampling/_shared_assets/base.css @@ -387,6 +387,7 @@ body { button:focus-visible, select:focus-visible, input:focus-visible, +.toggle-switch:focus-visible, a.toolbar-btn:focus-visible { outline: 2px solid var(--python-gold); outline-offset: 2px;