From a836182b48ad8ccb8a8d7b3003b12b5f93072abf Mon Sep 17 00:00:00 2001
From: Tim Hatch
Date: Wed, 22 Jun 2022 13:35:51 -0700
Subject: [PATCH 1/7] Support zip64 in zipimport

* Reads zip64 files as produced by zipfile
* Include tests (somewhat slow, however, because of the need to create "large" zips)
* About the same amount of strictness reading invalid zip files as zipfile has
---
 Doc/library/zipimport.rst                     |   3 +
 Lib/test/test_zipimport.py                    |  10 ++
 Lib/zipimport.py                              | 164 ++++++++++++++----
 ...2-06-22-14-45-32.gh-issue-89739.CqZcRL.rst |   1 +
 4 files changed, 141 insertions(+), 37 deletions(-)
 create mode 100644 Misc/NEWS.d/next/Library/2022-06-22-14-45-32.gh-issue-89739.CqZcRL.rst

diff --git a/Doc/library/zipimport.rst b/Doc/library/zipimport.rst
index fe1adcae163c23..4e34eb1f360867 100644
--- a/Doc/library/zipimport.rst
+++ b/Doc/library/zipimport.rst
@@ -30,6 +30,9 @@ Any files may be present in the ZIP archive, but importers are only invoked for
 corresponding :file:`.pyc` file, meaning that if a ZIP archive doesn't contain
 :file:`.pyc` files, importing may be rather slow.
 
+.. versionchanged:: 3.12
+   ZIP64 is supported
+
 .. versionchanged:: 3.8
    Previously, ZIP archives with an archive comment were not supported.
 
diff --git a/Lib/test/test_zipimport.py b/Lib/test/test_zipimport.py
index 84995be3295682..ea75da74c3a8d3 100644
--- a/Lib/test/test_zipimport.py
+++ b/Lib/test/test_zipimport.py
@@ -776,6 +776,16 @@ def testLargestPossibleComment(self):
         files = {TESTMOD + ".py": (NOW, test_src)}
         self.doTest(".py", files, TESTMOD, comment=b"c" * ((1 << 16) - 1))
 
+    def testZip64(self):
+        # This is the simplest way to make zipfile generate the zip64 EOCD block
+        files = {f"f{n}.py": (NOW, test_src) for n in range(65537)}
+        self.doTest(".py", files, "f6")
+
+    def testZip64CruftAndComment(self):
+        # This is the simplest way to make zipfile generate the zip64 EOCD block
+        files = {f"f{n}.py": (NOW, test_src) for n in range(65537)}
+        self.doTest(".py", files, "f65536", comment=b"c" * ((1 << 16) - 1))
+
 
 @support.requires_zlib()
 class CompressedZipImportTestCase(UncompressedZipImportTestCase):
diff --git a/Lib/zipimport.py b/Lib/zipimport.py
index 25eaee9c0f291b..6eb0217b2136f2 100644
--- a/Lib/zipimport.py
+++ b/Lib/zipimport.py
@@ -40,8 +40,14 @@ class ZipImportError(ImportError):
 _module_type = type(sys)
 
 END_CENTRAL_DIR_SIZE = 22
-STRING_END_ARCHIVE = b'PK\x05\x06'
+END_CENTRAL_DIR_SIZE_64 = 56
+END_CENTRAL_DIR_LOCATOR_SIZE_64 = 20
+STRING_END_ARCHIVE = b'PK\x05\x06'  # standard EOCD signature
+STRING_END_LOCATOR_64 = b'PK\x06\x07'  # Zip64 EOCD Locator signature
+STRING_END_ZIP_64 = b'PK\x06\x06'  # Zip64 EOCD signature
 MAX_COMMENT_LEN = (1 << 16) - 1
+MAX_UINT32 = 0xffffffff
+ZIP64_EXTRA_TAG = 0x1
 
 class zipimporter(_bootstrap_external._LoaderBasics):
     """zipimporter(archivepath) -> zipimporter object
@@ -406,49 +412,69 @@ def _read_directory(archive):
         raise ZipImportError(f"can't open Zip file: {archive!r}", path=archive)
 
     with fp:
+        # Check if there's a comment.
         try:
-            fp.seek(-END_CENTRAL_DIR_SIZE, 2)
-            header_position = fp.tell()
-            buffer = fp.read(END_CENTRAL_DIR_SIZE)
+            fp.seek(0, 2)
+            file_size = fp.tell()
         except OSError:
-            raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
-        if len(buffer) != END_CENTRAL_DIR_SIZE:
-            raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
-        if buffer[:4] != STRING_END_ARCHIVE:
-            # Bad: End of Central Dir signature
-            # Check if there's a comment.
-            try:
-                fp.seek(0, 2)
-                file_size = fp.tell()
-            except OSError:
-                raise ZipImportError(f"can't read Zip file: {archive!r}",
-                                     path=archive)
-            max_comment_start = max(file_size - MAX_COMMENT_LEN -
-                                    END_CENTRAL_DIR_SIZE, 0)
-            try:
-                fp.seek(max_comment_start)
-                data = fp.read()
-            except OSError:
-                raise ZipImportError(f"can't read Zip file: {archive!r}",
-                                     path=archive)
-            pos = data.rfind(STRING_END_ARCHIVE)
-            if pos < 0:
-                raise ZipImportError(f'not a Zip file: {archive!r}',
+            raise ZipImportError(f"can't read Zip file: {archive!r}",
+                                 path=archive)
+        max_comment_start = max(file_size - MAX_COMMENT_LEN -
+                                END_CENTRAL_DIR_SIZE - END_CENTRAL_DIR_SIZE_64 -
+                                END_CENTRAL_DIR_LOCATOR_SIZE_64, 0)
+        try:
+            fp.seek(max_comment_start)
+            data = fp.read()
+        except OSError:
+            raise ZipImportError(f"can't read Zip file: {archive!r}",
+                                 path=archive)
+        pos = data.rfind(STRING_END_ARCHIVE)
+        pos64 = data.rfind(STRING_END_ZIP_64)
+
+        if (pos64 >= 0 and pos64+END_CENTRAL_DIR_SIZE_64+END_CENTRAL_DIR_LOCATOR_SIZE_64==pos):
+            # Zip64 at "correct" offset from standard EOCD
+            buffer = data[pos64:pos64 + END_CENTRAL_DIR_SIZE_64]
+            if len(buffer) != END_CENTRAL_DIR_SIZE_64:
+                raise ZipImportError(f"corrupt Zip64 file: {archive!r}",
                                      path=archive)
+            header_position = file_size - len(data) + pos64
+
+            central_directory_size = int.from_bytes(buffer[40:48], 'little')
+            central_directory_position = int.from_bytes(buffer[48:56], 'little')
+            num_entries = int.from_bytes(buffer[24:32], 'little')
+        elif pos >= 0:
             buffer = data[pos:pos+END_CENTRAL_DIR_SIZE]
             if len(buffer) != END_CENTRAL_DIR_SIZE:
                 raise ZipImportError(f"corrupt Zip file: {archive!r}",
                                      path=archive)
+
             header_position = file_size - len(data) + pos
 
-        header_size = _unpack_uint32(buffer[12:16])
-        header_offset = _unpack_uint32(buffer[16:20])
-        if header_position < header_size:
+            # Buffer now contains a valid EOCD, and header_position gives the
+            # starting position of it.
+            central_directory_size = _unpack_uint32(buffer[12:16])
+            central_directory_position = _unpack_uint32(buffer[16:20])
+            num_entries = _unpack_uint16(buffer[8:10])
+
+            # N.b. if someday you want to prefer the standard (non-zip64) EOCD,
+            # you need to adjust position by 76 for arc to be 0.
+        else:
+            raise ZipImportError(f'not a Zip file: {archive!r}',
+                                 path=archive)
+
+        # Buffer now contains a valid EOCD, and header_position gives the
+        # starting position of it.
+        # XXX: These are cursory checks but are not as exact or strict as they
+        # could be. Checking the arc-adjusted value is probably good too.
+        if header_position < central_directory_size:
             raise ZipImportError(f'bad central directory size: {archive!r}', path=archive)
-        if header_position < header_offset:
+        if header_position < central_directory_position:
             raise ZipImportError(f'bad central directory offset: {archive!r}', path=archive)
-        header_position -= header_size
-        arc_offset = header_position - header_offset
+        header_position -= central_directory_size
+        # On just-a-zipfile these values are the same and arc_offset is zero; if
+        # the file has some bytes prepended, `arc_offset` is the number of such
+        # bytes. This is used for pex as well as self-extracting .exe.
+        arc_offset = header_position - central_directory_position
         if arc_offset < 0:
             raise ZipImportError(f'bad central directory size or offset: {archive!r}', path=archive)
@@ -465,6 +491,11 @@ def _read_directory(archive):
                 raise EOFError('EOF read where not expected')
             # Start of file header
             if buffer[:4] != b'PK\x01\x02':
+                if count != num_entries:
+                    raise ZipImportError(
+                        f"mismatched num_entries: {count} should be {num_entries} in {archive!r}",
+                        path=archive,
+                    )
                 break                # Bad: Central Dir File Header
             if len(buffer) != 46:
                 raise EOFError('EOF read where not expected')
@@ -480,9 +511,6 @@ def _read_directory(archive):
             comment_size = _unpack_uint16(buffer[32:34])
             file_offset = _unpack_uint32(buffer[42:46])
             header_size = name_size + extra_size + comment_size
-            if file_offset > header_offset:
-                raise ZipImportError(f'bad local header offset: {archive!r}', path=archive)
-            file_offset += arc_offset
 
             try:
                 name = fp.read(name_size)
@@ -494,7 +522,10 @@ def _read_directory(archive):
             # slower than reading the data because fseek flushes stdio's
             # internal buffers. See issue #8745.
             try:
-                if len(fp.read(header_size - name_size)) != header_size - name_size:
+                extra_data_len = header_size - name_size
+                extra_data = fp.read(extra_data_len)
+
+                if len(extra_data) != extra_data_len:
                     raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
             except OSError:
                 raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
@@ -511,6 +542,65 @@ def _read_directory(archive):
 
             name = name.replace('/', path_sep)
             path = _bootstrap_external._path_join(archive, name)
+
+            # Ordering matches unpacking below.
+            if (
+                file_size == MAX_UINT32 or
+                data_size == MAX_UINT32 or
+                file_offset == MAX_UINT32
+            ):
+                # need to decode extra_data looking for a zip64 extra (which might not
+                # be present)
+                while extra_data:
+                    if len(extra_data) < 4:
+                        raise ZipImportError(f"can't read header extra: {archive!r}", path=archive)
+                    tag = _unpack_uint16(extra_data[:2])
+                    size = _unpack_uint16(extra_data[2:4])
+                    if len(extra_data) < 4 + size:
+                        raise ZipImportError(f"can't read header extra: {archive!r}", path=archive)
+                    if tag == ZIP64_EXTRA_TAG:
+                        if (len(extra_data) - 4) % 8 != 0:
+                            raise ZipImportError(f"can't read header extra: {archive!r}", path=archive)
+                        values = [
+                            int.from_bytes(extra_data[i:i+8], 'little')
+                            for i in range(4, len(extra_data), 8)
+                        ]
+
+                        # N.b. Here be dragons: the ordering of these is different than
+                        # the header fields, and it's really easy to get it wrong since
+                        # naturally-occuring zips that use all 3 are >4GB and not
+                        # something that would be checked-in.
+                        # The tests include a binary-edited zip that uses zip64
+                        # (unnecessarily) for all three.
+                        if file_size == MAX_UINT32:
+                            file_size = values.pop(0)
+                        if data_size == MAX_UINT32:
+                            data_size = values.pop(0)
+                        if file_offset == MAX_UINT32:
+                            file_offset = values.pop(0)
+
+                        if values:
+                            raise ZipImportError(f"can't read header extra: {archive!r}", path=archive)
+
+                        break
+
+                    # For a typical zip, this bytes-slicing only happens 2-3 times, on
+                    # small data like timestamps and filesizes.
+                    extra_data = extra_data[4+size:]
+                else:
+                    _bootstrap._verbose_message(
+                        "zipimport: suspected zip64 but no zip64 extra for {!r}",
+                        path,
+                    )
+            # XXX These two statements seem swapped because `header_offset` is a
+            # position within the actual file, but `file_offset` (when compared) is
+            # as encoded in the entry, not adjusted for this file.
+            # N.b. this must be after we've potentially read the zip64 extra which can
+            # change `file_offset`.
+            if file_offset > central_directory_position:
+                raise ZipImportError(f'bad local header offset: {archive!r}', path=archive)
+            file_offset += arc_offset
+
             t = (path, compress, data_size, file_size, file_offset, time, date, crc)
             files[name] = t
             count += 1
diff --git a/Misc/NEWS.d/next/Library/2022-06-22-14-45-32.gh-issue-89739.CqZcRL.rst b/Misc/NEWS.d/next/Library/2022-06-22-14-45-32.gh-issue-89739.CqZcRL.rst
new file mode 100644
index 00000000000000..7dbb15f995c957
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2022-06-22-14-45-32.gh-issue-89739.CqZcRL.rst
@@ -0,0 +1 @@
+The ``zipimport`` module can now read ZIP64 files.

From f719c63eaf3de56a58c073e993290850c1cf7e1e Mon Sep 17 00:00:00 2001
From: Itamar Ostricher
Date: Fri, 7 Jul 2023 21:08:24 -0700
Subject: [PATCH 2/7] Use helper method to get files for zip64

---
 Lib/test/test_zipimport.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/Lib/test/test_zipimport.py b/Lib/test/test_zipimport.py
index 00d19e339a73b7..ae49700294330c 100644
--- a/Lib/test/test_zipimport.py
+++ b/Lib/test/test_zipimport.py
@@ -128,6 +128,10 @@ def makeZip(self, files, zipName=TEMP_ZIP, **kw):
                 f.write(stuff)
                 f.write(data)
 
+    def getZip64Files(self):
+        # This is the simplest way to make zipfile generate the zip64 EOCD block
+        return {f"f{n}.py": (NOW, test_src) for n in range(65537)}
+
     def doTest(self, expected_ext, files, *modules, **kw):
         self.makeZip(files, **kw)
 
@@ -799,13 +803,11 @@ def testLargestPossibleComment(self):
         self.doTest(".py", files, TESTMOD, comment=b"c" * ((1 << 16) - 1))
 
     def testZip64(self):
-        # This is the simplest way to make zipfile generate the zip64 EOCD block
-        files = {f"f{n}.py": (NOW, test_src) for n in range(65537)}
+        files = self.getZip64Files()
         self.doTest(".py", files, "f6")
 
     def testZip64CruftAndComment(self):
-        # This is the simplest way to make zipfile generate the zip64 EOCD block
-        files = {f"f{n}.py": (NOW, test_src) for n in range(65537)}
+        files = self.getZip64Files()
         self.doTest(".py", files, "f65536", comment=b"c" * ((1 << 16) - 1))
 

From 19d8bfae7c81c47f3484862487c76db7df495491 Mon Sep 17 00:00:00 2001
From: Itamar Ostricher
Date: Fri, 7 Jul 2023 21:08:48 -0700
Subject: [PATCH 3/7] Update versionchanged to 3.13

---
 Doc/library/zipimport.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Doc/library/zipimport.rst b/Doc/library/zipimport.rst
index 68b8b5baa7ee2d..aff327fd4532c5 100644
--- a/Doc/library/zipimport.rst
+++ b/Doc/library/zipimport.rst
@@ -30,7 +30,7 @@ Any files may be present in the ZIP archive, but importers are only invoked for
 corresponding :file:`.pyc` file, meaning that if a ZIP archive doesn't contain
 :file:`.pyc` files, importing may be rather slow.
 
-.. versionchanged:: 3.12
+.. versionchanged:: 3.13
    ZIP64 is supported
 
 .. versionchanged:: 3.8

From ea8c9c7487cf7314617cc53e89d862a6010e83bd Mon Sep 17 00:00:00 2001
From: Itamar Ostricher
Date: Sat, 8 Jul 2023 11:50:18 -0700
Subject: [PATCH 4/7] gps review feedback

---
 Lib/zipimport.py                              | 46 +++++++++----------
 ...2-06-22-14-45-32.gh-issue-89739.CqZcRL.rst |  2 +-
 2 files changed, 23 insertions(+), 25 deletions(-)

diff --git a/Lib/zipimport.py b/Lib/zipimport.py
index ac014a5839f622..3e0d760aa3013d 100644
--- a/Lib/zipimport.py
+++ b/Lib/zipimport.py
@@ -368,16 +368,17 @@ def _read_directory(archive):
             file_size = fp.tell()
         except OSError:
             raise ZipImportError(f"can't read Zip file: {archive!r}",
-                                 path=archive)
-        max_comment_start = max(file_size - MAX_COMMENT_LEN -
-                                END_CENTRAL_DIR_SIZE - END_CENTRAL_DIR_SIZE_64 -
-                                END_CENTRAL_DIR_LOCATOR_SIZE_64, 0)
+                                 path=archive)
+        max_comment_plus_dirs_size = (
+            MAX_COMMENT_LEN + END_CENTRAL_DIR_SIZE +
+            END_CENTRAL_DIR_SIZE_64 + END_CENTRAL_DIR_LOCATOR_SIZE_64)
+        max_comment_start = max(file_size - max_comment_plus_dirs_size, 0)
         try:
             fp.seek(max_comment_start)
-            data = fp.read()
+            data = fp.read(max_comment_plus_dirs_size)
         except OSError:
             raise ZipImportError(f"can't read Zip file: {archive!r}",
-                                 path=archive)
+                                 path=archive)
         pos = data.rfind(STRING_END_ARCHIVE)
         pos64 = data.rfind(STRING_END_ZIP_64)
 
@@ -385,8 +386,10 @@ def _read_directory(archive):
             # Zip64 at "correct" offset from standard EOCD
             buffer = data[pos64:pos64 + END_CENTRAL_DIR_SIZE_64]
             if len(buffer) != END_CENTRAL_DIR_SIZE_64:
-                raise ZipImportError(f"corrupt Zip64 file: {archive!r}",
-                                     path=archive)
+                raise ZipImportError(
+                    f"corrupt Zip64 file: Expected {END_CENTRAL_DIR_SIZE_64} byte "
+                    f"zip64 central directory, but read {len(buffer)} bytes.",
+                    path=archive)
             header_position = file_size - len(data) + pos64
 
             central_directory_size = int.from_bytes(buffer[40:48], 'little')
@@ -396,7 +399,7 @@ def _read_directory(archive):
             buffer = data[pos:pos+END_CENTRAL_DIR_SIZE]
             if len(buffer) != END_CENTRAL_DIR_SIZE:
                 raise ZipImportError(f"corrupt Zip file: {archive!r}",
-                                     path=archive)
+                                     path=archive)
 
             header_position = file_size - len(data) + pos
 
@@ -410,7 +413,7 @@ def _read_directory(archive):
             # you need to adjust position by 76 for arc to be 0.
         else:
             raise ZipImportError(f'not a Zip file: {archive!r}',
-                                 path=archive)
+                                 path=archive)
 
         # Buffer now contains a valid EOCD, and header_position gives the
         # starting position of it.
@@ -473,7 +476,7 @@ def _read_directory(archive):
             # internal buffers. See issue #8745.
             try:
                 extra_data_len = header_size - name_size
-                extra_data = fp.read(extra_data_len)
+                extra_data = memoryview(fp.read(extra_data_len))
 
                 if len(extra_data) != extra_data_len:
                     raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
             except OSError:
                 raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
@@ -511,17 +514,15 @@ def _read_directory(archive):
                     if tag == ZIP64_EXTRA_TAG:
                         if (len(extra_data) - 4) % 8 != 0:
                             raise ZipImportError(f"can't read header extra: {archive!r}", path=archive)
-                        values = [
-                            int.from_bytes(extra_data[i:i+8], 'little')
-                            for i in range(4, len(extra_data), 8)
-                        ]
+                        num_extra_values = (len(extra_data) - 4) // 8
+                        if num_extra_values > 3:
+                            raise ZipImportError(f"can't read header extra: {archive!r}", path=archive)
+                        values = struct.unpack_from(f"<{min(num_extra_values, 3)}Q",
+                                                    extra_data, offset=4)
 
                         # N.b. Here be dragons: the ordering of these is different than
                         # the header fields, and it's really easy to get it wrong since
-                        # naturally-occuring zips that use all 3 are >4GB and not
-                        # something that would be checked-in.
-                        # The tests include a binary-edited zip that uses zip64
-                        # (unnecessarily) for all three.
+                        # naturally-occuring zips that use all 3 are >4GB
                         if file_size == MAX_UINT32:
                             file_size = values.pop(0)
                         if data_size == MAX_UINT32:
@@ -529,9 +530,6 @@ def _read_directory(archive):
                         if file_offset == MAX_UINT32:
                             file_offset = values.pop(0)
 
-                        if values:
-                            raise ZipImportError(f"can't read header extra: {archive!r}", path=archive)
-
                         break
 
                     # For a typical zip, this bytes-slicing only happens 2-3 times, on
@@ -542,8 +540,8 @@ def _read_directory(archive):
                         "zipimport: suspected zip64 but no zip64 extra for {!r}",
                         path,
                     )
-            # XXX These two statements seem swapped because `header_offset` is a
-            # position within the actual file, but `file_offset` (when compared) is
+            # XXX These two statements seem swapped because `central_directory_position`
+            # is a position within the actual file, but `file_offset` (when compared) is
             # as encoded in the entry, not adjusted for this file.
             # N.b. this must be after we've potentially read the zip64 extra which can
             # change `file_offset`.
diff --git a/Misc/NEWS.d/next/Library/2022-06-22-14-45-32.gh-issue-89739.CqZcRL.rst b/Misc/NEWS.d/next/Library/2022-06-22-14-45-32.gh-issue-89739.CqZcRL.rst
index 7dbb15f995c957..0358c0107cb697 100644
--- a/Misc/NEWS.d/next/Library/2022-06-22-14-45-32.gh-issue-89739.CqZcRL.rst
+++ b/Misc/NEWS.d/next/Library/2022-06-22-14-45-32.gh-issue-89739.CqZcRL.rst
@@ -1 +1 @@
-The ``zipimport`` module can now read ZIP64 files.
+The :mod:`zipimport` module can now read ZIP64 files.

From dc74fc0be20781123f9663ffed4eb9869585f6a5 Mon Sep 17 00:00:00 2001
From: Itamar Oren
Date: Tue, 5 Mar 2024 21:25:24 -0800
Subject: [PATCH 5/7] Add ``_unpack_uint64`` to ``_bootstrap_external``

---
 Lib/importlib/_bootstrap_external.py | 5 +++++
 Lib/zipimport.py                     | 8 ++++----
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/Lib/importlib/_bootstrap_external.py b/Lib/importlib/_bootstrap_external.py
index 2a9aef03179f6f..fccaeaf6430fc7 100644
--- a/Lib/importlib/_bootstrap_external.py
+++ b/Lib/importlib/_bootstrap_external.py
@@ -81,6 +81,11 @@ def _pack_uint32(x):
     return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
 
 
+def _unpack_uint64(data):
+    """Convert 8 bytes in little-endian to an integer."""
+    assert len(data) == 8
+    return int.from_bytes(data, 'little')
+
 def _unpack_uint32(data):
     """Convert 4 bytes in little-endian to an integer."""
     assert len(data) == 4
diff --git a/Lib/zipimport.py b/Lib/zipimport.py
index cd3d95998acaea..21d2dca46f569b 100644
--- a/Lib/zipimport.py
+++ b/Lib/zipimport.py
@@ -15,7 +15,7 @@
 #from importlib import _bootstrap_external
 #from importlib import _bootstrap  # for _verbose_message
 import _frozen_importlib_external as _bootstrap_external
-from _frozen_importlib_external import _unpack_uint16, _unpack_uint32
+from _frozen_importlib_external import _unpack_uint16, _unpack_uint32, _unpack_uint64
 import _frozen_importlib as _bootstrap  # for _verbose_message
 import _imp  # for check_hash_based_pycs
 import _io  # for open
@@ -392,9 +392,9 @@ def _read_directory(archive):
                 path=archive)
             header_position = file_size - len(data) + pos64
 
-            central_directory_size = int.from_bytes(buffer[40:48], 'little')
-            central_directory_position = int.from_bytes(buffer[48:56], 'little')
-            num_entries = int.from_bytes(buffer[24:32], 'little')
+            central_directory_size = _unpack_uint64(buffer[40:48])
+            central_directory_position = _unpack_uint64(buffer[48:56])
+            num_entries = _unpack_uint64(buffer[24:32])
         elif pos >= 0:
             buffer = data[pos:pos+END_CENTRAL_DIR_SIZE]
             if len(buffer) != END_CENTRAL_DIR_SIZE:

From 9089cb165544c5d7dc2492e3c3ba62ccdeec55ac Mon Sep 17 00:00:00 2001
From: "Gregory P. Smith [Google LLC]"
Date: Thu, 28 Mar 2024 06:18:09 +0000
Subject: [PATCH 6/7] Add a what's new entry.

---
 Doc/whatsnew/3.13.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst
index e6234bf974ea47..ee1df7f73456ef 100644
--- a/Doc/whatsnew/3.13.rst
+++ b/Doc/whatsnew/3.13.rst
@@ -699,6 +699,12 @@ xml.etree.ElementTree
 * Add the :meth:`!close` method for the iterator returned by
   :func:`~xml.etree.ElementTree.iterparse` for explicit cleaning up.
   (Contributed by Serhiy Storchaka in :gh:`69893`.)
+ 
+zipimport
+---------
+
+* Gains support for ZIP64 format files. Everybody loves huge code right?
+  (Contributed by Tim Hatch in :gh:`94146`.)
 
 
 Optimizations

From ed47eb8e434d5b4904ca7020176c50671a8cd1bf Mon Sep 17 00:00:00 2001
From: "Gregory P. Smith [Google LLC]"
Date: Thu, 28 Mar 2024 06:23:39 +0000
Subject: [PATCH 7/7] --trailing space

---
 Doc/whatsnew/3.13.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst
index ee1df7f73456ef..5a5c506d83d735 100644
--- a/Doc/whatsnew/3.13.rst
+++ b/Doc/whatsnew/3.13.rst
@@ -699,7 +699,7 @@ xml.etree.ElementTree
 * Add the :meth:`!close` method for the iterator returned by
   :func:`~xml.etree.ElementTree.iterparse` for explicit cleaning up.
   (Contributed by Serhiy Storchaka in :gh:`69893`.)
- 
+
 zipimport
 ---------
