From 4bd54bd32d525ec41e23960146fbb3b0418a4715 Mon Sep 17 00:00:00 2001 From: Stefan <96178532+stefan6419846@users.noreply.github.com> Date: Sun, 28 Jul 2024 17:16:57 +0200 Subject: [PATCH 01/43] DEV: Test against Python 3.13 (#2776) * DEV: Test against Python 3.13 * fix typo * add missing setup-python * fix another typo * update Pillow version * attempt to update coverage package * update number of expected coverage files --- .github/workflows/github-ci.yaml | 10 +++++----- requirements/ci-3.11.txt | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/github-ci.yaml b/.github/workflows/github-ci.yaml index 820ccdcaa5..1eb3d9bd03 100644 --- a/.github/workflows/github-ci.yaml +++ b/.github/workflows/github-ci.yaml @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-20.04 strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13-dev"] use-crypto-lib: ["cryptography"] include: - python-version: "3.7" @@ -90,7 +90,7 @@ jobs: cache-dependency-path: '**/requirements/ci.txt' - name: Setup Python (3.11+) uses: actions/setup-python@v5 - if: matrix.python-version == '3.11' || matrix.python-version == '3.12' + if: matrix.python-version == '3.11' || matrix.python-version == '3.12' || matrix.python-version == '3.13-dev' with: python-version: ${{ matrix.python-version }} allow-prereleases: true @@ -106,7 +106,7 @@ jobs: - name: Install requirements (Python 3.11+) run: | pip install -r requirements/ci-3.11.txt - if: matrix.python-version == '3.11' || matrix.python-version == '3.12' + if: matrix.python-version == '3.11' || matrix.python-version == '3.12' || matrix.python-version == '3.13-dev' - name: Remove pycryptodome and cryptography run: | pip uninstall pycryptodome cryptography -y @@ -215,8 +215,8 @@ jobs: - name: Check Number of Downloaded Files run: | downloaded_files_count=$(find \.coverage* -type f | wc -l) - if [ $downloaded_files_count -eq 8 ]; then - echo "The expected number of files (8) were downloaded." + if [ $downloaded_files_count -eq 9 ]; then + echo "The expected number of files (9) were downloaded." else echo "ERROR: Expected 8 files, but found $downloaded_files_count files." exit 1 diff --git a/requirements/ci-3.11.txt b/requirements/ci-3.11.txt index f382fe2b94..2101771181 100644 --- a/requirements/ci-3.11.txt +++ b/requirements/ci-3.11.txt @@ -6,7 +6,7 @@ # attrs==23.1.0 # via flake8-bugbear -coverage[toml]==7.3.0 +coverage[toml]==7.6.0 # via # -r requirements/ci.in # pytest-cov @@ -35,7 +35,7 @@ mypy-extensions==1.0.0 # via mypy packaging==23.1 # via pytest -pillow==10.0.1 +pillow==10.4.0 # via # -r requirements/ci.in # fpdf2 From d4df20d14cb6a2839c1ab141b51e70652fb3d1f1 Mon Sep 17 00:00:00 2001 From: j-t-1 <120829237+j-t-1@users.noreply.github.com> Date: Wed, 31 Jul 2024 10:46:08 +0100 Subject: [PATCH 02/43] STY: Remove boolean value comparison (#2779) PEP 8 recommendation. 
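For illustration, a minimal standalone sketch of the truthiness style this commit applies (toy example, not part of the patch itself):

```python
bold = True

# Discouraged by PEP 8: comparing against a boolean literal
if bold is True:
    print("bold")

# Preferred: rely on the value's truthiness directly
if bold:
    print("bold")
```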
--- pypdf/annotations/_markup_annotations.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pypdf/annotations/_markup_annotations.py b/pypdf/annotations/_markup_annotations.py index 4db8dfdbf0..98a222483b 100644 --- a/pypdf/annotations/_markup_annotations.py +++ b/pypdf/annotations/_markup_annotations.py @@ -104,9 +104,9 @@ def __init__( self[NameObject("/Rect")] = RectangleObject(rect) font_str = "font: " - if bold is True: + if bold: font_str = f"{font_str}bold " - if italic is True: + if italic: font_str = f"{font_str}italic " font_str = f"{font_str}{font} {font_size}" font_str = f"{font_str};text-align:left;color:#{font_color}" From 3ad9234c2ec08e7cd6a8b2ec962386eda394d76d Mon Sep 17 00:00:00 2001 From: "William G. Gagnon" Date: Fri, 2 Aug 2024 11:21:53 -0400 Subject: [PATCH 03/43] ROB: Handle images with empty data when processing an image from bytes (#2786) Closes #2783. --- CONTRIBUTORS.md | 1 + pypdf/_xobj_image_helpers.py | 9 ++++++--- pypdf/errors.py | 4 ++++ tests/test_xobject_image_helpers.py | 13 +++++++++++-- 4 files changed, 22 insertions(+), 5 deletions(-) diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 84f0b6ee43..89fec3b14e 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -19,6 +19,7 @@ history and [GitHub's 'Contributors' feature](https://github.com/py-pdf/pypdf/gr * [ediamondscience](https://github.com/ediamondscience) * [Ermeson, Felipe](https://github.com/FelipeErmeson) * [Freitag, François](https://github.com/francoisfreitag) +* [Gagnon, William G.](https://github.com/williamgagnon) * [Górny, Michał](https://github.com/mgorny) * [Grillo, Miguel](https://github.com/Ineffable22) * [Gutteridge, David H.](https://github.com/dhgutteridge) diff --git a/pypdf/_xobj_image_helpers.py b/pypdf/_xobj_image_helpers.py index 45b0c145be..5ae8894fa3 100644 --- a/pypdf/_xobj_image_helpers.py +++ b/pypdf/_xobj_image_helpers.py @@ -6,7 +6,7 @@ from ._utils import check_if_whitespace_only, logger_warning from .constants import ColorSpaces -from .errors import PdfReadError +from .errors import EmptyImageDataError, PdfReadError from .generic import ( ArrayObject, DecodedStreamObject, @@ -148,9 +148,12 @@ def _extended_image_frombytes( img = Image.frombytes(mode, size, data) except ValueError as exc: nb_pix = size[0] * size[1] - if len(data) % nb_pix != 0: + data_length = len(data) + if data_length == 0: + raise EmptyImageDataError("Data is 0 bytes, cannot process an image from empty data.") from exc + if data_length % nb_pix != 0: raise exc - k = nb_pix * len(mode) / len(data) + k = nb_pix * len(mode) / data_length data = b"".join([bytes((x,) * int(k)) for x in data]) img = Image.frombytes(mode, size, data) return img diff --git a/pypdf/errors.py b/pypdf/errors.py index c962dec662..ad197ffc11 100644 --- a/pypdf/errors.py +++ b/pypdf/errors.py @@ -59,4 +59,8 @@ class EmptyFileError(PdfReadError): """Raised when a PDF file is empty or has no content.""" +class EmptyImageDataError(PyPdfError): + """Raised when trying to process an image that has no data.""" + + STREAM_TRUNCATED_PREMATURELY = "Stream has ended unexpectedly" diff --git a/tests/test_xobject_image_helpers.py b/tests/test_xobject_image_helpers.py index 63ecebd9b4..39b7131fcd 100644 --- a/tests/test_xobject_image_helpers.py +++ b/tests/test_xobject_image_helpers.py @@ -4,8 +4,8 @@ import pytest from pypdf import PdfReader -from pypdf._xobj_image_helpers import _handle_flate -from pypdf.errors import PdfReadError +from pypdf._xobj_image_helpers import _extended_image_frombytes, _handle_flate +from 
pypdf.errors import EmptyImageDataError, PdfReadError from pypdf.generic import ArrayObject, DecodedStreamObject, NameObject, NumberObject from . import get_data_from_url @@ -113,3 +113,12 @@ def test_handle_flate__image_mode_1(): colors=2, obj_as_text="dummy", ) + + +def test_extended_image_frombytes_zero_data(): + mode = "RGB" + size = (1, 1) + data = b"" + + with pytest.raises(EmptyImageDataError, match="Data is 0 bytes, cannot process an image from empty data."): + _extended_image_frombytes(mode, size, data) From 582557e09a7e658fdcb19f26eb069d87875489f0 Mon Sep 17 00:00:00 2001 From: Diogo Teles Sant'Anna Date: Fri, 2 Aug 2024 15:49:29 -0300 Subject: [PATCH 04/43] SEC: Fix GitHub workflow vulnerable to script injection (#2787) Signed-off-by: Diogo Teles Sant'Anna --- .github/workflows/release.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 9f782ec080..b1a4fb27f3 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -12,6 +12,9 @@ on: permissions: contents: write +env: + HEAD_COMMIT_MESSAGE: ${{ github.event.head_commit.message }} + jobs: build_and_publish: name: Publish a new version @@ -24,7 +27,7 @@ jobs: - name: Extract version from commit message id: extract_version run: | - VERSION=$(echo "${{ github.event.head_commit.message }}" | grep -oP '(?<=REL: )\d+\.\d+\.\d+') + VERSION=$(echo "$HEAD_COMMIT_MESSAGE" | grep -oP '(?<=REL: )\d+\.\d+\.\d+') echo "version=$VERSION" >> $GITHUB_OUTPUT - name: Extract tag message from commit message @@ -32,7 +35,7 @@ jobs: run: | VERSION="${{ steps.extract_version.outputs.version }}" delimiter="$(openssl rand -hex 8)" - MESSAGE=$(echo "${{ github.event.head_commit.message }}" | sed "0,/REL: $VERSION/s///" ) + MESSAGE=$(echo "$HEAD_COMMIT_MESSAGE" | sed "0,/REL: $VERSION/s///" ) echo "message<<${delimiter}" >> $GITHUB_OUTPUT echo "$MESSAGE" >> $GITHUB_OUTPUT echo "${delimiter}" >> $GITHUB_OUTPUT From 38f3925502c2971ad587fb616500b6f8b6333d03 Mon Sep 17 00:00:00 2001 From: j-t-1 <120829237+j-t-1@users.noreply.github.com> Date: Mon, 5 Aug 2024 09:10:47 +0100 Subject: [PATCH 05/43] MAINT: Remove unused paeth_predictor (#2773) --- pypdf/_utils.py | 14 -------------- tests/test_utils.py | 18 ------------------ 2 files changed, 32 deletions(-) diff --git a/pypdf/_utils.py b/pypdf/_utils.py index 38c0d67d7a..6569707b66 100644 --- a/pypdf/_utils.py +++ b/pypdf/_utils.py @@ -390,20 +390,6 @@ def ord_(b: Union[int, str, bytes]) -> Union[int, bytes]: WHITESPACES_AS_REGEXP = b"[" + WHITESPACES_AS_BYTES + b"]" -def paeth_predictor(left: int, up: int, up_left: int) -> int: - p = left + up - up_left - dist_left = abs(p - left) - dist_up = abs(p - up) - dist_up_left = abs(p - up_left) - - if dist_left <= dist_up and dist_left <= dist_up_left: - return left - elif dist_up <= dist_up_left: - return up - else: - return up_left - - def deprecate(msg: str, stacklevel: int = 3) -> None: warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel) diff --git a/tests/test_utils.py b/tests/test_utils.py index 81fcf9fb47..856bedd863 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -132,24 +132,6 @@ def test_deprecate_no_replacement(): assert warn[0].message.args[0] == error_msg -@pytest.mark.parametrize( - ("left", "up", "upleft", "expected"), - [ - (0, 0, 0, 0), - (1, 0, 0, 1), - (0, 1, 0, 1), - (0, 0, 1, 0), - (1, 2, 3, 1), - (2, 1, 3, 1), - (1, 3, 2, 2), - (3, 1, 2, 2), - (3, 2, 1, 3), - ], -) -def test_paeth_predictor(left, up, 
upleft, expected): - assert pypdf._utils.paeth_predictor(left, up, upleft) == expected - - @pytest.mark.parametrize( ("dat", "pos", "to_read", "expected", "expected_pos"), [ From 09f9b7ed52193bfd9e98bdd018ccaf7cbe821687 Mon Sep 17 00:00:00 2001 From: j-t-1 <120829237+j-t-1@users.noreply.github.com> Date: Mon, 5 Aug 2024 16:49:29 +0100 Subject: [PATCH 06/43] MAINT: Remove unused AnnotationFlag --- pypdf/annotations/_non_markup_annotations.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/pypdf/annotations/_non_markup_annotations.py b/pypdf/annotations/_non_markup_annotations.py index dcdb3b0ff8..6272cceee6 100644 --- a/pypdf/annotations/_non_markup_annotations.py +++ b/pypdf/annotations/_non_markup_annotations.py @@ -1,6 +1,5 @@ from typing import TYPE_CHECKING, Any, Optional, Tuple, Union -from ..constants import AnnotationFlag from ..generic._base import ( BooleanObject, NameObject, @@ -12,8 +11,6 @@ from ..generic._rectangle import RectangleObject from ._base import AnnotationDictionary -DEFAULT_ANNOTATION_FLAG = AnnotationFlag(0) - class Link(AnnotationDictionary): def __init__( From b2d72043ab5221b58138c7d06c181b8cbc88ea8e Mon Sep 17 00:00:00 2001 From: owurman Date: Mon, 5 Aug 2024 12:14:18 -0700 Subject: [PATCH 07/43] BUG: Handle Sequence as an IndirectObject when extracting text with layout mode (#2788) * Handle Sequence as an IndirectObject The spec allows an int or float to be an IndirectObject as well, but this commit does not address that theoretical possibility. * Update pypdf/_text_extraction/_layout_mode/_font.py Co-authored-by: Stefan <96178532+stefan6419846@users.noreply.github.com> * Address PR comments -Rename w_1 to w_next_entry -Utilize ParseError instead of PdfReadError -Write a test (both positive and negative) * Handle unlikely case of IndirectObjects for float/int width elements Also adds a comment to clarify that we don't explicitly handle the IndexError exception. Rather, we let it be raised as an IndexError. * Yoda condition I removed * Last commit was a bad patch, confused by non-committed changes * Use test files from URL rather than resources * Update tests/test_text_extraction.py Co-authored-by: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> * Fix code style warnings in range() call --------- Co-authored-by: Stefan <96178532+stefan6419846@users.noreply.github.com> Co-authored-by: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> --- pypdf/_text_extraction/_layout_mode/_font.py | 26 +++++++++++++------- tests/test_text_extraction.py | 17 +++++++++++++ 2 files changed, 34 insertions(+), 9 deletions(-) diff --git a/pypdf/_text_extraction/_layout_mode/_font.py b/pypdf/_text_extraction/_layout_mode/_font.py index a912fddb27..40655b1b22 100644 --- a/pypdf/_text_extraction/_layout_mode/_font.py +++ b/pypdf/_text_extraction/_layout_mode/_font.py @@ -1,8 +1,9 @@ """Font constants and classes for "layout" mode text operations""" from dataclasses import dataclass, field -from typing import Any, Dict, Sequence, Union +from typing import Any, Dict, Sequence, Union, cast +from ...errors import ParseError from ...generic import IndirectObject from ._font_widths import STANDARD_WIDTHS @@ -58,6 +59,7 @@ def __post_init__(self) -> None: skip_count = 0 _w = d_font.get("/W", []) for idx, w_entry in enumerate(_w): + w_entry = w_entry.get_object() if skip_count: skip_count -= 1 continue @@ -66,13 +68,14 @@ def __post_init__(self) -> None: # warning and or use reader's "strict" to force an ex??? 
continue # check for format (1): `int [int int int int ...]` - if isinstance(_w[idx + 1], Sequence): - start_idx, width_list = _w[idx : idx + 2] + w_next_entry = _w[idx + 1].get_object() + if isinstance(w_next_entry, Sequence): + start_idx, width_list = w_entry, w_next_entry self.width_map.update( { ord_map[_cidx]: _width for _cidx, _width in zip( - range(start_idx, start_idx + len(width_list), 1), + range(cast(int, start_idx), cast(int, start_idx) + len(width_list), 1), width_list, ) if _cidx in ord_map @@ -80,18 +83,23 @@ def __post_init__(self) -> None: ) skip_count = 1 # check for format (2): `int int int` - if not isinstance(_w[idx + 1], Sequence) and not isinstance( - _w[idx + 2], Sequence - ): - start_idx, stop_idx, const_width = _w[idx : idx + 3] + elif isinstance(w_next_entry, (int, float)) and isinstance(_w[idx + 2].get_object(), (int, float)): + start_idx, stop_idx, const_width = w_entry, w_next_entry, _w[idx + 2].get_object() self.width_map.update( { ord_map[_cidx]: const_width - for _cidx in range(start_idx, stop_idx + 1, 1) + for _cidx in range(cast(int, start_idx), cast(int, stop_idx + 1), 1) if _cidx in ord_map } ) skip_count = 2 + else: + # Note: this doesn't handle the case of out of bounds (reaching the end of the width definitions + # while expecting more elements). This raises an IndexError which is sufficient. + raise ParseError( + f"Invalid font width definition. Next elements: {w_entry}, {w_next_entry}, {_w[idx + 2]}" + ) # pragma: no cover + if not self.width_map and "/BaseFont" in self.font_dictionary: for key in STANDARD_WIDTHS: if self.font_dictionary["/BaseFont"].startswith(f"/{key}"): diff --git a/tests/test_text_extraction.py b/tests/test_text_extraction.py index 1ffa68a3e6..dcd4e6caeb 100644 --- a/tests/test_text_extraction.py +++ b/tests/test_text_extraction.py @@ -10,6 +10,7 @@ from pypdf import PdfReader, mult from pypdf._text_extraction import set_custom_rtl +from pypdf.errors import ParseError from . 
import get_data_from_url @@ -156,3 +157,19 @@ def test_layout_mode_type0_font_widths(): encoding="utf-8" ) assert expected == reader.pages[0].extract_text(extraction_mode="layout") + + +@pytest.mark.enable_socket() +def test_layout_mode_indirect_sequence_font_widths(): + # Cover the situation where the sequence for font widths is an IndirectObject + # ref https://github.com/py-pdf/pypdf/pull/2788 + url = "https://github.com/user-attachments/files/16491621/2788_example.pdf" + name ="2788_example.pdf" + reader = PdfReader(BytesIO(get_data_from_url(url, name=name))) + assert reader.pages[0].extract_text(extraction_mode="layout") == "" + url = "https://github.com/user-attachments/files/16491619/2788_example_malformed.pdf" + name = "2788_example_malformed.pdf" + reader = PdfReader(BytesIO(get_data_from_url(url, name=name))) + with pytest.raises(ParseError) as exc: + reader.pages[0].extract_text(extraction_mode="layout") + assert str(exc.value).startswith("Invalid font width definition") From 5abd590740a2718fc69b8477c656ce5515a0ab33 Mon Sep 17 00:00:00 2001 From: j-t-1 <120829237+j-t-1@users.noreply.github.com> Date: Wed, 7 Aug 2024 12:14:17 +0100 Subject: [PATCH 08/43] STY: Refactor b_ (#2772) --- pypdf/_utils.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/pypdf/_utils.py b/pypdf/_utils.py index 6569707b66..5fecb38e7d 100644 --- a/pypdf/_utils.py +++ b/pypdf/_utils.py @@ -347,14 +347,11 @@ def b_(s: Union[str, bytes]) -> bytes: return bc[s] try: r = s.encode("latin-1") - if len(s) < 2: - bc[s] = r - return r - except Exception: + except UnicodeEncodeError: r = s.encode("utf-8") - if len(s) < 2: - bc[s] = r - return r + if len(s) < 2: + bc[s] = r + return r def str_(b: Any) -> str: From 219eb13f7eb9c2cd9519e9a69d639250853bd823 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Mon, 12 Aug 2024 10:52:40 +0200 Subject: [PATCH 09/43] MAINT: Drop Python 3.7 support (#2793) --- .github/workflows/github-ci.yaml | 14 +++++++------- pypdf/_page.py | 8 +------- pypdf/_protocols.py | 8 +------- .../_layout_mode/_fixed_width_page.py | 8 +------- pypdf/_xobj_image_helpers.py | 13 ++++--------- pypdf/types.py | 8 +------- 6 files changed, 15 insertions(+), 44 deletions(-) diff --git a/.github/workflows/github-ci.yaml b/.github/workflows/github-ci.yaml index 1eb3d9bd03..d5d9bb4d4d 100644 --- a/.github/workflows/github-ci.yaml +++ b/.github/workflows/github-ci.yaml @@ -57,12 +57,12 @@ jobs: runs-on: ubuntu-20.04 strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13-dev"] + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13-dev"] use-crypto-lib: ["cryptography"] include: - - python-version: "3.7" + - python-version: "3.8" use-crypto-lib: "pycryptodome" - - python-version: "3.7" + - python-version: "3.8" use-crypto-lib: "none" steps: - name: Update APT packages @@ -83,7 +83,7 @@ jobs: key: cache-downloaded-files - name: Setup Python uses: actions/setup-python@v5 - if: matrix.python-version == '3.7' || matrix.python-version == '3.8' || matrix.python-version == '3.9' || matrix.python-version == '3.10' + if: matrix.python-version == '3.8' || matrix.python-version == '3.9' || matrix.python-version == '3.10' with: python-version: ${{ matrix.python-version }} cache: 'pip' @@ -102,7 +102,7 @@ jobs: - name: Install requirements (Python 3) run: | pip install -r requirements/ci.txt - if: matrix.python-version == '3.7' || matrix.python-version == '3.8' || matrix.python-version == '3.9' || 
matrix.python-version == '3.10' + if: matrix.python-version == '3.8' || matrix.python-version == '3.9' || matrix.python-version == '3.10' - name: Install requirements (Python 3.11+) run: | pip install -r requirements/ci-3.11.txt @@ -215,8 +215,8 @@ jobs: - name: Check Number of Downloaded Files run: | downloaded_files_count=$(find \.coverage* -type f | wc -l) - if [ $downloaded_files_count -eq 9 ]; then - echo "The expected number of files (9) were downloaded." + if [ $downloaded_files_count -eq 8 ]; then + echo "The expected number of files (8) were downloaded." else echo "ERROR: Expected 8 files, but found $downloaded_files_count files." exit 1 diff --git a/pypdf/_page.py b/pypdf/_page.py index 63038d9d07..ee1dc7f602 100644 --- a/pypdf/_page.py +++ b/pypdf/_page.py @@ -28,7 +28,6 @@ # POSSIBILITY OF SUCH DAMAGE. import math -import sys from decimal import Decimal from pathlib import Path from typing import ( @@ -38,6 +37,7 @@ Iterable, Iterator, List, + Literal, Optional, Sequence, Set, @@ -85,12 +85,6 @@ StreamObject, ) -if sys.version_info >= (3, 8): - from typing import Literal -else: - from typing_extensions import Literal - - MERGE_CROP_BOX = "cropbox" # pypdf<=3.4.0 used 'trimbox' diff --git a/pypdf/_protocols.py b/pypdf/_protocols.py index 9f413660bb..b5fa14879c 100644 --- a/pypdf/_protocols.py +++ b/pypdf/_protocols.py @@ -2,13 +2,7 @@ from abc import abstractmethod from pathlib import Path -from typing import IO, Any, Dict, List, Optional, Tuple, Union - -try: - # Python 3.8+: https://peps.python.org/pep-0586 - from typing import Protocol -except ImportError: - from typing_extensions import Protocol # type: ignore[assignment] +from typing import IO, Any, Dict, List, Optional, Protocol, Tuple, Union from ._utils import StrByteType, StreamType diff --git a/pypdf/_text_extraction/_layout_mode/_fixed_width_page.py b/pypdf/_text_extraction/_layout_mode/_fixed_width_page.py index 1be500959c..e7af1b2340 100644 --- a/pypdf/_text_extraction/_layout_mode/_fixed_width_page.py +++ b/pypdf/_text_extraction/_layout_mode/_fixed_width_page.py @@ -1,10 +1,9 @@ """Extract PDF text preserving the layout of the source PDF""" -import sys from itertools import groupby from math import ceil from pathlib import Path -from typing import Any, Dict, Iterator, List, Optional, Tuple +from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, TypedDict from ..._utils import logger_warning from .. 
import LAYOUT_NEW_BT_GROUP_SPACE_WIDTHS @@ -12,11 +11,6 @@ from ._text_state_manager import TextStateManager from ._text_state_params import TextStateParams -if sys.version_info >= (3, 8): - from typing import Literal, TypedDict -else: - from typing_extensions import Literal, TypedDict - class BTGroup(TypedDict): """ diff --git a/pypdf/_xobj_image_helpers.py b/pypdf/_xobj_image_helpers.py index 5ae8894fa3..7a3f40d95c 100644 --- a/pypdf/_xobj_image_helpers.py +++ b/pypdf/_xobj_image_helpers.py @@ -2,7 +2,7 @@ import sys from io import BytesIO -from typing import Any, List, Tuple, Union, cast +from typing import Any, List, Literal, Tuple, Union, cast from ._utils import check_if_whitespace_only, logger_warning from .constants import ColorSpaces @@ -15,13 +15,6 @@ NullObject, ) -if sys.version_info[:2] >= (3, 8): - from typing import Literal -else: - # PEP 586 introduced typing.Literal with Python 3.8 - # For older Python versions, the backport typing_extensions is necessary: - from typing_extensions import Literal - if sys.version_info[:2] >= (3, 10): from typing import TypeAlias else: @@ -150,7 +143,9 @@ def _extended_image_frombytes( nb_pix = size[0] * size[1] data_length = len(data) if data_length == 0: - raise EmptyImageDataError("Data is 0 bytes, cannot process an image from empty data.") from exc + raise EmptyImageDataError( + "Data is 0 bytes, cannot process an image from empty data." + ) from exc if data_length % nb_pix != 0: raise exc k = nb_pix * len(mode) / data_length diff --git a/pypdf/types.py b/pypdf/types.py index b8fbab92cf..e383dc7b1f 100644 --- a/pypdf/types.py +++ b/pypdf/types.py @@ -1,13 +1,7 @@ """Helpers for working with PDF types.""" import sys -from typing import List, Union - -if sys.version_info[:2] >= (3, 8): - # Python 3.8+: https://peps.python.org/pep-0586 - from typing import Literal -else: - from typing_extensions import Literal +from typing import List, Literal, Union if sys.version_info[:2] >= (3, 10): # Python 3.10+: https://www.python.org/dev/peps/pep-0484 From 46c89dd8e1e8641a49624f3fbc1865f9c4b41374 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Mon, 12 Aug 2024 10:57:25 +0200 Subject: [PATCH 10/43] MAINT: Remove b_ and str_ (#2792) Closes #2726. Closes #2791. 
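For downstream code that imported the removed helpers, a rough stand-in with the same semantics as the functions removed/relocated in the diff below (hypothetical names `to_bytes`/`to_str`; the small result cache of `b_` is omitted):

```python
from typing import Any, Union


def to_bytes(s: Union[str, bytes]) -> bytes:
    # Mirrors the removed b_: latin-1 first, utf-8 as fallback
    if isinstance(s, bytes):
        return s
    try:
        return s.encode("latin-1")
    except UnicodeEncodeError:
        return s.encode("utf-8")


def to_str(b: Any) -> str:
    # Mirrors the removed str_: decode bytes as latin-1, stringify otherwise
    return b.decode("latin-1") if isinstance(b, bytes) else str(b)
```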
--- pypdf/_cmap.py | 30 +++---- pypdf/_doc_common.py | 3 +- pypdf/_encryption.py | 6 +- pypdf/_merger.py | 10 ++- pypdf/_page.py | 16 ++-- pypdf/_reader.py | 5 +- pypdf/_utils.py | 34 ++------ pypdf/_writer.py | 6 +- pypdf/filters.py | 53 ++++++++---- pypdf/generic/_base.py | 39 +++++---- pypdf/generic/_data_structures.py | 35 ++++---- pypdf/generic/_utils.py | 77 ++++++++++-------- ..._Vicksburg_Sample_OCR-crazyones-merged.pdf | Bin 217096 -> 217093 bytes tests/test_cmap.py | 18 +++- tests/test_page.py | 13 ++- tests/test_utils.py | 16 ---- tests/test_workflows.py | 4 +- tests/test_writer.py | 2 +- 18 files changed, 193 insertions(+), 174 deletions(-) diff --git a/pypdf/_cmap.py b/pypdf/_cmap.py index 9a2d10a611..d635724d25 100644 --- a/pypdf/_cmap.py +++ b/pypdf/_cmap.py @@ -3,11 +3,10 @@ from typing import Any, Dict, List, Tuple, Union, cast from ._codecs import adobe_glyphs, charset_encoding -from ._utils import b_, logger_error, logger_warning +from ._utils import logger_error, logger_warning from .generic import ( DecodedStreamObject, DictionaryObject, - IndirectObject, NullObject, StreamObject, ) @@ -258,7 +257,7 @@ def prepare_cm(ft: DictionaryObject) -> bytes: tu = ft["/ToUnicode"] cm: bytes if isinstance(tu, StreamObject): - cm = b_(cast(DecodedStreamObject, ft["/ToUnicode"]).get_data()) + cm = cast(DecodedStreamObject, ft["/ToUnicode"]).get_data() elif isinstance(tu, str) and tu.startswith("/Identity"): # the full range 0000-FFFF will be processed cm = b"beginbfrange\n<0000> <0001> <0000>\nendbfrange" @@ -448,34 +447,27 @@ def compute_space_width( en: int = cast(int, ft["/LastChar"]) if st > space_code or en < space_code: raise Exception("Not in range") - if w[space_code - st] == 0: + if w[space_code - st].get_object() == 0: raise Exception("null width") - sp_width = w[space_code - st] + sp_width = w[space_code - st].get_object() except Exception: if "/FontDescriptor" in ft and "/MissingWidth" in cast( DictionaryObject, ft["/FontDescriptor"] ): - sp_width = ft["/FontDescriptor"]["/MissingWidth"] # type: ignore + sp_width = ft["/FontDescriptor"]["/MissingWidth"].get_object() # type: ignore else: # will consider width of char as avg(width)/2 m = 0 cpt = 0 - for x in w: - if x > 0: - m += x + for xx in w: + xx = xx.get_object() + if xx > 0: + m += xx cpt += 1 sp_width = m / max(1, cpt) / 2 - if isinstance(sp_width, IndirectObject): - # According to - # 'Table 122 - Entries common to all font descriptors (continued)' - # the MissingWidth should be a number, but according to #2286 it can - # be an indirect object - obj = sp_width.get_object() - if obj is None or isinstance(obj, NullObject): - return 0.0 - return obj # type: ignore - + if sp_width is None or isinstance(sp_width, NullObject): + sp_width = 0.0 return sp_width diff --git a/pypdf/_doc_common.py b/pypdf/_doc_common.py index d4c5c43c3c..ffbdb7882e 100644 --- a/pypdf/_doc_common.py +++ b/pypdf/_doc_common.py @@ -49,7 +49,6 @@ from ._page import PageObject, _VirtualList from ._page_labels import index2label as page_index2page_label from ._utils import ( - b_, deprecate_with_replacement, logger_warning, parse_iso8824_date, @@ -1258,7 +1257,7 @@ def xfa(self) -> Optional[Dict[str, Any]]: if isinstance(f, IndirectObject): field = cast(Optional[EncodedStreamObject], f.get_object()) if field: - es = zlib.decompress(b_(field._data)) + es = zlib.decompress(field._data) retval[tag] = es return retval diff --git a/pypdf/_encryption.py b/pypdf/_encryption.py index 5ddd8d0efe..e5cdd9324e 100644 --- a/pypdf/_encryption.py +++ 
b/pypdf/_encryption.py @@ -43,7 +43,7 @@ rc4_encrypt, ) -from ._utils import b_, logger_warning +from ._utils import logger_warning from .generic import ( ArrayObject, ByteStringObject, @@ -78,7 +78,7 @@ def encrypt_object(self, obj: PdfObject) -> PdfObject: elif isinstance(obj, StreamObject): obj2 = StreamObject() obj2.update(obj) - obj2.set_data(self.stm_crypt.encrypt(b_(obj._data))) + obj2.set_data(self.stm_crypt.encrypt(obj._data)) for key, value in obj.items(): # Dont forget the Stream dict. obj2[key] = self.encrypt_object(value) obj = obj2 @@ -96,7 +96,7 @@ def decrypt_object(self, obj: PdfObject) -> PdfObject: data = self.str_crypt.decrypt(obj.original_bytes) obj = create_string_object(data) elif isinstance(obj, StreamObject): - obj._data = self.stm_crypt.decrypt(b_(obj._data)) + obj._data = self.stm_crypt.decrypt(obj._data) for key, value in obj.items(): # Dont forget the Stream dict. obj[key] = self.decrypt_object(value) elif isinstance(obj, DictionaryObject): diff --git a/pypdf/_merger.py b/pypdf/_merger.py index 7176a1adf7..a52a354e38 100644 --- a/pypdf/_merger.py +++ b/pypdf/_merger.py @@ -46,7 +46,6 @@ from ._utils import ( StrByteType, deprecate_with_replacement, - str_, ) from ._writer import PdfWriter from .constants import GoToActionArguments, TypArguments, TypFitArguments @@ -82,6 +81,15 @@ def __init__(self, pagedata: PageObject, src: PdfReader, id: int) -> None: self.id = id +# transfered from _utils : as this function is only required here +# and merger will be soon deprecated +def str_(b: Any) -> str: # pragma: no cover + if isinstance(b, bytes): + return b.decode("latin-1") + else: + return str(b) # will return b.__str__() if defined + + class PdfMerger: """ Use :class:`PdfWriter` instead. diff --git a/pypdf/_page.py b/pypdf/_page.py index ee1dc7f602..48cdeb149f 100644 --- a/pypdf/_page.py +++ b/pypdf/_page.py @@ -846,7 +846,7 @@ def _add_transformation_matrix( FloatObject(e), FloatObject(f), ], - " cm", + b"cm", ], ) return contents @@ -864,7 +864,7 @@ def _get_contents_as_bytes(self) -> Optional[bytes]: if isinstance(obj, list): return b"".join(x.get_object().get_data() for x in obj) else: - return cast(bytes, cast(EncodedStreamObject, obj).get_data()) + return cast(EncodedStreamObject, obj).get_data() else: return None @@ -1057,11 +1057,11 @@ def _merge_page( rect.height, ], ), - "re", + b"re", ), ) - page2content.operations.insert(1, ([], "W")) - page2content.operations.insert(2, ([], "n")) + page2content.operations.insert(1, ([], b"W")) + page2content.operations.insert(2, ([], b"n")) if page2transformation is not None: page2content = page2transformation(page2content) page2content = PageObject._content_stream_rename( @@ -1195,11 +1195,11 @@ def _merge_page_writer( rect.height, ], ), - "re", + b"re", ), ) - page2content.operations.insert(1, ([], "W")) - page2content.operations.insert(2, ([], "n")) + page2content.operations.insert(1, ([], b"W")) + page2content.operations.insert(2, ([], b"n")) if page2transformation is not None: page2content = page2transformation(page2content) page2content = PageObject._content_stream_rename( diff --git a/pypdf/_reader.py b/pypdf/_reader.py index aeababa7b7..7c084107c5 100644 --- a/pypdf/_reader.py +++ b/pypdf/_reader.py @@ -51,7 +51,6 @@ from ._utils import ( StrByteType, StreamType, - b_, logger_warning, read_non_whitespace, read_previous_line, @@ -328,7 +327,7 @@ def _get_object_from_stream( assert cast(str, obj_stm["/Type"]) == "/ObjStm" # /N is the number of indirect objects in the stream assert idx < obj_stm["/N"] - 
stream_data = BytesIO(b_(obj_stm.get_data())) + stream_data = BytesIO(obj_stm.get_data()) for i in range(obj_stm["/N"]): # type: ignore read_non_whitespace(stream_data) stream_data.seek(-1, 1) @@ -932,7 +931,7 @@ def _read_pdf15_xref_stream( xrefstream = cast(ContentStream, read_object(stream, self)) assert cast(str, xrefstream["/Type"]) == "/XRef" self.cache_indirect_object(generation, idnum, xrefstream) - stream_data = BytesIO(b_(xrefstream.get_data())) + stream_data = BytesIO(xrefstream.get_data()) # Index pairs specify the subsections in the dictionary. If # none create one subsection that spans everything. idx_pairs = xrefstream.get("/Index", [0, xrefstream.get("/Size")]) diff --git a/pypdf/_utils.py b/pypdf/_utils.py index 5fecb38e7d..94d45cf6d7 100644 --- a/pypdf/_utils.py +++ b/pypdf/_utils.py @@ -336,31 +336,6 @@ def mark_location(stream: StreamType) -> None: stream.seek(-radius, 1) -B_CACHE: Dict[str, bytes] = {} - - -def b_(s: Union[str, bytes]) -> bytes: - if isinstance(s, bytes): - return s - bc = B_CACHE - if s in bc: - return bc[s] - try: - r = s.encode("latin-1") - except UnicodeEncodeError: - r = s.encode("utf-8") - if len(s) < 2: - bc[s] = r - return r - - -def str_(b: Any) -> str: - if isinstance(b, bytes): - return b.decode("latin-1") - else: - return str(b) # will return b.__str__() if defined - - @overload def ord_(b: str) -> int: ... @@ -397,12 +372,17 @@ def deprecation(msg: str) -> None: def deprecate_with_replacement(old_name: str, new_name: str, removed_in: str) -> None: """Raise an exception that a feature will be removed, but has a replacement.""" - deprecate(f"{old_name} is deprecated and will be removed in pypdf {removed_in}. Use {new_name} instead.", 4) + deprecate( + f"{old_name} is deprecated and will be removed in pypdf {removed_in}. Use {new_name} instead.", + 4, + ) def deprecation_with_replacement(old_name: str, new_name: str, removed_in: str) -> None: """Raise an exception that a feature was already removed, but has a replacement.""" - deprecation(f"{old_name} is deprecated and was removed in pypdf {removed_in}. Use {new_name} instead.") + deprecation( + f"{old_name} is deprecated and was removed in pypdf {removed_in}. Use {new_name} instead." + ) def deprecate_no_replacement(name: str, removed_in: str) -> None: diff --git a/pypdf/_writer.py b/pypdf/_writer.py index 00b9d498c0..d73c00e3d4 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -62,7 +62,6 @@ StrByteType, StreamType, _get_max_pdf_version_header, - b_, deprecate_with_replacement, logger_warning, ) @@ -678,9 +677,10 @@ def add_attachment(self, filename: str, data: Union[str, bytes]) -> None: # Hello world! 
# endstream # endobj - + if isinstance(data, str): + data = data.encode("latin-1") file_entry = DecodedStreamObject() - file_entry.set_data(b_(data)) + file_entry.set_data(data) file_entry.update({NameObject(PA.TYPE): NameObject("/EmbeddedFile")}) # The Filespec entry diff --git a/pypdf/filters.py b/pypdf/filters.py index 137e3603a3..43730cc8e9 100644 --- a/pypdf/filters.py +++ b/pypdf/filters.py @@ -43,7 +43,7 @@ from ._utils import ( WHITESPACES_AS_BYTES, - b_, + deprecate, deprecate_with_replacement, deprecation_no_replacement, logger_warning, @@ -376,20 +376,18 @@ class LZWDecode: """ Taken from: - http://www.java2s.com/Open-Source/Java-Document/PDF/PDF- - Renderer/com/sun/pdfview/decode/LZWDecode.java.htm + http://www.java2s.com/Open-Source/Java-Document/PDF/PDF-Renderer/com/sun/pdfview/decode/LZWDecode.java.htm """ class Decoder: + STOP = 257 + CLEARDICT = 256 + def __init__(self, data: bytes) -> None: - self.STOP = 257 - self.CLEARDICT = 256 self.data = data self.bytepos = 0 self.bitpos = 0 - self.dict = [""] * 4096 - for i in range(256): - self.dict[i] = chr(i) + self.dict = [struct.pack("B", i) for i in range(256)] + [b""] * (4096 - 256) self.reset_dict() def reset_dict(self) -> None: @@ -416,7 +414,7 @@ def next_code(self) -> int: self.bytepos = self.bytepos + 1 return value - def decode(self) -> str: + def decode(self) -> bytes: """ TIFF 6.0 specification explains in sufficient details the steps to implement the LZW encode() and decode() algorithms. @@ -429,7 +427,7 @@ def decode(self) -> str: PdfReadError: If the stop code is missing """ cW = self.CLEARDICT - baos = "" + baos = b"" while True: pW = cW cW = self.next_code() @@ -444,11 +442,11 @@ def decode(self) -> str: else: if cW < self.dictlen: baos += self.dict[cW] - p = self.dict[pW] + self.dict[cW][0] + p = self.dict[pW] + self.dict[cW][0:1] self.dict[self.dictlen] = p self.dictlen += 1 else: - p = self.dict[pW] + self.dict[pW][0] + p = self.dict[pW] + self.dict[pW][0:1] baos += p self.dict[self.dictlen] = p self.dictlen += 1 @@ -460,11 +458,11 @@ def decode(self) -> str: return baos @staticmethod - def decode( + def _decodeb( data: bytes, decode_parms: Optional[DictionaryObject] = None, **kwargs: Any, - ) -> str: + ) -> bytes: """ Decode an LZW encoded data stream. @@ -476,9 +474,28 @@ def decode( decoded data. """ # decode_parms is unused here - return LZWDecode.Decoder(data).decode() + @staticmethod + def decode( + data: bytes, + decode_parms: Optional[DictionaryObject] = None, + **kwargs: Any, + ) -> str: # deprecated + """ + Decode an LZW encoded data stream. + + Args: + data: ``bytes`` or ``str`` text to decode. + decode_parms: a dictionary of parameter values. + + Returns: + decoded data. + """ + # decode_parms is unused here + deprecate("LZWDecode.decode will return bytes instead of str in pypdf 6.0.0") + return LZWDecode.Decoder(data).decode().decode("latin-1") + class ASCII85Decode: """Decodes string ASCII85-encoded data into a byte format.""" @@ -651,7 +668,7 @@ def decode( return tiff_header + data -def decode_stream_data(stream: Any) -> Union[bytes, str]: # utils.StreamObject +def decode_stream_data(stream: Any) -> bytes: # utils.StreamObject """ Decode the stream data based on the specified filters. 
@@ -678,7 +695,7 @@ def decode_stream_data(stream: Any) -> Union[bytes, str]: # utils.StreamObject decodparms = stream.get(SA.DECODE_PARMS, ({},) * len(filters)) if not isinstance(decodparms, (list, tuple)): decodparms = (decodparms,) - data: bytes = b_(stream._data) + data: bytes = stream._data # If there is not data to decode we should not try to decode the data. if data: for filter_type, params in zip(filters, decodparms): @@ -691,7 +708,7 @@ def decode_stream_data(stream: Any) -> Union[bytes, str]: # utils.StreamObject elif filter_type in (FT.RUN_LENGTH_DECODE, FTA.RL): data = RunLengthDecode.decode(data) elif filter_type in (FT.LZW_DECODE, FTA.LZW): - data = LZWDecode.decode(data, params) # type: ignore + data = LZWDecode._decodeb(data, params) elif filter_type in (FT.ASCII_85_DECODE, FTA.A85): data = ASCII85Decode.decode(data) elif filter_type == FT.DCT_DECODE: diff --git a/pypdf/generic/_base.py b/pypdf/generic/_base.py index 2d606b4184..309d389cc8 100644 --- a/pypdf/generic/_base.py +++ b/pypdf/generic/_base.py @@ -30,18 +30,17 @@ import re from binascii import unhexlify from math import log10 +from struct import iter_unpack from typing import Any, Callable, ClassVar, Dict, Optional, Sequence, Union, cast from .._codecs import _pdfdoc_encoding_rev from .._protocols import PdfObjectProtocol, PdfWriterProtocol from .._utils import ( StreamType, - b_, deprecate_no_replacement, logger_warning, read_non_whitespace, read_until_regex, - str_, ) from ..errors import STREAM_TRUNCATED_PREMATURELY, PdfReadError, PdfStreamError @@ -308,6 +307,10 @@ def __getitem__(self, key: Any) -> Any: # items should be extracted from pointed Object return self._get_object_with_check()[key] # type: ignore + def __float__(self) -> str: + # in this case we are looking for the pointed data + return self.get_object().__float__() # type: ignore + def __str__(self) -> str: # in this case we are looking for the pointed data return self.get_object().__str__() @@ -369,10 +372,10 @@ def read_from_stream(stream: StreamType, pdf: Any) -> "IndirectObject": # PdfRe class FloatObject(float, PdfObject): def __new__( - cls, value: Union[str, Any] = "0.0", context: Optional[Any] = None + cls, value: Any = "0.0", context: Optional[Any] = None ) -> "FloatObject": try: - value = float(str_(value)) + value = float(value) return float.__new__(cls, value) except Exception as e: # If this isn't a valid decimal (happens in malformed PDFs) @@ -599,15 +602,16 @@ def write_to_stream( ) bytearr = self.get_encoded_bytes() stream.write(b"(") - for c in bytearr: - if not chr(c).isalnum() and c != b" ": + for c_ in iter_unpack("c", bytearr): + c = cast(bytes, c_[0]) + if not c.isalnum() and c != b" ": # This: # stream.write(rf"\{c:0>3o}".encode()) # gives # https://github.com/davidhalter/parso/issues/207 - stream.write(("\\%03o" % c).encode()) + stream.write(b"\\%03o" % ord(c)) else: - stream.write(b_(chr(c))) + stream.write(c) stream.write(b")") @@ -710,12 +714,13 @@ def read_from_stream(stream: StreamType, pdf: Any) -> "NameObject": # PdfReader def encode_pdfdocencoding(unicode_string: str) -> bytes: - retval = bytearray() - for c in unicode_string: - try: - retval += b_(chr(_pdfdoc_encoding_rev[c])) - except KeyError: - raise UnicodeEncodeError( - "pdfdocencoding", c, -1, -1, "does not exist in translation table" - ) - return bytes(retval) + try: + return bytes([_pdfdoc_encoding_rev[k] for k in unicode_string]) + except KeyError: + raise UnicodeEncodeError( + "pdfdocencoding", + unicode_string, + -1, + -1, + "does not exist in translation 
table", + ) diff --git a/pypdf/generic/_data_structures.py b/pypdf/generic/_data_structures.py index 87d6886742..399836be5f 100644 --- a/pypdf/generic/_data_structures.py +++ b/pypdf/generic/_data_structures.py @@ -52,7 +52,6 @@ from .._utils import ( WHITESPACES, StreamType, - b_, deprecate_no_replacement, deprecate_with_replacement, logger_warning, @@ -843,7 +842,7 @@ def _reset_node_tree_relationship(child_obj: Any) -> None: class StreamObject(DictionaryObject): def __init__(self) -> None: - self._data: Union[bytes, str] = b"" + self._data: bytes = b"" self.decoded_self: Optional[DecodedStreamObject] = None def _clone( @@ -877,7 +876,7 @@ def _clone( pass super()._clone(src, pdf_dest, force_duplicate, ignore_fields, visited) - def get_data(self) -> Union[bytes, str]: + def get_data(self) -> bytes: return self._data def set_data(self, data: bytes) -> None: @@ -885,7 +884,7 @@ def set_data(self, data: bytes) -> None: def hash_value_data(self) -> bytes: data = super().hash_value_data() - data += b_(self._data) + data += self._data return data def write_to_stream( @@ -955,7 +954,7 @@ def flate_encode(self, level: int = -1) -> "EncodedStreamObject": retval[NameObject(SA.FILTER)] = f if params is not None: retval[NameObject(SA.DECODE_PARMS)] = params - retval._data = FlateDecode.encode(b_(self._data), level) + retval._data = FlateDecode.encode(self._data, level) return retval def decode_as_image(self) -> Any: @@ -993,7 +992,7 @@ def __init__(self) -> None: self.decoded_self: Optional[DecodedStreamObject] = None # This overrides the parent method: - def get_data(self) -> Union[bytes, str]: + def get_data(self) -> bytes: from ..filters import decode_stream_data if self.decoded_self is not None: @@ -1003,7 +1002,7 @@ def get_data(self) -> Union[bytes, str]: # create decoded object decoded = DecodedStreamObject() - decoded.set_data(b_(decode_stream_data(self))) + decoded.set_data(decode_stream_data(self)) for key, value in list(self.items()): if key not in (SA.LENGTH, SA.FILTER, SA.DECODE_PARMS): decoded[key] = value @@ -1058,7 +1057,7 @@ def __init__( # The inner list has two elements: # Element 0: List # Element 1: str - self._operations: List[Tuple[Any, Any]] = [] + self._operations: List[Tuple[Any, bytes]] = [] # stream may be a StreamObject or an ArrayObject containing # multiple StreamObjects to be cat'd together. 
@@ -1069,14 +1068,14 @@ def __init__( if isinstance(stream, ArrayObject): data = b"" for s in stream: - data += b_(s.get_object().get_data()) + data += s.get_object().get_data() if len(data) == 0 or data[-1] != b"\n": data += b"\n" super().set_data(bytes(data)) else: stream_data = stream.get_data() assert stream_data is not None - super().set_data(b_(stream_data)) + super().set_data(stream_data) self.forced_encoding = forced_encoding def clone( @@ -1132,7 +1131,7 @@ def _clone( ignore_fields: """ src_cs = cast("ContentStream", src) - super().set_data(b_(src_cs._data)) + super().set_data(src_cs._data) self.pdf = pdf_dest self._operations = list(src_cs._operations) self.forced_encoding = src_cs.forced_encoding @@ -1249,10 +1248,10 @@ def get_data(self) -> bytes: for op in operands: op.write_to_stream(new_data) new_data.write(b" ") - new_data.write(b_(operator)) + new_data.write(operator) new_data.write(b"\n") self._data = new_data.getvalue() - return b_(self._data) + return self._data # This overrides the parent method: def set_data(self, data: bytes) -> None: @@ -1262,21 +1261,21 @@ def set_data(self, data: bytes) -> None: @property def operations(self) -> List[Tuple[Any, Any]]: if not self._operations and self._data: - self._parse_content_stream(BytesIO(b_(self._data))) + self._parse_content_stream(BytesIO(self._data)) self._data = b"" return self._operations @operations.setter - def operations(self, operations: List[Tuple[Any, Any]]) -> None: + def operations(self, operations: List[Tuple[Any, bytes]]) -> None: self._operations = operations self._data = b"" def isolate_graphics_state(self) -> None: if self._operations: - self._operations.insert(0, ([], "q")) - self._operations.append(([], "Q")) + self._operations.insert(0, ([], b"q")) + self._operations.append(([], b"Q")) elif self._data: - self._data = b"q\n" + b_(self._data) + b"\nQ\n" + self._data = b"q\n" + self._data + b"\nQ\n" # This overrides the parent method: def write_to_stream( diff --git a/pypdf/generic/_utils.py b/pypdf/generic/_utils.py index fdcdc33399..b5ac6632ac 100644 --- a/pypdf/generic/_utils.py +++ b/pypdf/generic/_utils.py @@ -2,7 +2,7 @@ from typing import Dict, List, Tuple, Union from .._codecs import _pdfdoc_encoding -from .._utils import StreamType, b_, logger_warning, read_non_whitespace +from .._utils import StreamType, logger_warning, read_non_whitespace from ..errors import STREAM_TRUNCATED_PREMATURELY, PdfStreamError from ._base import ByteStringObject, TextStringObject @@ -16,7 +16,7 @@ def read_hex_string_from_stream( forced_encoding: Union[None, str, List[str], Dict[int, str]] = None, ) -> Union["TextStringObject", "ByteStringObject"]: stream.read(1) - txt = "" + arr = [] x = b"" while True: tok = read_non_whitespace(stream) @@ -26,13 +26,37 @@ def read_hex_string_from_stream( break x += tok if len(x) == 2: - txt += chr(int(x, base=16)) + arr.append(int(x, base=16)) x = b"" if len(x) == 1: x += b"0" - if len(x) == 2: - txt += chr(int(x, base=16)) - return create_string_object(b_(txt), forced_encoding) + if x != b"": + arr.append(int(x, base=16)) + return create_string_object(bytes(arr), forced_encoding) + + +__ESPACE_DICT__ = { + b"n": ord(b"\n"), + b"r": ord(b"\r"), + b"t": ord(b"\t"), + b"b": ord(b"\b"), + b"f": ord(b"\f"), + b"(": ord(b"("), + b")": ord(b")"), + b"/": ord(b"/"), + b"\\": ord(b"\\"), + b" ": ord(b" "), + b"%": ord(b"%"), + b"<": ord(b"<"), + b">": ord(b">"), + b"[": ord(b"["), + b"]": ord(b"]"), + b"#": ord(b"#"), + b"_": ord(b"_"), + b"&": ord(b"&"), + b"$": ord(b"$"), +} 
+__BACKSLASH_CODE__ = 92 def read_string_from_stream( @@ -54,30 +78,9 @@ def read_string_from_stream( break elif tok == b"\\": tok = stream.read(1) - escape_dict = { - b"n": b"\n", - b"r": b"\r", - b"t": b"\t", - b"b": b"\b", - b"f": b"\f", - b"c": rb"\c", - b"(": b"(", - b")": b")", - b"/": b"/", - b"\\": b"\\", - b" ": b" ", - b"%": b"%", - b"<": b"<", - b">": b">", - b"[": b"[", - b"]": b"]", - b"#": b"#", - b"_": b"_", - b"&": b"&", - b"$": b"$", - } try: - tok = escape_dict[tok] + txt.append(__ESPACE_DICT__[tok]) + continue except KeyError: if b"0" <= tok <= b"7": # "The number ddd may consist of one, two, or three @@ -85,6 +88,7 @@ def read_string_from_stream( # Three octal digits shall be used, with leading zeros # as needed, if the next character of the string is also # a digit." (PDF reference 7.3.4.2, p 16) + sav = stream.tell() - 1 for _ in range(2): ntok = stream.read(1) if b"0" <= ntok <= b"7": @@ -92,7 +96,13 @@ def read_string_from_stream( else: stream.seek(-1, 1) # ntok has to be analyzed break - tok = b_(chr(int(tok, base=8))) + i = int(tok, base=8) + if i > 255: + txt.append(__BACKSLASH_CODE__) + stream.seek(sav) + else: + txt.append(i) + continue elif tok in b"\n\r": # This case is hit when a backslash followed by a line # break occurs. If it's a multi-char EOL, consume the @@ -102,12 +112,13 @@ def read_string_from_stream( stream.seek(-1, 1) # Then don't add anything to the actual string, since this # line break was escaped: - tok = b"" + continue else: msg = f"Unexpected escaped string: {tok.decode('utf-8','ignore')}" logger_warning(msg, __name__) - txt.append(tok) - return create_string_object(b"".join(txt), forced_encoding) + txt.append(__BACKSLASH_CODE__) + txt.append(ord(tok)) + return create_string_object(bytes(txt), forced_encoding) def create_string_object( diff --git a/resources/Seige_of_Vicksburg_Sample_OCR-crazyones-merged.pdf b/resources/Seige_of_Vicksburg_Sample_OCR-crazyones-merged.pdf index 0e9633ac16c138eeaa90d3cf13e9f7cd6e2c006d..a53f28f0be432c38a1fff33672a2170eeb5f553f 100644 GIT binary patch delta 1135 zcmZuwJ5B>Z3>Bn7L8-XFHtGEtkL~mzB#PXGI^7lsK}(f3N1%gya3N-+5Rx~UZB|eA z_*(w%S~Mtv1*EGs-~I8J>sv{zoZ`BgRCDn2^;DWAA#+qhOsy z9_nuY2iNV#)9h}noP_mNYj;O2#kxAbfL+j&#EQMi$qgIN%*aCBkgS9v3tOTYrFns* zi?DXRqIyy{%_l0Z{g4pthD0$;#wiU+yHv)c!>vmue00SNBRm4^N)hR?Vv~jK{H4mw ziIFg?VZq^h6P&9s+GXO#zoHdee$2JJpxz|Hl_r{ zY(@&UHcToGu}TFAn%96`U{@PYmtn8OwFX4O-WCKhW)JHKw%|G;shLP7tsbKDqy5qZ aX;Lhg{J|DdMd<#DhbONh0xtWZ9J{UqF&k&I6)7fzU zDDp4D(!te>XPgE={aR6UiiW`%ZfqTeiIj@xjiPk$e@q9UWn5rLy=@eWjv@^6B7z!u zVL|uoObDgBOUxb4d9AVd3tS3uCsM>HdG}UKHtl4X<1DUS`o&!~`3?a}@xUY!MBL9r zrG8xWxg)aX>X2IbX&d=ezp1ic;>(#79o%KUaWwR|p2uxbk YRyD8Bjbb46Z8kjJvC(vTJUbbE19m{nQvd(} diff --git a/tests/test_cmap.py b/tests/test_cmap.py index 9dcfb252d5..69f2931f67 100644 --- a/tests/test_cmap.py +++ b/tests/test_cmap.py @@ -1,13 +1,19 @@ """Test the pypdf_cmap module.""" from io import BytesIO +from pathlib import Path import pytest -from pypdf import PdfReader +from pypdf import PdfReader, PdfWriter from pypdf._cmap import build_char_map +from pypdf.generic import ArrayObject, NameObject, NullObject from . 
import get_data_from_url +TESTS_ROOT = Path(__file__).parent.resolve() +PROJECT_ROOT = TESTS_ROOT.parent +RESOURCE_ROOT = PROJECT_ROOT / "resources" + @pytest.mark.enable_socket() @pytest.mark.slow() @@ -206,3 +212,13 @@ def test_eten_b5(): """Issue #2356""" reader = PdfReader(BytesIO(get_data_from_url(name="iss2290.pdf"))) reader.pages[0].extract_text().startswith("1/7 \n富邦新終身壽險") + + +def test_null_missing_width(): + """For coverage of 2792""" + writer = PdfWriter(RESOURCE_ROOT / "crazyones.pdf") + page = writer.pages[0] + ft = page["/Resources"]["/Font"]["/F1"] + ft[NameObject("/Widths")] = ArrayObject() + ft["/FontDescriptor"][NameObject("/MissingWidth")] = NullObject() + page.extract_text() diff --git a/tests/test_page.py b/tests/test_page.py index cb7b6c723f..72df648e45 100644 --- a/tests/test_page.py +++ b/tests/test_page.py @@ -1131,9 +1131,9 @@ def test_merge_page_resources_smoke_test(): # use these keys for some "operations", to validate renaming # (the operand name doesn't matter) contents1 = page1[NO("/Contents")] = ContentStream(None, None) - contents1.operations = [(ArrayObject(props1.keys()), "page1-contents")] + contents1.operations = [(ArrayObject(props1.keys()), b"page1-contents")] contents2 = page2[NO("/Contents")] = ContentStream(None, None) - contents2.operations = [(ArrayObject(props2.keys()), "page2-contents")] + contents2.operations = [(ArrayObject(props2.keys()), b"page2-contents")] expected_properties = { "/just1": "/just1-value", @@ -1438,3 +1438,12 @@ def test_negative_index(): src_abs = RESOURCE_ROOT / "git.pdf" reader = PdfReader(src_abs) assert reader.pages[0] == reader.pages[-1] + + +def test_get_contents_as_bytes(): + writer = PdfWriter(RESOURCE_ROOT / "crazyones.pdf") + co = writer.pages[0]["/Contents"][0] + expected = co.get_data() + assert writer.pages[0]._get_contents_as_bytes() == expected + writer.pages[0][NameObject("/Contents")] = writer.pages[0]["/Contents"][0] + assert writer.pages[0]._get_contents_as_bytes() == expected diff --git a/tests/test_utils.py b/tests/test_utils.py index 856bedd863..a4ddff8831 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -109,22 +109,6 @@ def test_mark_location(): Path("pypdf_pdfLocation.txt").unlink() # cleanup -@pytest.mark.parametrize( - ("input_str", "expected"), - [ - ("foo", b"foo"), - ("😀", "😀".encode()), - ("‰", "‰".encode()), - ("▷", "▷".encode()), - ("世", "世".encode()), - # A multi-character string example with non-latin-1 characters: - ("😀😃", "😀😃".encode()), - ], -) -def test_b(input_str: str, expected: bytes): - assert pypdf._utils.b_(input_str) == expected - - def test_deprecate_no_replacement(): with pytest.warns(DeprecationWarning) as warn: pypdf._utils.deprecate_no_replacement("foo", removed_in="3.0.0") diff --git a/tests/test_workflows.py b/tests/test_workflows.py index 93bc0c9e5e..4407b8fd5e 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -391,11 +391,11 @@ def test_merge(tmp_path, url, name): { "/Author": "Unknown", "/CreationDate": "Thursday, May 06, 1999 3:56:54 PM", - "/Creator": "C:DEBÆł8", + "/Creator": r"C:\DEB\6338", "/Keywords": "", "/Producer": "Acrobat PDFWriter 3.02 for Windows", "/Subject": "", - "/Title": "C:DEBÆł8-6R.PDF", + "/Title": r"C:\DEB\6338-6R.PDF", }, ) ], diff --git a/tests/test_writer.py b/tests/test_writer.py index 9dfeffdd89..84d84d0db6 100644 --- a/tests/test_writer.py +++ b/tests/test_writer.py @@ -1290,7 +1290,7 @@ def test_attachments(): to_add = [ ("foobar.txt", b"foobarcontent"), ("foobar2.txt", b"foobarcontent2"), - 
("foobar2.txt", b"2nd_foobarcontent"), + ("foobar2.txt", "2nd_foobarcontent"), ] for name, content in to_add: writer.add_attachment(name, content) From a9758ae1736adc51cc9bdc120b11a6d451a17e74 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Mon, 12 Aug 2024 21:08:21 +0200 Subject: [PATCH 11/43] MAINT: Improve test coverage (#2796) --- tests/test_writer.py | 10 ++++++++++ tests/test_xmp.py | 31 ++++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/tests/test_writer.py b/tests/test_writer.py index 84d84d0db6..eea1c6a488 100644 --- a/tests/test_writer.py +++ b/tests/test_writer.py @@ -2188,6 +2188,10 @@ def test_replace_object(): reader._replace_object(reader.pages[0].indirect_reference, reader.pages[0]) pg = PageObject.create_blank_page(writer, 1000, 1000) reader._replace_object(reader.pages[0].indirect_reference, pg) + pg = PageObject.create_blank_page(None, 1000, 1000) + pg[NameObject("/Contents")] = writer.pages[0]["/Contents"] + writer._add_object(pg) + writer.add_page(pg) def test_mime_jupyter(): @@ -2300,3 +2304,9 @@ def test_matrix_entry_in_field_annots(): auto_regenerate=False, ) assert "/Matrix" in writer.pages[0]["/Annots"][5].get_object()["/AP"]["/N"] + + +def test_set_need_appearances_writer(): + """Minimal test for coverage""" + writer = PdfWriter() + writer.set_need_appearances_writer() diff --git a/tests/test_xmp.py b/tests/test_xmp.py index f864a9df9d..6615b93c8e 100644 --- a/tests/test_xmp.py +++ b/tests/test_xmp.py @@ -7,7 +7,7 @@ import pypdf.generic import pypdf.xmp -from pypdf import PdfReader +from pypdf import PdfReader, PdfWriter from pypdf.errors import PdfReadError from . import get_data_from_url @@ -42,6 +42,35 @@ def test_read_xmp_metadata_samples(src): } +def test_writer_xmp_metadata_samples(): + writer = PdfWriter(SAMPLE_ROOT / "020-xmp/output_with_metadata_pymupdf.pdf") + xmp = writer.xmp_metadata + assert xmp + assert xmp.dc_contributor == [] + assert xmp.dc_creator == ["John Doe"] + assert xmp.dc_source == "Martin Thoma" # attribute node + assert xmp.dc_description == {"x-default": "This is a text"} + assert xmp.dc_date == [datetime(1990, 4, 28, 0, 0)] + assert xmp.dc_title == {"x-default": "Sample PDF with XMP Metadata"} + assert xmp.custom_properties == { + "Style": "FooBarStyle", + "other": "worlds", + "⏰": "time", + } + co = pypdf.generic.ContentStream(None, None) + co.set_data( + xmp.stream.get_data().replace( + b'dc:source="Martin Thoma"', b'dc:source="Pubpub-Zz"' + ) + ) + writer.xmp_metadata = pypdf.xmp.XmpInformation(co) + b = BytesIO() + writer.write(b) + reader = PdfReader(b) + xmp2 = reader.xmp_metadata + assert xmp2.dc_source == "Pubpub-Zz" + + @pytest.mark.parametrize( ("src", "has_xmp"), [ From cf7fcfd568bb96bb2a3b978a0bd031a18e6d90b7 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Tue, 13 Aug 2024 11:09:11 +0200 Subject: [PATCH 12/43] ENH: Compress PDF files merging identical objects (#2795) Add compress_identical_objects(). Discovered in #2728. Closes #2794. Closes #2768. 
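A minimal usage sketch of the new call (file names are placeholders; `clone_from` is the existing `PdfWriter` cloning parameter):

```python
from pypdf import PdfWriter

writer = PdfWriter(clone_from="big-old-file.pdf")

# Merge identical indirect objects and drop unreferenced ones,
# applied just before serializing as the updated docs recommend.
writer.compress_identical_objects(remove_identicals=True, remove_orphans=True)

with open("smaller-new-file.pdf", "wb") as fp:
    writer.write(fp)
```

Per the documentation added below, `remove_identicals` controls merging of duplicate objects and `remove_orphans` controls dropping unused ones; both can be toggled independently.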
--- docs/user/file-size.md | 20 +- pypdf/_text_extraction/_layout_mode/_font.py | 22 +- pypdf/_writer.py | 246 ++++++++++--------- pypdf/generic/_base.py | 3 + tests/test_writer.py | 23 ++ 5 files changed, 177 insertions(+), 137 deletions(-) diff --git a/docs/user/file-size.md b/docs/user/file-size.md index 0ee72e37e3..d47ddcc0ed 100644 --- a/docs/user/file-size.md +++ b/docs/user/file-size.md @@ -9,23 +9,17 @@ Some PDF documents contain the same object multiple times. For example, if an image appears three times in a PDF it could be embedded three times. Or it can be embedded once and referenced twice. -This can be done by reading and writing the file: +When adding data to a PdfWriter, the data is copied while respecting the original format. +For example, if two pages include the same image which is duplicated in the source document, the object will be duplicated in the PdfWriter object. -```python -from pypdf import PdfReader, PdfWriter - -reader = PdfReader("big-old-file.pdf") -writer = PdfWriter() +Additionally, when you delete objects in a document, pypdf cannot easily identify whether the objects are used elsewhere or not or if the user wants to keep them in. When writing the PDF file, these objects will be hidden within (part of the file, but not displayed). -for page in reader.pages: - writer.add_page(page) +In order to reduce the file size, use a compression call: `writer.compress_identical_objects(remove_identicals=True, remove_orphans=True)` -if reader.metadata is not None: - writer.add_metadata(reader.metadata) +* `remove_identicals` enables/disables compression merging identical objects. +* `remove_orphans` enables/disables suppression of unused objects. -with open("smaller-new-file.pdf", "wb") as fp: - writer.write(fp) -``` +It is recommended to apply this process just before writing to the file/stream. It depends on the PDF how well this works, but we have seen an 86% file reduction (from 5.7 MB to 0.8 MB) within a real PDF. 
diff --git a/pypdf/_text_extraction/_layout_mode/_font.py b/pypdf/_text_extraction/_layout_mode/_font.py index 40655b1b22..1d9617d74a 100644 --- a/pypdf/_text_extraction/_layout_mode/_font.py +++ b/pypdf/_text_extraction/_layout_mode/_font.py @@ -44,7 +44,7 @@ def __post_init__(self) -> None: self.font_dictionary["/DescendantFonts"] ): while isinstance(d_font, IndirectObject): - d_font = d_font.get_object() # type: ignore[assignment] + d_font = d_font.get_object() self.font_dictionary["/DescendantFonts"][d_font_idx] = d_font ord_map = { ord(_target): _surrogate @@ -75,7 +75,11 @@ def __post_init__(self) -> None: { ord_map[_cidx]: _width for _cidx, _width in zip( - range(cast(int, start_idx), cast(int, start_idx) + len(width_list), 1), + range( + cast(int, start_idx), + cast(int, start_idx) + len(width_list), + 1, + ), width_list, ) if _cidx in ord_map @@ -83,12 +87,20 @@ def __post_init__(self) -> None: ) skip_count = 1 # check for format (2): `int int int` - elif isinstance(w_next_entry, (int, float)) and isinstance(_w[idx + 2].get_object(), (int, float)): - start_idx, stop_idx, const_width = w_entry, w_next_entry, _w[idx + 2].get_object() + elif isinstance(w_next_entry, (int, float)) and isinstance( + _w[idx + 2].get_object(), (int, float) + ): + start_idx, stop_idx, const_width = ( + w_entry, + w_next_entry, + _w[idx + 2].get_object(), + ) self.width_map.update( { ord_map[_cidx]: const_width - for _cidx in range(cast(int, start_idx), cast(int, stop_idx + 1), 1) + for _cidx in range( + cast(int, start_idx), cast(int, stop_idx + 1), 1 + ) if _cidx in ord_map } ) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index d73c00e3d4..a72e2a23df 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -27,20 +27,19 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. -import collections import decimal import enum import hashlib import re import uuid from io import BytesIO, FileIO, IOBase +from itertools import compress from pathlib import Path from types import TracebackType from typing import ( IO, Any, Callable, - Deque, Dict, Iterable, List, @@ -62,6 +61,7 @@ StrByteType, StreamType, _get_max_pdf_version_header, + deprecate, deprecate_with_replacement, logger_warning, ) @@ -156,12 +156,17 @@ def __init__( clone_from: Union[None, PdfReader, StrByteType, Path] = None, ) -> None: self._header = b"%PDF-1.3" - self._objects: List[PdfObject] = [] + self._objects: List[Optional[PdfObject]] = [] """The indirect objects in the PDF.""" - self._idnum_hash: Dict[bytes, IndirectObject] = {} - """Maps hash values of indirect objects to their IndirectObject instances.""" + """Maps hash values of indirect objects to the list of IndirectObjects. + This is used for compression. + """ + self._idnum_hash: Dict[bytes, Tuple[IndirectObject, List[IndirectObject]]] = {} + """List of already translated IDs. + dict[id(pdf)][(idnum, generation)] + """ self._id_translated: Dict[int, Dict[int, int]] = {} # The root of our page tree node. 
@@ -370,10 +375,13 @@ def get_object( indirect_reference: Union[int, IndirectObject], ) -> PdfObject: if isinstance(indirect_reference, int): - return self._objects[indirect_reference - 1] - if indirect_reference.pdf != self: + obj = self._objects[indirect_reference - 1] + elif indirect_reference.pdf != self: raise ValueError("pdf must be self") - return self._objects[indirect_reference.idnum - 1] + else: + obj = self._objects[indirect_reference.idnum - 1] + assert obj is not None # clarification for mypy + return obj def _replace_object( self, @@ -392,7 +400,9 @@ def _replace_object( obj = obj.clone(self) self._objects[indirect_reference - 1] = obj obj.indirect_reference = IndirectObject(indirect_reference, gen, self) - return self._objects[indirect_reference - 1] + + assert isinstance(obj, PdfObject) # clarification for mypy + return obj def _add_page( self, @@ -1242,14 +1252,13 @@ def write_stream(self, stream: StreamType) -> None: "It may not be written to correctly.", __name__, ) + # deprecated to be removed in pypdf 6.0.0 : + # if not self._root: + # self._root = self._add_object(self._root_object) + # self._sweep_indirect_references(self._root) - if not self._root: - self._root = self._add_object(self._root_object) - - self._sweep_indirect_references(self._root) - - object_positions = self._write_pdf_structure(stream) - xref_location = self._write_xref_table(stream, object_positions) + object_positions, free_objects = self._write_pdf_structure(stream) + xref_location = self._write_xref_table(stream, object_positions, free_objects) self._write_trailer(stream, xref_location) def write(self, stream: Union[Path, StrByteType]) -> Tuple[bool, IO[Any]]: @@ -1282,8 +1291,9 @@ def write(self, stream: Union[Path, StrByteType]) -> Tuple[bool, IO[Any]]: return my_file, stream - def _write_pdf_structure(self, stream: StreamType) -> List[int]: + def _write_pdf_structure(self, stream: StreamType) -> Tuple[List[int], List[int]]: object_positions = [] + free_objects = [] # will contain list of all free entries stream.write(self.pdf_header.encode() + b"\n") stream.write(b"%\xE2\xE3\xCF\xD3\n") @@ -1296,15 +1306,26 @@ def _write_pdf_structure(self, stream: StreamType) -> List[int]: obj = self._encryption.encrypt_object(obj, idnum, 0) obj.write_to_stream(stream) stream.write(b"\nendobj\n") - return object_positions - - def _write_xref_table(self, stream: StreamType, object_positions: List[int]) -> int: + else: + object_positions.append(-1) + free_objects.append(i + 1) + free_objects.append(0) # add 0 to loop in accordance with PDF spec + return object_positions, free_objects + + def _write_xref_table( + self, stream: StreamType, object_positions: List[int], free_objects: List[int] + ) -> int: xref_location = stream.tell() stream.write(b"xref\n") stream.write(f"0 {len(self._objects) + 1}\n".encode()) - stream.write(f"{0:0>10} {65535:0>5} f \n".encode()) + stream.write(f"{free_objects[0]:0>10} {65535:0>5} f \n".encode()) + free_idx = 1 for offset in object_positions: - stream.write(f"{offset:0>10} {0:0>5} n \n".encode()) + if offset > 0: + stream.write(f"{offset:0>10} {0:0>5} n \n".encode()) + else: + stream.write(f"{free_objects[free_idx]:0>10} {1:0>5} f \n".encode()) + free_idx += 1 return xref_location def _write_trailer(self, stream: StreamType, xref_location: int) -> None: @@ -1349,6 +1370,79 @@ def add_metadata(self, infos: Dict[str, Any]) -> None: assert isinstance(self._info, DictionaryObject) self._info.update(args) + def compress_identical_objects( + self, + remove_identicals: bool = True, + 
remove_orphans: bool = True, + ) -> None: + """ + Parse the PDF file and merge objects that have same hash. + This will make objects common to multiple pages. + Recommended to be used just before writing output. + + Args: + remove_identicals: Remove identical objects. + remove_orphans: Remove unreferenced objects. + """ + + def replace_in_obj( + obj: PdfObject, crossref: Dict[IndirectObject, IndirectObject] + ) -> None: + if isinstance(obj, DictionaryObject): + key_val = obj.items() + elif isinstance(obj, ArrayObject): + key_val = enumerate(obj) # type: ignore + else: + return + assert isinstance(obj, (DictionaryObject, ArrayObject)) + for k, v in key_val: + if isinstance(v, IndirectObject): + orphans[v.idnum - 1] = False + if v in crossref: + obj[k] = crossref[v] + else: + """the filtering on DictionaryObject and ArrayObject only + will be performed within replace_in_obj""" + replace_in_obj(v, crossref) + + # _idnum_hash :dict[hash]=(1st_ind_obj,[other_indir_objs,...]) + self._idnum_hash = {} + orphans = [True] * len(self._objects) + # look for similar objects + for idx, obj in enumerate(self._objects): + if obj is None: + continue + assert isinstance(obj.indirect_reference, IndirectObject) + h = obj.hash_value() + if remove_identicals and h in self._idnum_hash: + self._idnum_hash[h][1].append(obj.indirect_reference) + self._objects[idx] = None + else: + self._idnum_hash[h] = (obj.indirect_reference, []) + + # generate the dict converting others to 1st + cnv = {v[0]: v[1] for v in self._idnum_hash.values() if len(v[1]) > 0} + cnv_rev: Dict[IndirectObject, IndirectObject] = {} + for k, v in cnv.items(): + cnv_rev.update(zip(v, (k,) * len(v))) + + # replace reference to merged objects + for obj in self._objects: + if isinstance(obj, (DictionaryObject, ArrayObject)): + replace_in_obj(obj, cnv_rev) + + # remove orphans (if applicable) + orphans[self.root_object.indirect_reference.idnum - 1] = False # type: ignore + + orphans[self._info.indirect_reference.idnum - 1] = False # type: ignore + + try: + orphans[self._ID.indirect_reference.idnum - 1] = False # type: ignore + except AttributeError: + pass + for i in compress(range(len(self._objects)), orphans): + self._objects[i] = None + def _sweep_indirect_references( self, root: Union[ @@ -1363,7 +1457,7 @@ def _sweep_indirect_references( TextStringObject, NullObject, ], - ) -> None: + ) -> None: # deprecated """ Resolving any circular references to Page objects. @@ -1379,73 +1473,13 @@ def _sweep_indirect_references( Args: root: The root of the PDF object tree to sweep. 
""" - stack: Deque[ - Tuple[ - Any, - Optional[Any], - Any, - List[PdfObject], - ] - ] = collections.deque() - discovered = [] - parent = None - grant_parents: List[PdfObject] = [] - key_or_id = None - - # Start from root - stack.append((root, parent, key_or_id, grant_parents)) - - while len(stack): - data, parent, key_or_id, grant_parents = stack.pop() - - # Build stack for a processing depth-first - if isinstance(data, (ArrayObject, DictionaryObject)): - for key, value in data.items(): - stack.append( - ( - value, - data, - key, - grant_parents + [parent] if parent is not None else [], - ) - ) - elif isinstance(data, IndirectObject) and data.pdf != self: - data = self._resolve_indirect_object(data) - - if str(data) not in discovered: - discovered.append(str(data)) - stack.append((data.get_object(), None, None, [])) - - # Check if data has a parent and if it is a dict or - # an array update the value - if isinstance(parent, (DictionaryObject, ArrayObject)): - if isinstance(data, StreamObject): - # a dictionary value is a stream; streams must be indirect - # objects, so we need to change this value. - data = self._resolve_indirect_object(self._add_object(data)) - - update_hashes = [] - - # Data changed and thus the hash value changed - if parent[key_or_id] != data: - update_hashes = [parent.hash_value()] + [ - grant_parent.hash_value() for grant_parent in grant_parents - ] - parent[key_or_id] = data - - # Update old hash value to new hash value - for old_hash in update_hashes: - indirect_reference = self._idnum_hash.pop(old_hash, None) - - if indirect_reference is not None: - indirect_reference_obj = indirect_reference.get_object() - - if indirect_reference_obj is not None: - self._idnum_hash[ - indirect_reference_obj.hash_value() - ] = indirect_reference + deprecate( + "_sweep_indirect_references has been removed, please report to dev team if this warning is observed", + ) - def _resolve_indirect_object(self, data: IndirectObject) -> IndirectObject: + def _resolve_indirect_object( + self, data: IndirectObject + ) -> IndirectObject: # deprecated """ Resolves an indirect object to an indirect object in this PDF file. @@ -1470,36 +1504,10 @@ def _resolve_indirect_object(self, data: IndirectObject) -> IndirectObject: Raises: ValueError: If the input stream is closed. 
""" - if hasattr(data.pdf, "stream") and data.pdf.stream.closed: - raise ValueError(f"I/O operation on closed file: {data.pdf.stream.name}") - - if data.pdf == self: - return data - - # Get real object indirect object - real_obj = data.pdf.get_object(data) - - if real_obj is None: - logger_warning( - f"Unable to resolve [{data.__class__.__name__}: {data}], " - "returning NullObject instead", - __name__, - ) - real_obj = NullObject() - - hash_value = real_obj.hash_value() - - # Check if object is handled - if hash_value in self._idnum_hash: - return self._idnum_hash[hash_value] - - if data.pdf == self: - self._idnum_hash[hash_value] = IndirectObject(data.idnum, 0, self) - # This is new object in this pdf - else: - self._idnum_hash[hash_value] = self._add_object(real_obj) - - return self._idnum_hash[hash_value] + deprecate( + "_resolve_indirect_object has been removed, please report to dev team if this warning is observed", + ) + return IndirectObject(0, 0, self) def get_reference(self, obj: PdfObject) -> IndirectObject: idnum = self._objects.index(obj) + 1 diff --git a/pypdf/generic/_base.py b/pypdf/generic/_base.py index 309d389cc8..9899cb48ca 100644 --- a/pypdf/generic/_base.py +++ b/pypdf/generic/_base.py @@ -239,6 +239,9 @@ def __init__(self, idnum: int, generation: int, pdf: Any) -> None: # PdfReader self.generation = generation self.pdf = pdf + def __hash__(self) -> int: + return hash((self.idnum, self.generation, id(self.pdf))) + def clone( self, pdf_dest: PdfWriterProtocol, diff --git a/tests/test_writer.py b/tests/test_writer.py index eea1c6a488..49fe585385 100644 --- a/tests/test_writer.py +++ b/tests/test_writer.py @@ -2306,6 +2306,29 @@ def test_matrix_entry_in_field_annots(): assert "/Matrix" in writer.pages[0]["/Annots"][5].get_object()["/AP"]["/N"] +@pytest.mark.enable_socket() +def test_compress_identical_objects(): + """Cf #2728 and #2794""" + url = "https://github.com/user-attachments/files/16575458/tt2.pdf" + name = "iss2794.pdf" + in_bytes = BytesIO(get_data_from_url(url, name=name)) + writer = PdfWriter(in_bytes) + writer.compress_identical_objects(remove_orphans=False) + out1 = BytesIO() + writer.write(out1) + assert 0.5 * len(in_bytes.getvalue()) > len(out1.getvalue()) + writer.remove_page( + 1 + ) # page0 contains fields which keep reference to the deleted page + out2 = BytesIO() + writer.write(out2) + assert len(out1.getvalue()) - 100 < len(out2.getvalue()) + writer.compress_identical_objects(remove_identicals=False) + out3 = BytesIO() + writer.write(out3) + assert len(out2.getvalue()) > len(out3.getvalue()) + + def test_set_need_appearances_writer(): """Minimal test for coverage""" writer = PdfWriter() From 2eb565d914f1dea5c9024aa8fb5f2332dd36f7f2 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Tue, 13 Aug 2024 14:21:19 +0200 Subject: [PATCH 13/43] ROB: Fix extract_text() issues on damaged PDFs (#2760) Closes #2702. 
--- pypdf/_cmap.py | 2 +- tests/test_cmap.py | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/pypdf/_cmap.py b/pypdf/_cmap.py index d635724d25..035850a4a0 100644 --- a/pypdf/_cmap.py +++ b/pypdf/_cmap.py @@ -258,7 +258,7 @@ def prepare_cm(ft: DictionaryObject) -> bytes: cm: bytes if isinstance(tu, StreamObject): cm = cast(DecodedStreamObject, ft["/ToUnicode"]).get_data() - elif isinstance(tu, str) and tu.startswith("/Identity"): + else: # if (tu is None) or cast(str, tu).startswith("/Identity"): # the full range 0000-FFFF will be processed cm = b"beginbfrange\n<0000> <0001> <0000>\nendbfrange" if isinstance(cm, str): diff --git a/tests/test_cmap.py b/tests/test_cmap.py index 69f2931f67..9ec55723fa 100644 --- a/tests/test_cmap.py +++ b/tests/test_cmap.py @@ -6,7 +6,7 @@ from pypdf import PdfReader, PdfWriter from pypdf._cmap import build_char_map -from pypdf.generic import ArrayObject, NameObject, NullObject +from pypdf.generic import ArrayObject, IndirectObject, NameObject, NullObject from . import get_data_from_url @@ -214,6 +214,22 @@ def test_eten_b5(): reader.pages[0].extract_text().startswith("1/7 \n富邦新終身壽險") +@pytest.mark.enable_socket() +def test_missing_entries_in_cmap(): + """ + Issue #2702: this issue is observed on damaged pdfs + use of this file in test has been discarded as too slow/long + we will create the same error from crazyones + """ + pdf_path = RESOURCE_ROOT / "crazyones.pdf" + reader = PdfReader(pdf_path) + p = reader.pages[0] + p["/Resources"]["/Font"]["/F1"][NameObject("/ToUnicode")] = IndirectObject( + 99999999, 0, reader + ) + p.extract_text() + + def test_null_missing_width(): """For coverage of 2792""" writer = PdfWriter(RESOURCE_ROOT / "crazyones.pdf") From d9a8c544e9dce3017ce6fc4acc2171bd580ccecf Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Wed, 14 Aug 2024 21:09:33 +0200 Subject: [PATCH 14/43] ENH: Report PdfReadError instead of RecursionError (#2800) Closes #2761. --- pypdf/_doc_common.py | 7 ++++++- pypdf/_reader.py | 5 ++++- tests/test_reader.py | 14 +++++++++++++- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/pypdf/_doc_common.py b/pypdf/_doc_common.py index ffbdb7882e..4f607340db 100644 --- a/pypdf/_doc_common.py +++ b/pypdf/_doc_common.py @@ -1121,7 +1121,12 @@ def _flatten( obj = page.get_object() if obj: # damaged file may have invalid child in /Pages - self._flatten(obj, inherit, **addt) + try: + self._flatten(obj, inherit, **addt) + except RecursionError: + raise PdfReadError( + "Maximum recursion depth reached during page flattening." 
+ ) elif t == "/Page": for attr_in, value in list(inherit.items()): # if the page has it's own value, it does not inherit the diff --git a/pypdf/_reader.py b/pypdf/_reader.py index 7c084107c5..037f4e358d 100644 --- a/pypdf/_reader.py +++ b/pypdf/_reader.py @@ -541,7 +541,10 @@ def read_object_header(self, stream: StreamType) -> Tuple[int, int]: def cache_get_indirect_object( self, generation: int, idnum: int ) -> Optional[PdfObject]: - return self.resolved_objects.get((generation, idnum)) + try: + return self.resolved_objects.get((generation, idnum)) + except RecursionError: + raise PdfReadError("Maximum recursion depth reached.") def cache_indirect_object( self, generation: int, idnum: int, obj: Optional[PdfObject] diff --git a/tests/test_reader.py b/tests/test_reader.py index 0a2a32b81a..c7dc39b30e 100644 --- a/tests/test_reader.py +++ b/tests/test_reader.py @@ -115,7 +115,9 @@ def test_iss1943(): docinfo = reader.metadata docinfo.update( { - NameObject("/CreationDate"): TextStringObject("D:20230705005151Z00'00'"), + NameObject("/CreationDate"): TextStringObject( + "D:20230705005151Z00'00'" + ), NameObject("/ModDate"): TextStringObject("D:20230705005151Z00'00'"), } ) @@ -1577,3 +1579,13 @@ def test_context_manager_with_stream(): with PdfReader(pdf_stream) as reader: assert not reader.stream.closed assert not pdf_stream.closed + + +@pytest.mark.enable_socket() +@pytest.mark.timeout(10) +def test_iss2761(): + url = "https://github.com/user-attachments/files/16312198/crash-b26d05712a29b241ac6f9dc7fff57428ba2d1a04.pdf" + name = "iss2761.pdf" + reader = PdfReader(BytesIO(get_data_from_url(url, name=name)), strict=False) + with pytest.raises(PdfReadError): + reader.pages[0].extract_text() From 799630daba40fe434406bd59083e8fe736178d1e Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Thu, 15 Aug 2024 14:28:51 +0200 Subject: [PATCH 15/43] BUG: Fix sheared image (#2801) Closes #2411. 
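A minimal sketch of the affected use case, assuming `scanned.pdf` stands in for a document that embeds a 4-bit (sub-byte) image; with the corrected `bits2byte()` the extracted image is no longer sheared:

```python
from pypdf import PdfReader

# "scanned.pdf" is a placeholder for a document embedding a 4-bit image;
# ImageFile.image is a PIL.Image.Image, so it can be saved directly.
reader = PdfReader("scanned.pdf")
reader.pages[0].images[0].image.save("extracted.png")
```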
--- pypdf/_xobj_image_helpers.py | 2 +- tests/test_images.py | 11 +++++++++++ tests/test_workflows.py | 2 +- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/pypdf/_xobj_image_helpers.py b/pypdf/_xobj_image_helpers.py index 7a3f40d95c..d870b15897 100644 --- a/pypdf/_xobj_image_helpers.py +++ b/pypdf/_xobj_image_helpers.py @@ -122,7 +122,7 @@ def bits2byte(data: bytes, size: Tuple[int, int], bits: int) -> bytes: by = 0 bit = 8 - bits for y in range(size[1]): - if (bit != 0) and (bit != 8 - bits): + if bit != 8 - bits: by += 1 bit = 8 - bits for x in range(size[0]): diff --git a/tests/test_images.py b/tests/test_images.py index 5955bf47c5..5fd7d0968a 100644 --- a/tests/test_images.py +++ b/tests/test_images.py @@ -462,3 +462,14 @@ def test_extract_image_from_object(caplog): co = reader.pages[0].get_contents() co.decode_as_image() assert "does not seem to be an Image" in caplog.text + + +@pytest.mark.enable_socket() +def test_4bits_images(caplog): + url = "https://github.com/user-attachments/files/16624406/tt.pdf" + name = "iss2411.pdf" + reader = PdfReader(BytesIO(get_data_from_url(url, name=name))) + url = "https://github.com/user-attachments/assets/53058564-9a28-4e4a-818f-a6528013d7dc" + name = "iss2411.png" + img = Image.open(BytesIO(get_data_from_url(url, name=name))) + assert image_similarity(reader.pages[0].images[1].image, img) == 1.0 diff --git a/tests/test_workflows.py b/tests/test_workflows.py index 4407b8fd5e..1125222fcd 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -412,7 +412,7 @@ def test_get_metadata(url, name, expected_metadata): ("url", "name", "strict", "exception"), [ ( - "https://corpora.tika.apache.org/base/docs/govdocs1/938/938702.pdf", + "https://github.com/user-attachments/files/16624503/tika-938702.pdf", "tika-938702.pdf", False, None, # iss #1090 is now fixed From 454a62a98cace9887cefa843bfb5d659e813cf8b Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Thu, 15 Aug 2024 15:59:29 +0200 Subject: [PATCH 16/43] MAINT: Fix mypy type output (#2799) Closes #2798. --- docs/modules/PageObject.rst | 8 +- pypdf/_page.py | 272 +++++++++++++++++++++++------------- pypdf/_utils.py | 79 ++--------- tests/test_workflows.py | 15 +- 4 files changed, 203 insertions(+), 171 deletions(-) diff --git a/docs/modules/PageObject.rst b/docs/modules/PageObject.rst index 45e81b6ab9..b4524b4437 100644 --- a/docs/modules/PageObject.rst +++ b/docs/modules/PageObject.rst @@ -6,14 +6,12 @@ The PageObject Class :undoc-members: :show-inheritance: -.. autoclass:: pypdf._utils.ImageFile +.. autoclass:: pypdf._page.VirtualListImages :members: :undoc-members: :show-inheritance: - :exclude-members: IndirectObject -.. autoclass:: pypdf._utils.File +.. autoclass:: pypdf._page.ImageFile :members: + :inherited-members: File :undoc-members: - :show-inheritance: - :exclude-members: IndirectObject diff --git a/pypdf/_page.py b/pypdf/_page.py index 48cdeb149f..c51aee1ab0 100644 --- a/pypdf/_page.py +++ b/pypdf/_page.py @@ -28,7 +28,9 @@ # POSSIBILITY OF SUCH DAMAGE. 
import math +from dataclasses import dataclass from decimal import Decimal +from io import BytesIO from pathlib import Path from typing import ( Any, @@ -58,9 +60,8 @@ ) from ._utils import ( CompressedTransformationMatrix, - File, - ImageFile, TransformationMatrixType, + _human_readable_bytes, logger_warning, matrix_multiply, ) @@ -85,6 +86,14 @@ StreamObject, ) +try: + from PIL.Image import Image + + pil_not_imported = False +except ImportError: + Image = object # type: ignore + pil_not_imported = True # error will be raised only when using images + MERGE_CROP_BOX = "cropbox" # pypdf<=3.4.0 used 'trimbox' @@ -301,6 +310,160 @@ def apply_on( return list(pt1) if isinstance(pt, list) else pt1 +@dataclass +class ImageFile: + """ + Image within the PDF file. *This object is not designed to be built.* + + This object should not be modified except using :func:`ImageFile.replace` to replace the image with a new one. + """ + + name: str = "" + """ + Filename as identified within the PDF file. + """ + + data: bytes = b"" + """ + Data as bytes. + """ + + image: Optional[Image] = None + """ + Data as PIL image. + """ + + indirect_reference: Optional[IndirectObject] = None + """ + Reference to the object storing the stream. + """ + + def replace(self, new_image: Image, **kwargs: Any) -> None: + """ + Replace the image with a new PIL image. + + Args: + new_image (PIL.Image.Image): The new PIL image to replace the existing image. + **kwargs: Additional keyword arguments to pass to `Image.save()`. + + Raises: + TypeError: If the image is inline or in a PdfReader. + TypeError: If the image does not belong to a PdfWriter. + TypeError: If `new_image` is not a PIL Image. + + Note: + This method replaces the existing image with a new image. + It is not allowed for inline images or images within a PdfReader. + The `kwargs` parameter allows passing additional parameters + to `Image.save()`, such as quality. + """ + if pil_not_imported: + raise ImportError( + "pillow is required to do image extraction. 
" + "It can be installed via 'pip install pypdf[image]'" + ) + + from ._reader import PdfReader + + # to prevent circular import + from .filters import _xobj_to_image + from .generic import DictionaryObject, PdfObject + + if self.indirect_reference is None: + raise TypeError("Cannot update an inline image.") + if not hasattr(self.indirect_reference.pdf, "_id_translated"): + raise TypeError("Cannot update an image not belonging to a PdfWriter.") + if not isinstance(new_image, Image): + raise TypeError("new_image shall be a PIL Image") + b = BytesIO() + new_image.save(b, "PDF", **kwargs) + reader = PdfReader(b) + assert reader.pages[0].images[0].indirect_reference is not None + self.indirect_reference.pdf._objects[self.indirect_reference.idnum - 1] = ( + reader.pages[0].images[0].indirect_reference.get_object() + ) + cast( + PdfObject, self.indirect_reference.get_object() + ).indirect_reference = self.indirect_reference + # change the object attributes + extension, byte_stream, img = _xobj_to_image( + cast(DictionaryObject, self.indirect_reference.get_object()) + ) + assert extension is not None + self.name = self.name[: self.name.rfind(".")] + extension + self.data = byte_stream + self.image = img + + def __str__(self) -> str: + return f"{self.__class__.__name__}(name={self.name}, data: {_human_readable_bytes(len(self.data))})" + + def __repr__(self) -> str: + return self.__str__()[:-1] + f", hash: {hash(self.data)})" + + +class VirtualListImages(Sequence[ImageFile]): + """ + Provides access to images referenced within a page. + Only one copy will be returned if the usage is used on the same page multiple times. + See :func:`PageObject.images` for more details. + """ + + def __init__( + self, + ids_function: Callable[[], List[Union[str, List[str]]]], + get_function: Callable[[Union[str, List[str], Tuple[str]]], ImageFile], + ) -> None: + self.ids_function = ids_function + self.get_function = get_function + self.current = -1 + + def __len__(self) -> int: + return len(self.ids_function()) + + def keys(self) -> List[Union[str, List[str]]]: + return self.ids_function() + + def items(self) -> List[Tuple[Union[str, List[str]], ImageFile]]: + return [(x, self[x]) for x in self.ids_function()] + + @overload + def __getitem__(self, index: Union[int, str, List[str]]) -> ImageFile: + ... + + @overload + def __getitem__(self, index: slice) -> Sequence[ImageFile]: + ... + + def __getitem__( + self, index: Union[int, slice, str, List[str], Tuple[str]] + ) -> Union[ImageFile, Sequence[ImageFile]]: + lst = self.ids_function() + if isinstance(index, slice): + indices = range(*index.indices(len(self))) + lst = [lst[x] for x in indices] + cls = type(self) + return cls((lambda: lst), self.get_function) + if isinstance(index, (str, list, tuple)): + return self.get_function(index) + if not isinstance(index, int): + raise TypeError("invalid sequence indices type") + len_self = len(lst) + if index < 0: + # support negative indexes + index = len_self + index + if index < 0 or index >= len_self: + raise IndexError("sequence index out of range") + return self.get_function(lst[index]) + + def __iter__(self) -> Iterator[ImageFile]: + for i in range(len(self)): + yield self[i] + + def __str__(self) -> str: + p = [f"Image_{i}={n}" for i, n in enumerate(self.ids_function())] + return f"[{', '.join(p)}]" + + class PageObject(DictionaryObject): """ PageObject represents a single page within a PDF file. 
@@ -391,33 +554,6 @@ def create_blank_page( return page - @property - def _old_images(self) -> List[File]: # deprecated - """ - Get a list of all images of the page. - - This requires pillow. You can install it via 'pip install pypdf[image]'. - - For the moment, this does NOT include inline images. They will be added - in future. - """ - images_extracted: List[File] = [] - if RES.XOBJECT not in self[PG.RESOURCES]: # type: ignore - return images_extracted - - x_object = self[PG.RESOURCES][RES.XOBJECT].get_object() # type: ignore - for obj in x_object: - if x_object[obj][IA.SUBTYPE] == "/Image": - extension, byte_stream, img = _xobj_to_image(x_object[obj]) - if extension is not None: - filename = f"{obj[1:]}{extension}" - images_extracted.append(File(name=filename, data=byte_stream)) - images_extracted[-1].image = img - images_extracted[-1].indirect_reference = x_object[ - obj - ].indirect_reference - return images_extracted - def _get_ids_image( self, obj: Optional[DictionaryObject] = None, @@ -495,7 +631,7 @@ def _get_image( return self._get_image(ids, cast(DictionaryObject, xobjs[id[0]])) @property - def images(self) -> List[ImageFile]: + def images(self) -> VirtualListImages: """ Read-only property emulating a list of images on a page. @@ -505,20 +641,19 @@ def images(self) -> List[ImageFile]: - An integer Examples: - reader.pages[0].images[0] # return fist image - reader.pages[0].images['/I0'] # return image '/I0' - # return image '/Image1' within '/TP1' Xobject/Form: - reader.pages[0].images['/TP1','/Image1'] - for img in reader.pages[0].images: # loop within all objects + * `reader.pages[0].images[0]` # return fist image + * `reader.pages[0].images['/I0']` # return image '/I0' + * `reader.pages[0].images['/TP1','/Image1']` # return image '/Image1' within '/TP1' Xobject/Form + * `for img in reader.pages[0].images:` # loops through all objects images.keys() and images.items() can be used. The ImageFile has the following properties: - `.name` : name of the object - `.data` : bytes of the object - `.image` : PIL Image Object - `.indirect_reference` : object reference + * `.name` : name of the object + * `.data` : bytes of the object + * `.image` : PIL Image Object + * `.indirect_reference` : object reference and the following methods: `.replace(new_image: PIL.Image.Image, **kwargs)` : @@ -532,7 +667,7 @@ def images(self) -> List[ImageFile]: Inline images are extracted and named ~0~, ~1~, ..., with the indirect_reference set to None. 
""" - return _VirtualListImages(self._get_ids_image, self._get_image) # type: ignore + return VirtualListImages(self._get_ids_image, self._get_image) def _translate_value_inlineimage(self, k: str, v: PdfObject) -> PdfObject: """Translate values used in inline image""" @@ -2393,60 +2528,3 @@ def process_font(f: DictionaryObject) -> None: for a in cast(DictionaryObject, cast(DictionaryObject, obj["/AP"])["/N"]): _get_fonts_walk(cast(DictionaryObject, a), fnt, emb) return fnt, emb # return the sets for each page - - -class _VirtualListImages(Sequence[ImageFile]): - def __init__( - self, - ids_function: Callable[[], List[Union[str, List[str]]]], - get_function: Callable[[Union[str, List[str], Tuple[str]]], ImageFile], - ) -> None: - self.ids_function = ids_function - self.get_function = get_function - self.current = -1 - - def __len__(self) -> int: - return len(self.ids_function()) - - def keys(self) -> List[Union[str, List[str]]]: - return self.ids_function() - - def items(self) -> List[Tuple[Union[str, List[str]], ImageFile]]: - return [(x, self[x]) for x in self.ids_function()] - - @overload - def __getitem__(self, index: Union[int, str, List[str]]) -> ImageFile: - ... - - @overload - def __getitem__(self, index: slice) -> Sequence[ImageFile]: - ... - - def __getitem__( - self, index: Union[int, slice, str, List[str], Tuple[str]] - ) -> Union[ImageFile, Sequence[ImageFile]]: - lst = self.ids_function() - if isinstance(index, slice): - indices = range(*index.indices(len(self))) - lst = [lst[x] for x in indices] - cls = type(self) - return cls((lambda: lst), self.get_function) - if isinstance(index, (str, list, tuple)): - return self.get_function(index) - if not isinstance(index, int): - raise TypeError("invalid sequence indices type") - len_self = len(lst) - if index < 0: - # support negative indexes - index = len_self + index - if index < 0 or index >= len_self: - raise IndexError("sequence index out of range") - return self.get_function(lst[index]) - - def __iter__(self) -> Iterator[ImageFile]: - for i in range(len(self)): - yield self[i] - - def __str__(self) -> str: - p = [f"Image_{i}={n}" for i, n in enumerate(self.ids_function())] - return f"[{', '.join(p)}]" diff --git a/pypdf/_utils.py b/pypdf/_utils.py index 94d45cf6d7..e0034ccc4e 100644 --- a/pypdf/_utils.py +++ b/pypdf/_utils.py @@ -36,7 +36,7 @@ import warnings from dataclasses import dataclass from datetime import datetime, timezone -from io import DEFAULT_BUFFER_SIZE, BytesIO +from io import DEFAULT_BUFFER_SIZE from os import SEEK_CUR from typing import ( IO, @@ -47,7 +47,6 @@ Pattern, Tuple, Union, - cast, overload, ) @@ -525,10 +524,18 @@ def getter(self, method): # type: ignore # noqa: ANN001, ANN202 class File: from .generic import IndirectObject - name: str - data: bytes - image: Optional[Any] = None # optional ; direct image access - indirect_reference: Optional[IndirectObject] = None # optional ; link to PdfObject + name: str = "" + """ + Filename as identified within the PDF file. + """ + data: bytes = b"" + """ + Data as bytes. + """ + indirect_reference: Optional[IndirectObject] = None + """ + Reference to the object storing the stream. 
+ """ def __str__(self) -> str: return f"{self.__class__.__name__}(name={self.name}, data: {_human_readable_bytes(len(self.data))})" @@ -537,66 +544,6 @@ def __repr__(self) -> str: return self.__str__()[:-1] + f", hash: {hash(self.data)})" -@dataclass -class ImageFile(File): - from .generic import IndirectObject - - image: Optional[Any] = None # optional ; direct PIL image access - indirect_reference: Optional[IndirectObject] = None # optional ; link to PdfObject - - def replace(self, new_image: Any, **kwargs: Any) -> None: - """ - Replace the Image with a new PIL image. - - Args: - new_image (PIL.Image.Image): The new PIL image to replace the existing image. - **kwargs: Additional keyword arguments to pass to `Image.Image.save()`. - - Raises: - TypeError: If the image is inline or in a PdfReader. - TypeError: If the image does not belong to a PdfWriter. - TypeError: If `new_image` is not a PIL Image. - - Note: - This method replaces the existing image with a new image. - It is not allowed for inline images or images within a PdfReader. - The `kwargs` parameter allows passing additional parameters - to `Image.Image.save()`, such as quality. - """ - from PIL import Image - - from ._reader import PdfReader - - # to prevent circular import - from .filters import _xobj_to_image - from .generic import DictionaryObject, PdfObject - - if self.indirect_reference is None: - raise TypeError("Can not update an inline image") - if not hasattr(self.indirect_reference.pdf, "_id_translated"): - raise TypeError("Can not update an image not belonging to a PdfWriter") - if not isinstance(new_image, Image.Image): - raise TypeError("new_image shall be a PIL Image") - b = BytesIO() - new_image.save(b, "PDF", **kwargs) - reader = PdfReader(b) - assert reader.pages[0].images[0].indirect_reference is not None - self.indirect_reference.pdf._objects[self.indirect_reference.idnum - 1] = ( - reader.pages[0].images[0].indirect_reference.get_object() - ) - cast( - PdfObject, self.indirect_reference.get_object() - ).indirect_reference = self.indirect_reference - # change the object attributes - extension, byte_stream, img = _xobj_to_image( - cast(DictionaryObject, self.indirect_reference.get_object()) - ) - assert extension is not None - self.name = self.name[: self.name.rfind(".")] + extension - self.data = byte_stream - self.image = img - - @functools.total_ordering class Version: COMPONENT_PATTERN = re.compile(r"^(\d+)(.*)$") diff --git a/tests/test_workflows.py b/tests/test_workflows.py index 1125222fcd..f01269893d 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -980,7 +980,7 @@ def test_replace_image(tmp_path): # extra tests for coverage with pytest.raises(TypeError) as exc: reader.pages[0].images[0].replace(img) - assert exc.value.args[0] == "Can not update an image not belonging to a PdfWriter" + assert exc.value.args[0] == "Cannot update an image not belonging to a PdfWriter." i = writer.pages[0].images[0] with pytest.raises(TypeError) as exc: i.replace(reader.pages[0].images[0]) # missing .image @@ -988,7 +988,16 @@ def test_replace_image(tmp_path): i.indirect_reference = None # to behave like an inline image with pytest.raises(TypeError) as exc: i.replace(reader.pages[0].images[0].image) - assert exc.value.args[0] == "Can not update an inline image" + assert exc.value.args[0] == "Cannot update an inline image." 
+ + import pypdf + + try: + pypdf._page.pil_not_imported = True + with pytest.raises(ImportError) as exc: + i.replace(reader.pages[0].images[0].image) + finally: + pypdf._page.pil_not_imported = False @pytest.mark.enable_socket() @@ -1015,7 +1024,7 @@ def test_inline_images(): with pytest.raises(TypeError) as exc: reader.pages[0].images[0].replace(img_ref) - assert exc.value.args[0] == "Can not update an inline image" + assert exc.value.args[0] == "Cannot update an inline image." _a = {} for x, y in reader.pages[2].images[0:-2].items(): From 0c81f3cfad26ddffbfc60d0ae855118e515fad8c Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Fri, 16 Aug 2024 11:52:19 +0200 Subject: [PATCH 17/43] ENH: Accept utf strings for metadata (#2802) Closes #2754. --- pypdf/generic/_base.py | 25 +++++++++++++++++++++++-- pypdf/generic/_utils.py | 38 ++++++++++++++++++++++++++++---------- tests/test_generic.py | 13 +++++++++++++ tests/test_writer.py | 21 +++++++++++++++++++++ 4 files changed, 85 insertions(+), 12 deletions(-) diff --git a/pypdf/generic/_base.py b/pypdf/generic/_base.py index 9899cb48ca..f48dc66c38 100644 --- a/pypdf/generic/_base.py +++ b/pypdf/generic/_base.py @@ -517,23 +517,38 @@ class TextStringObject(str, PdfObject): # noqa: SLOT000 autodetect_pdfdocencoding: bool autodetect_utf16: bool utf16_bom: bytes + _original_bytes: Optional[bytes] = None def __new__(cls, value: Any) -> "TextStringObject": + org = None if isinstance(value, bytes): + org = value value = value.decode("charmap") o = str.__new__(cls, value) + o._original_bytes = org o.autodetect_utf16 = False o.autodetect_pdfdocencoding = False o.utf16_bom = b"" if value.startswith(("\xfe\xff", "\xff\xfe")): + assert org is not None # for mypy + try: + o = str.__new__(cls, org.decode("utf-16")) + except UnicodeDecodeError as exc: + logger_warning( + f"{exc!s}\ninitial string:{exc.object!r}", + __name__, + ) + o = str.__new__(cls, exc.object[: exc.start].decode("utf-16")) + o._original_bytes = org o.autodetect_utf16 = True - o.utf16_bom = value[:2].encode("charmap") + o.utf16_bom = org[:2] else: try: encode_pdfdocencoding(o) o.autodetect_pdfdocencoding = True except UnicodeEncodeError: o.autodetect_utf16 = True + o.utf16_bom = codecs.BOM_UTF16_BE return o def clone( @@ -544,6 +559,7 @@ def clone( ) -> "TextStringObject": """Clone object into pdf_dest.""" obj = TextStringObject(self) + obj._original_bytes = self._original_bytes obj.autodetect_pdfdocencoding = self.autodetect_pdfdocencoding obj.autodetect_utf16 = self.autodetect_utf16 obj.utf16_bom = self.utf16_bom @@ -559,7 +575,10 @@ def original_bytes(self) -> bytes: if that occurs, this "original_bytes" property can be used to back-calculate what the original encoded bytes were. """ - return self.get_original_bytes() + if self._original_bytes is not None: + return self._original_bytes + else: + return self.get_original_bytes() def get_original_bytes(self) -> bytes: # We're a text string object, but the library is trying to get our raw @@ -584,6 +603,8 @@ def get_encoded_bytes(self) -> bytes: # nicer to look at in the PDF file. Sadly, we take a performance hit # here for trying... 
try: + if self._original_bytes is not None: + return self._original_bytes if self.autodetect_utf16: raise UnicodeEncodeError("", "forced", -1, -1, "") bytearr = encode_pdfdocencoding(self) diff --git a/pypdf/generic/_utils.py b/pypdf/generic/_utils.py index b5ac6632ac..6fce6d0b22 100644 --- a/pypdf/generic/_utils.py +++ b/pypdf/generic/_utils.py @@ -148,27 +148,45 @@ def create_string_object( out += forced_encoding[x] except Exception: out += bytes((x,)).decode("charmap") - return TextStringObject(out) + obj = TextStringObject(out) + obj._original_bytes = string + return obj elif isinstance(forced_encoding, str): if forced_encoding == "bytes": return ByteStringObject(string) - return TextStringObject(string.decode(forced_encoding)) + obj = TextStringObject(string.decode(forced_encoding)) + obj._original_bytes = string + return obj else: try: if string.startswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)): retval = TextStringObject(string.decode("utf-16")) + retval._original_bytes = string retval.autodetect_utf16 = True retval.utf16_bom = string[:2] return retval - else: - # This is probably a big performance hit here, but we need - # to convert string objects into the text/unicode-aware - # version if possible... and the only way to check if that's - # possible is to try. - # Some strings are strings, some are just byte arrays. - retval = TextStringObject(decode_pdfdocencoding(string)) - retval.autodetect_pdfdocencoding = True + if string.startswith(b"\x00"): + retval = TextStringObject(string.decode("utf-16be")) + retval._original_bytes = string + retval.autodetect_utf16 = True + retval.utf16_bom = codecs.BOM_UTF16_BE return retval + if string[1:2] == b"\x00": + retval = TextStringObject(string.decode("utf-16le")) + retval._original_bytes = string + retval.autodetect_utf16 = True + retval.utf16_bom = codecs.BOM_UTF16_LE + return retval + + # This is probably a big performance hit here, but we need + # to convert string objects into the text/unicode-aware + # version if possible... and the only way to check if that's + # possible is to try. + # Some strings are strings, some are just byte arrays. 
+ retval = TextStringObject(decode_pdfdocencoding(string)) + retval._original_bytes = string + retval.autodetect_pdfdocencoding = True + return retval except UnicodeDecodeError: return ByteStringObject(string) else: diff --git a/tests/test_generic.py b/tests/test_generic.py index b1079974ef..6b8ae0151c 100644 --- a/tests/test_generic.py +++ b/tests/test_generic.py @@ -494,6 +494,9 @@ def test_textstringobject_autodetect_utf16(): tso.autodetect_utf16 = True tso.utf16_bom = codecs.BOM_UTF16_BE assert tso.get_original_bytes() == b"\xfe\xff\x00f\x00o\x00o" + tso.utf16_bom = codecs.BOM_UTF16_LE + assert tso.get_original_bytes() == b"\xff\xfef\x00o\x00o\x00" + assert tso.get_encoded_bytes() == b"\xff\xfef\x00o\x00o\x00" def test_remove_child_not_in_tree(): @@ -1131,6 +1134,16 @@ def test_create_string_object_utf16_bom(): result.get_encoded_bytes() == b"\xff\xfeP\x00a\x00p\x00e\x00r\x00P\x00o\x00r\x00t\x00 \x001\x004\x00\x00\x00" ) + result = TextStringObject( + b"\xff\xfeP\x00a\x00p\x00e\x00r\x00P\x00o\x00r\x00t\x00 \x001\x004\x00\x00\x00" + ) + assert result == "PaperPort 14\x00" + assert result.autodetect_utf16 is True + assert result.utf16_bom == b"\xff\xfe" + assert ( + result.get_encoded_bytes() + == b"\xff\xfeP\x00a\x00p\x00e\x00r\x00P\x00o\x00r\x00t\x00 \x001\x004\x00\x00\x00" + ) # utf16-be without bom result = TextStringObject("ÿ") diff --git a/tests/test_writer.py b/tests/test_writer.py index 49fe585385..b6a47a18c8 100644 --- a/tests/test_writer.py +++ b/tests/test_writer.py @@ -2333,3 +2333,24 @@ def test_set_need_appearances_writer(): """Minimal test for coverage""" writer = PdfWriter() writer.set_need_appearances_writer() + + +def test_utf16_metadata(): + """See #2754""" + writer = PdfWriter(RESOURCE_ROOT / "crazyones.pdf") + writer.add_metadata( + { + "/Subject": "Invoice №AI_047", + } + ) + b = BytesIO() + writer.write(b) + b.seek(0) + reader = PdfReader(b) + assert reader.metadata.subject == "Invoice №AI_047" + bb = b.getvalue() + i = bb.find(b"/Subject") + assert bb[i : i + 100] == ( + b"/Subject (\\376\\377\\000I\\000n\\000v\\000o\\000i\\000c\\000e" + b"\\000 \\041\\026\\000A\\000I\\000\\137\\0000\\0004\\0007)" + ) From d2d520b47264c4f43b79e038d9ac78a2b583f269 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Thu, 22 Aug 2024 05:37:02 +0200 Subject: [PATCH 18/43] MAINT: Remove unused code (#2805) --- pypdf/_reader.py | 21 ++++----------------- tests/test_reader.py | 2 +- 2 files changed, 5 insertions(+), 18 deletions(-) diff --git a/pypdf/_reader.py b/pypdf/_reader.py index 037f4e358d..1ffcd436d2 100644 --- a/pypdf/_reader.py +++ b/pypdf/_reader.py @@ -33,6 +33,7 @@ from pathlib import Path from types import TracebackType from typing import ( + TYPE_CHECKING, Any, Callable, Dict, @@ -47,7 +48,6 @@ from ._doc_common import PdfDocCommon, convert_to_int from ._encryption import Encryption, PasswordType -from ._page import PageObject from ._utils import ( StrByteType, StreamType, @@ -82,6 +82,9 @@ ) from .xmp import XmpInformation +if TYPE_CHECKING: + from ._page import PageObject + class PdfReader(PdfDocCommon): """ @@ -273,22 +276,6 @@ def xmp_metadata(self) -> Optional[XmpInformation]: finally: self._override_encryption = False - def _get_page(self, page_number: int) -> PageObject: - """ - Retrieve a page by number from this PDF file. - - Args: - page_number: The page number to retrieve - (pages begin at zero) - - Returns: - A :class:`PageObject` instance. 
- """ - if self.flattened_pages is None: - self._flatten() - assert self.flattened_pages is not None, "hint for mypy" - return self.flattened_pages[page_number] - def _get_page_number_by_indirect( self, indirect_reference: Union[None, int, NullObject, IndirectObject] ) -> Optional[int]: diff --git a/tests/test_reader.py b/tests/test_reader.py index c7dc39b30e..0413a91356 100644 --- a/tests/test_reader.py +++ b/tests/test_reader.py @@ -434,7 +434,7 @@ def test_get_form(src, expected, expected_get_fields, txt_file_path): def test_get_page_number(src, page_number): src = RESOURCE_ROOT / src reader = PdfReader(src) - reader._get_page(0) + reader.get_page(0) page = reader.pages[page_number] assert reader.get_page_number(page) == page_number From 9f08cd0e48114b5788e9c219b443bf75dcdbe251 Mon Sep 17 00:00:00 2001 From: Bertrand Bordage Date: Fri, 23 Aug 2024 07:43:06 +0200 Subject: [PATCH 19/43] ROB: Raise PdfReadError when missing /Root in trailer (#2808) Fixes #2806. --- pypdf/_reader.py | 5 ++++- tests/test_reader.py | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/pypdf/_reader.py b/pypdf/_reader.py index 1ffcd436d2..1452661a5b 100644 --- a/pypdf/_reader.py +++ b/pypdf/_reader.py @@ -190,7 +190,10 @@ def close(self) -> None: @property def root_object(self) -> DictionaryObject: """Provide access to "/Root". Standardized with PdfWriter.""" - return cast(DictionaryObject, self.trailer[TK.ROOT].get_object()) + root = self.trailer[TK.ROOT] + if root is None: + raise PdfReadError('Cannot find "/Root" key in trailer') + return cast(DictionaryObject, root.get_object()) @property def _info(self) -> Optional[DictionaryObject]: diff --git a/tests/test_reader.py b/tests/test_reader.py index 0413a91356..d2394f95d6 100644 --- a/tests/test_reader.py +++ b/tests/test_reader.py @@ -607,9 +607,9 @@ def test_read_unknown_zero_pages(caplog): "startxref on same line as offset", ] assert normalize_warnings(caplog.text) == warnings - with pytest.raises(AttributeError) as exc: + with pytest.raises(PdfReadError) as exc: len(reader.pages) - assert exc.value.args[0] == "'NoneType' object has no attribute 'get_object'" + assert exc.value.args[0] == 'Cannot find "/Root" key in trailer' def test_read_encrypted_without_decryption(): From b7b3c8cedfc94d1b65fe2cd15741209b532e45c8 Mon Sep 17 00:00:00 2001 From: Stefan <96178532+stefan6419846@users.noreply.github.com> Date: Fri, 23 Aug 2024 08:59:51 +0200 Subject: [PATCH 20/43] MAINT: Improve wording of set_data error message (#2810) --- pypdf/generic/_data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pypdf/generic/_data_structures.py b/pypdf/generic/_data_structures.py index 399836be5f..2c6e20e575 100644 --- a/pypdf/generic/_data_structures.py +++ b/pypdf/generic/_data_structures.py @@ -1021,7 +1021,7 @@ def set_data(self, data: bytes) -> None: # deprecated super().set_data(FlateDecode.encode(data)) else: raise PdfReadError( - "Streams encoded with different filter from only FlateDecode is not supported" + "Streams encoded with a filter different from FlateDecode are not supported" ) From fba73a47fc08a28b6b7d013104e2d322039e9cae Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Fri, 23 Aug 2024 23:05:37 +0200 Subject: [PATCH 21/43] ENH: add incremental capability to PdfWriter closes #2780 --- pypdf/_doc_common.py | 22 ++- pypdf/_page.py | 12 ++ pypdf/_protocols.py | 3 + pypdf/_reader.py | 2 + pypdf/_writer.py | 230 +++++++++++++++++++++++------- pypdf/constants.py | 7 +- 
pypdf/generic/_base.py | 90 +++++++++++- pypdf/generic/_data_structures.py | 26 ++++ tests/test_reader.py | 2 +- 9 files changed, 336 insertions(+), 58 deletions(-) diff --git a/pypdf/_doc_common.py b/pypdf/_doc_common.py index 4f607340db..12848fb8e7 100644 --- a/pypdf/_doc_common.py +++ b/pypdf/_doc_common.py @@ -254,6 +254,8 @@ class PdfDocCommon: _encryption: Optional[Encryption] = None + _readonly: bool = False + @property @abstractmethod def root_object(self) -> DictionaryObject: @@ -349,7 +351,7 @@ def get_num_pages(self) -> int: return self.root_object["/Pages"]["/Count"] # type: ignore else: if self.flattened_pages is None: - self._flatten() + self._flatten(self._readonly) assert self.flattened_pages is not None return len(self.flattened_pages) @@ -366,7 +368,7 @@ def get_page(self, page_number: int) -> PageObject: A :class:`PageObject` instance. """ if self.flattened_pages is None: - self._flatten() + self._flatten(self._readonly) assert self.flattened_pages is not None, "hint for mypy" return self.flattened_pages[page_number] @@ -1082,10 +1084,19 @@ def page_mode(self) -> Optional[PagemodeType]: def _flatten( self, + list_only: bool = False, pages: Union[None, DictionaryObject, PageObject] = None, inherit: Optional[Dict[str, Any]] = None, indirect_reference: Optional[IndirectObject] = None, ) -> None: + """ + prepare the document pages to ease searching + args: + list_only: will only list the pages witin _flatten_pages + pages, + inherit, + indirect_reference: used recursively to flatten the /Pages object + """ inheritable_page_attributes = ( NameObject(PG.RESOURCES), NameObject(PG.MEDIABOX), @@ -1122,7 +1133,7 @@ def _flatten( if obj: # damaged file may have invalid child in /Pages try: - self._flatten(obj, inherit, **addt) + self._flatten(list_only, obj, inherit, **addt) except RecursionError: raise PdfReadError( "Maximum recursion depth reached during page flattening." @@ -1134,7 +1145,8 @@ def _flatten( if attr_in not in pages: pages[attr_in] = value page_obj = PageObject(self, indirect_reference) - page_obj.update(pages) + if not list_only: + page_obj.update(pages) # TODO: Could flattened_pages be None at this point? self.flattened_pages.append(page_obj) # type: ignore @@ -1158,7 +1170,7 @@ def remove_page( or destinations to reference a detached page. 
""" if self.flattened_pages is None: - self._flatten() + self._flatten(self._readonly) assert self.flattened_pages is not None if isinstance(page, IndirectObject): p = page.get_object() diff --git a/pypdf/_page.py b/pypdf/_page.py index c51aee1ab0..8a8c47eecf 100644 --- a/pypdf/_page.py +++ b/pypdf/_page.py @@ -493,6 +493,18 @@ def __init__( # below Union for mypy but actually Optional[List[str]] self.indirect_reference = indirect_reference + def hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + Note: this function is overloaded to return the same results + as a DictionaryObject + """ + return hash( + (DictionaryObject, tuple(((k, v.hash_bin()) for k, v in self.items()))) + ) + def hash_value_data(self) -> bytes: data = super().hash_value_data() data += b"%d" % id(self) diff --git a/pypdf/_protocols.py b/pypdf/_protocols.py index b5fa14879c..431db1a112 100644 --- a/pypdf/_protocols.py +++ b/pypdf/_protocols.py @@ -74,6 +74,9 @@ class PdfWriterProtocol(PdfCommonDocProtocol, Protocol): _objects: List[Any] _id_translated: Dict[int, Dict[int, int]] + incremental: bool + _reader: Any # PdfReader + @abstractmethod def write(self, stream: Union[Path, StrByteType]) -> Tuple[bool, IO[Any]]: ... # pragma: no cover diff --git a/pypdf/_reader.py b/pypdf/_reader.py index 1ffcd436d2..cd6be50834 100644 --- a/pypdf/_reader.py +++ b/pypdf/_reader.py @@ -136,6 +136,7 @@ def __init__( with open(stream, "rb") as fh: stream = BytesIO(fh.read()) self._stream_opened = True + self._startxref: int = 0 self.read(stream) self.stream = stream @@ -560,6 +561,7 @@ def read(self, stream: StreamType) -> None: self._basic_validation(stream) self._find_eof_marker(stream) startxref = self._find_startxref_pos(stream) + self._startxref = startxref # check and eventually correct the startxref only in not strict xref_issue_nr = self._get_xref_issues(stream, startxref) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index a72e2a23df..e47679d452 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -1,3 +1,6 @@ +# TODO : thing about pages to have a global soluce without rework; +# consider question about heritage of properties + # Copyright (c) 2006, Mathieu Fenniak # Copyright (c) 2007, Ashish Kulkarni # @@ -154,10 +157,35 @@ def __init__( self, fileobj: Union[None, PdfReader, StrByteType, Path] = "", clone_from: Union[None, PdfReader, StrByteType, Path] = None, + incremental: bool = False, ) -> None: - self._header = b"%PDF-1.3" + self.incremental = incremental + if self.incremental: + if isinstance(fileobj, (str, Path)): + with open(fileobj, "rb") as f: + fileobj = BytesIO(f.read(-1)) + if isinstance(fileobj, IO): + fileobj = BytesIO(fileobj.read(-1)) + if isinstance(fileobj, BytesIO): + fileobj = PdfReader(fileobj) + else: + raise PyPdfError("Invalid type for incremental mode") + self._reader = fileobj # prev content is in _reader.stream + self._header = fileobj.pdf_header.encode() + self._readonly = True # !!!TODO: to be analysed + else: + self._header = b"%PDF-1.3" + """ + The indirect objects in the PDF. + for the incremental it will be filled with None + in clone_reader_document_root + """ self._objects: List[Optional[PdfObject]] = [] - """The indirect objects in the PDF.""" + + """ + list of hashes after import; used to identify changes + """ + self._original_hash: List[int] = [] """Maps hash values of indirect objects to the list of IndirectObjects. This is used for compression. 
@@ -168,33 +196,7 @@ def __init__( dict[id(pdf)][(idnum, generation)] """ self._id_translated: Dict[int, Dict[int, int]] = {} - - # The root of our page tree node. - pages = DictionaryObject() - pages.update( - { - NameObject(PA.TYPE): NameObject("/Pages"), - NameObject(PA.COUNT): NumberObject(0), - NameObject(PA.KIDS): ArrayObject(), - } - ) - self._pages = self._add_object(pages) - self.flattened_pages = [] - - # info object - info = DictionaryObject() - info.update({NameObject("/Producer"): create_string_object("pypdf")}) - self._info_obj: PdfObject = self._add_object(info) - - # root object - self._root_object = DictionaryObject() - self._root_object.update( - { - NameObject(PA.TYPE): NameObject(CO.CATALOG), - NameObject(CO.PAGES): self._pages, - } - ) - self._root = self._add_object(self._root_object) + self._ID: Union[ArrayObject, None] = None def _get_clone_from( fileobj: Union[None, PdfReader, str, Path, IO[Any], BytesIO], @@ -227,14 +229,44 @@ def _get_clone_from( self.temp_fileobj = fileobj self.fileobj = "" self.with_as_usage = False + # The root of our page tree node. + pages = DictionaryObject() + pages.update( + { + NameObject(PA.TYPE): NameObject("/Pages"), + NameObject(PA.COUNT): NumberObject(0), + NameObject(PA.KIDS): ArrayObject(), + } + ) + self.flattened_pages = [] + self._encryption: Optional[Encryption] = None + self._encrypt_entry: Optional[DictionaryObject] = None + self._info_obj: PdfObject + if clone_from is not None: if not isinstance(clone_from, PdfReader): clone_from = PdfReader(clone_from) self.clone_document_from_reader(clone_from) - - self._encryption: Optional[Encryption] = None - self._encrypt_entry: Optional[DictionaryObject] = None - self._ID: Union[ArrayObject, None] = None + else: + self._pages = self._add_object(pages) + # root object + self._root_object = DictionaryObject() + self._root_object.update( + { + NameObject(PA.TYPE): NameObject(CO.CATALOG), + NameObject(CO.PAGES): self._pages, + } + ) + self._add_object(self._root_object) + # info object + info = DictionaryObject() + info.update({NameObject("/Producer"): create_string_object("pypdf")}) + self._info_obj = self._add_object(info) + if isinstance(self._ID, list): + if isinstance(self._ID[0], TextStringObject): + self._ID[0] = ByteStringObject(self._ID[0].get_original_bytes()) + if isinstance(self._ID[1], TextStringObject): + self._ID[1] = ByteStringObject(self._ID[1].get_original_bytes()) # for commonality @property @@ -1115,18 +1147,29 @@ def clone_reader_document_root(self, reader: PdfReader) -> None: Args: reader: PdfReader from which the document root should be copied. 
""" - self._objects.clear() + if self.incremental: + self._objects = [None] * cast(int, reader.trailer["/Size"]) + else: + self._objects.clear() self._root_object = reader.root_object.clone(self) - self._root = self._root_object.indirect_reference # type: ignore[assignment] self._pages = self._root_object.raw_get("/Pages") + + assert len(self._objects) <= cast(int, reader.trailer["/Size"]) # for pytest + # must be done here before rewriting + if self.incremental: + self._original_hash = [ + (obj.hash_bin() if obj is not None else 0) for obj in self._objects + ] self._flatten() assert self.flattened_pages is not None for p in self.flattened_pages: - p[NameObject("/Parent")] = self._pages - self._objects[cast(IndirectObject, p.indirect_reference).idnum - 1] = p - cast(DictionaryObject, self._pages.get_object())[ - NameObject("/Kids") - ] = ArrayObject([p.indirect_reference for p in self.flattened_pages]) + self._replace_object(cast(IndirectObject, p.indirect_reference).idnum, p) + if not self.incremental: + p[NameObject("/Parent")] = self._pages + if not self.incremental: + cast(DictionaryObject, self._pages.get_object())[ + NameObject("/Kids") + ] = ArrayObject([p.indirect_reference for p in self.flattened_pages]) def clone_document_from_reader( self, @@ -1148,13 +1191,26 @@ def clone_document_from_reader( document. """ self.clone_reader_document_root(reader) - self._info_obj = self._add_object(DictionaryObject()) if TK.INFO in reader.trailer: - self._info = reader._info # actually copy fields + if self.incremental: + inf = reader._info + if inf is not None: + self._info_obj = cast( + IndirectObject, inf.clone(self).indirect_reference + ) + self._original_hash[ + cast(IndirectObject, self._info_obj.indirect_reference).idnum - 1 + ] = self._info_obj.hash_bin() + else: + self._info = reader._info # actually copy fields + + else: + self._info_obj = self._add_object(DictionaryObject()) try: self._ID = cast(ArrayObject, reader._ID).clone(self) except AttributeError: pass + if callable(after_page_append): for page in cast( ArrayObject, cast(DictionaryObject, self._pages.get_object())["/Kids"] @@ -1257,9 +1313,17 @@ def write_stream(self, stream: StreamType) -> None: # self._root = self._add_object(self._root_object) # self._sweep_indirect_references(self._root) - object_positions, free_objects = self._write_pdf_structure(stream) - xref_location = self._write_xref_table(stream, object_positions, free_objects) - self._write_trailer(stream, xref_location) + if self.incremental: + self._reader.stream.seek(0) + stream.write(self._reader.stream.read(-1)) + xref_location = self._write_increment(stream) + self._write_trailer(stream, xref_location) + else: + object_positions, free_objects = self._write_pdf_structure(stream) + xref_location = self._write_xref_table( + stream, object_positions, free_objects + ) + self._write_trailer(stream, xref_location) def write(self, stream: Union[Path, StrByteType]) -> Tuple[bool, IO[Any]]: """ @@ -1291,6 +1355,75 @@ def write(self, stream: Union[Path, StrByteType]) -> Tuple[bool, IO[Any]]: return my_file, stream + def _list_objects_in_increment(self) -> List[IndirectObject]: + """ + For debug / analysis + Provides the list of new/modified objects that are to be written + """ + ## lst = [] + ## for i in range(len(self._objects)): + ## if (self._objects[i] is not None and + ## (i >= len(self._original_hash) + ## or cast(PdfObject,self._objects[i]).hash_bin() != self._original_hash[i] + ## )): + ## lst.append(self._objects[i].indirect_reference) + return [ + 
cast(IndirectObject, self._objects[i]).indirect_reference + for i in range(len(self._objects)) + if ( + self._objects[i] is not None + and ( + i >= len(self._original_hash) + or cast(PdfObject, self._objects[i]).hash_bin() + != self._original_hash[i] + ) + ) + ] + + def _write_increment(self, stream: StreamType) -> int: + object_positions = {} + object_blocks = [] + current_start = -1 + current_stop = -2 + for i, obj in enumerate(self._objects): + if self._objects[i] is not None and ( + i >= len(self._original_hash) + or cast(PdfObject, self._objects[i]).hash_bin() + != self._original_hash[i] + ): + idnum = i + 1 + assert isinstance(obj, PdfObject) # mypy + # first write new/modified object + object_positions[idnum] = stream.tell() + stream.write(f"{idnum} 0 obj\n".encode()) + if self._encryption and obj != self._encrypt_entry: + obj = self._encryption.encrypt_object(obj, idnum, 0) + obj.write_to_stream(stream) + stream.write(b"\nendobj\n") + + # prepare xref + if idnum != current_stop: + if current_start > 0: + object_blocks.append( + [current_start, current_stop - current_start] + ) + current_start = idnum + current_stop = idnum + 1 + else: + current_stop = idnum + 1 + if current_start > 0: + object_blocks.append([current_start, current_stop - current_start]) + # write incremented xref + xref_location = stream.tell() + stream.write(b"xref\n") + stream.write(b"0 1\n") + stream.write(b"0000000000 65535 f \n") + for block in object_blocks: + stream.write(f"{block[0]} {block[1]}\n".encode()) + for i in range(block[0], block[0] + block[1]): + stream.write(f"{object_positions[i]:0>10} {0:0>5} n \n".encode()) + return xref_location + def _write_pdf_structure(self, stream: StreamType) -> Tuple[List[int], List[int]]: object_positions = [] free_objects = [] # will contain list of all free entries @@ -1337,14 +1470,15 @@ def _write_trailer(self, stream: StreamType, xref_location: int) -> None: of certain special objects within the body of the file. 
""" stream.write(b"trailer\n") - trailer = DictionaryObject() - trailer.update( + trailer = DictionaryObject( { NameObject(TK.SIZE): NumberObject(len(self._objects) + 1), - NameObject(TK.ROOT): self._root, + NameObject(TK.ROOT): self.root_object.indirect_reference, NameObject(TK.INFO): self._info_obj, } ) + if self.incremental: + trailer[NameObject(TK.PREV)] = NumberObject(self._reader._startxref) if self._ID: trailer[NameObject(TK.ID)] = self._ID if self._encrypt_entry: diff --git a/pypdf/constants.py b/pypdf/constants.py index 745774e2a8..a7e67aacc8 100644 --- a/pypdf/constants.py +++ b/pypdf/constants.py @@ -33,6 +33,7 @@ class TrailerKeys: ID = "/ID" INFO = "/Info" SIZE = "/Size" + PREV = "/Prev" class CatalogAttributes: @@ -209,7 +210,7 @@ class PagesAttributes: PARENT = "/Parent" # dictionary, required; indirect reference to pages object KIDS = "/Kids" # array, required; List of indirect references COUNT = "/Count" # integer, required; the number of leaf nodes (page objects) - # that are descendants of this node within the page tree + # that are descendants of this node within the page tree class PageAttributes: @@ -217,7 +218,9 @@ class PageAttributes: TYPE = "/Type" # name, required; must be /Page PARENT = "/Parent" # dictionary, required; a pages object - LAST_MODIFIED = "/LastModified" # date, optional; date and time of last modification + LAST_MODIFIED = ( + "/LastModified" # date, optional; date and time of last modification + ) RESOURCES = "/Resources" # dictionary, required if there are any MEDIABOX = "/MediaBox" # rectangle, required; rectangle specifying page size CROPBOX = "/CropBox" # rectangle, optional diff --git a/pypdf/generic/_base.py b/pypdf/generic/_base.py index f48dc66c38..9dfb25a29e 100644 --- a/pypdf/generic/_base.py +++ b/pypdf/generic/_base.py @@ -53,6 +53,16 @@ class PdfObject(PdfObjectProtocol): hash_func: Callable[..., "hashlib._Hash"] = hashlib.sha1 indirect_reference: Optional["IndirectObject"] + def hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + """ + raise NotImplementedError( + f"{self.__class__.__name__} does not implement .hash_bin() so far" + ) + def hash_value_data(self) -> bytes: return ("%s" % self).encode() @@ -121,7 +131,15 @@ def _reference_clone( ind = self.indirect_reference except AttributeError: return clone - i = len(pdf_dest._objects) + 1 + if ( + pdf_dest.incremental + and ind is not None + and ind.pdf == pdf_dest._reader + and ind.idnum <= len(pdf_dest._objects) + ): + i = ind.idnum + else: + i = len(pdf_dest._objects) + 1 if ind is not None: if id(ind.pdf) not in pdf_dest._id_translated: pdf_dest._id_translated[id(ind.pdf)] = {} @@ -136,7 +154,11 @@ def _reference_clone( assert obj is not None return obj pdf_dest._id_translated[id(ind.pdf)][ind.idnum] = i - pdf_dest._objects.append(clone) + try: + pdf_dest._objects[i - 1] = clone + except IndexError: + pdf_dest._objects.append(clone) + i = len(pdf_dest._objects) clone.indirect_reference = IndirectObject(i, 0, pdf_dest) return clone @@ -162,6 +184,14 @@ def clone( "NullObject", self._reference_clone(NullObject(), pdf_dest, force_duplicate) ) + def hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + """ + return hash((self.__class__,)) + def write_to_stream( self, stream: StreamType, encryption_key: Union[None, str, bytes] = None ) -> None: @@ -198,6 +228,14 @@ def clone( self._reference_clone(BooleanObject(self.value), pdf_dest, force_duplicate), ) + def 
hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + """ + return hash((self.__class__, self.value)) + def __eq__(self, __o: object) -> bool: if isinstance(__o, BooleanObject): return self.value == __o.value @@ -242,6 +280,14 @@ def __init__(self, idnum: int, generation: int, pdf: Any) -> None: # PdfReader def __hash__(self) -> int: return hash((self.idnum, self.generation, id(self.pdf))) + def hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + """ + return hash((self.__class__, self.idnum, self.generation, id(self.pdf))) + def clone( self, pdf_dest: PdfWriterProtocol, @@ -400,6 +446,14 @@ def clone( self._reference_clone(FloatObject(self), pdf_dest, force_duplicate), ) + def hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + """ + return hash((self.__class__, self.as_numeric)) + def myrepr(self) -> str: if self == 0: return "0.0" @@ -445,6 +499,14 @@ def clone( self._reference_clone(NumberObject(self), pdf_dest, force_duplicate), ) + def hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + """ + return hash((self.__class__, self.as_numeric())) + def as_numeric(self) -> int: return int(repr(self).encode("utf8")) @@ -488,6 +550,14 @@ def clone( ), ) + def hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + """ + return hash((self.__class__, bytes(self))) + @property def original_bytes(self) -> bytes: """For compatibility with TextStringObject.original_bytes.""" @@ -567,6 +637,14 @@ def clone( "TextStringObject", self._reference_clone(obj, pdf_dest, force_duplicate) ) + def hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + """ + return hash((self.__class__, self.original_bytes)) + @property def original_bytes(self) -> bytes: """ @@ -663,6 +741,14 @@ def clone( self._reference_clone(NameObject(self), pdf_dest, force_duplicate), ) + def hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + """ + return hash((self.__class__, self)) + def write_to_stream( self, stream: StreamType, encryption_key: Union[None, str, bytes] = None ) -> None: diff --git a/pypdf/generic/_data_structures.py b/pypdf/generic/_data_structures.py index 399836be5f..e53129a485 100644 --- a/pypdf/generic/_data_structures.py +++ b/pypdf/generic/_data_structures.py @@ -131,6 +131,14 @@ def clone( arr.append(data) return arr + def hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + """ + return hash((self.__class__, tuple(x.hash_bin() for x in self))) + def items(self) -> Iterable[Any]: """Emulate DictionaryObject.items for a list (index, object).""" return enumerate(self) @@ -371,6 +379,16 @@ def _clone( else v ) + def hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + """ + return hash( + (self.__class__, tuple(((k, v.hash_bin()) for k, v in self.items()))) + ) + def raw_get(self, key: Any) -> Any: return dict.__getitem__(self, key) @@ -876,6 +894,14 @@ def _clone( pass super()._clone(src, pdf_dest, force_duplicate, ignore_fields, visited) + def hash_bin(self) -> int: + """ + Returns: + hash considering type and value + used to detect modified object + """ + return hash((super().hash_bin(), self.get_data())) + def 
get_data(self) -> bytes: return self._data diff --git a/tests/test_reader.py b/tests/test_reader.py index 0413a91356..c1bdff9445 100644 --- a/tests/test_reader.py +++ b/tests/test_reader.py @@ -212,7 +212,7 @@ def test_get_outline(src, outline_elements): pytest.param( "imagemagick-ASCII85Decode.pdf", ["Im0.png"], - marks=pytest.mark.xfail(reason="broken image extraction"), + # marks=pytest.mark.xfail(reason="broken image extraction"), ), ("imagemagick-CCITTFaxDecode.pdf", ["Im0.tiff"]), (SAMPLE_ROOT / "019-grayscale-image/grayscale-image.pdf", ["X0.png"]), From 0543709a702921f767ec04aaa9ea40db1b7272bc Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Sat, 24 Aug 2024 11:21:22 +0200 Subject: [PATCH 22/43] fix test --- pypdf/_writer.py | 52 ++++++++++-------- ..._Vicksburg_Sample_OCR-crazyones-merged.pdf | Bin 217093 -> 217093 bytes 2 files changed, 28 insertions(+), 24 deletions(-) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index e47679d452..dd96251dee 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -160,21 +160,6 @@ def __init__( incremental: bool = False, ) -> None: self.incremental = incremental - if self.incremental: - if isinstance(fileobj, (str, Path)): - with open(fileobj, "rb") as f: - fileobj = BytesIO(f.read(-1)) - if isinstance(fileobj, IO): - fileobj = BytesIO(fileobj.read(-1)) - if isinstance(fileobj, BytesIO): - fileobj = PdfReader(fileobj) - else: - raise PyPdfError("Invalid type for incremental mode") - self._reader = fileobj # prev content is in _reader.stream - self._header = fileobj.pdf_header.encode() - self._readonly = True # !!!TODO: to be analysed - else: - self._header = b"%PDF-1.3" """ The indirect objects in the PDF. for the incremental it will be filled with None @@ -197,6 +182,28 @@ def __init__( """ self._id_translated: Dict[int, Dict[int, int]] = {} self._ID: Union[ArrayObject, None] = None + self._info_obj: PdfObject + + if self.incremental: + if isinstance(fileobj, (str, Path)): + with open(fileobj, "rb") as f: + fileobj = BytesIO(f.read(-1)) + if isinstance(fileobj, IO): + fileobj = BytesIO(fileobj.read(-1)) + if isinstance(fileobj, BytesIO): + fileobj = PdfReader(fileobj) + else: + raise PyPdfError("Invalid type for incremental mode") + self._reader = fileobj # prev content is in _reader.stream + self._header = fileobj.pdf_header.encode() + self._readonly = True # !!!TODO: to be analysed + else: + self._header = b"%PDF-1.3" + self._info_obj = self._add_object( + DictionaryObject( + {NameObject("/Producer"): create_string_object("pypdf")} + ) + ) def _get_clone_from( fileobj: Union[None, PdfReader, str, Path, IO[Any], BytesIO], @@ -241,7 +248,6 @@ def _get_clone_from( self.flattened_pages = [] self._encryption: Optional[Encryption] = None self._encrypt_entry: Optional[DictionaryObject] = None - self._info_obj: PdfObject if clone_from is not None: if not isinstance(clone_from, PdfReader): @@ -258,10 +264,6 @@ def _get_clone_from( } ) self._add_object(self._root_object) - # info object - info = DictionaryObject() - info.update({NameObject("/Producer"): create_string_object("pypdf")}) - self._info_obj = self._add_object(info) if isinstance(self._ID, list): if isinstance(self._ID[0], TextStringObject): self._ID[0] = ByteStringObject(self._ID[0].get_original_bytes()) @@ -1192,8 +1194,8 @@ def clone_document_from_reader( """ self.clone_reader_document_root(reader) if TK.INFO in reader.trailer: + inf = reader._info if self.incremental: - inf = reader._info if inf is not None: self._info_obj = cast( 
IndirectObject, inf.clone(self).indirect_reference @@ -1201,11 +1203,13 @@ def clone_document_from_reader( self._original_hash[ cast(IndirectObject, self._info_obj.indirect_reference).idnum - 1 ] = self._info_obj.hash_bin() - else: - self._info = reader._info # actually copy fields - + elif inf is not None: + self._info_obj = self._add_object( + DictionaryObject(cast(DictionaryObject, inf.get_object())) + ) else: self._info_obj = self._add_object(DictionaryObject()) + try: self._ID = cast(ArrayObject, reader._ID).clone(self) except AttributeError: diff --git a/resources/Seige_of_Vicksburg_Sample_OCR-crazyones-merged.pdf b/resources/Seige_of_Vicksburg_Sample_OCR-crazyones-merged.pdf index a53f28f0be432c38a1fff33672a2170eeb5f553f..8a04001ddae371fa756d1dc2f607fd42965f0f8f 100644 GIT binary patch delta 94 zcmZo&z}vcjcY^f925Cm4i2^ATXDb5fy%!jbHuo{k?gCO;J%WZn%4YBN)r>~lMKT!= s&IQuf*qG*o1L@*8CW#mz-Ls2HXaci=smb<*Ud%46jE2*n`7_G{06VfE8vp Date: Sun, 25 Aug 2024 17:10:22 +0200 Subject: [PATCH 23/43] fixes + first test --- pypdf/_page.py | 2 ++ pypdf/_writer.py | 20 ++++++++--------- pypdf/generic/_data_structures.py | 3 ++- tests/test_writer.py | 36 +++++++++++++++++++++++++++++++ 4 files changed, 49 insertions(+), 12 deletions(-) diff --git a/pypdf/_page.py b/pypdf/_page.py index 8a8c47eecf..79cdb7adf1 100644 --- a/pypdf/_page.py +++ b/pypdf/_page.py @@ -492,6 +492,8 @@ def __init__( self.inline_images: Optional[Dict[str, ImageFile]] = None # below Union for mypy but actually Optional[List[str]] self.indirect_reference = indirect_reference + if indirect_reference is not None: + self.update(cast(DictionaryObject, indirect_reference.get_object())) def hash_bin(self) -> int: """ diff --git a/pypdf/_writer.py b/pypdf/_writer.py index dd96251dee..24da873372 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -1202,7 +1202,7 @@ def clone_document_from_reader( ) self._original_hash[ cast(IndirectObject, self._info_obj.indirect_reference).idnum - 1 - ] = self._info_obj.hash_bin() + ] = cast(DictionaryObject, self._info_obj.get_object()).hash_bin() elif inf is not None: self._info_obj = self._add_object( DictionaryObject(cast(DictionaryObject, inf.get_object())) @@ -1359,18 +1359,16 @@ def write(self, stream: Union[Path, StrByteType]) -> Tuple[bool, IO[Any]]: return my_file, stream - def _list_objects_in_increment(self) -> List[IndirectObject]: + def list_objects_in_increment(self) -> List[IndirectObject]: """ For debug / analysis - Provides the list of new/modified objects that are to be written - """ - ## lst = [] - ## for i in range(len(self._objects)): - ## if (self._objects[i] is not None and - ## (i >= len(self._original_hash) - ## or cast(PdfObject,self._objects[i]).hash_bin() != self._original_hash[i] - ## )): - ## lst.append(self._objects[i].indirect_reference) + Provides the list of new/modified objects that will be written + in the increment + Deleted Objects will not be freeed but will become orphans + + Returns: + List of (new / modified) IndirectObjects + """ return [ cast(IndirectObject, self._objects[i]).indirect_reference for i in range(len(self._objects)) diff --git a/pypdf/generic/_data_structures.py b/pypdf/generic/_data_structures.py index e53129a485..00f4ceab8b 100644 --- a/pypdf/generic/_data_structures.py +++ b/pypdf/generic/_data_structures.py @@ -900,7 +900,8 @@ def hash_bin(self) -> int: hash considering type and value used to detect modified object """ - return hash((super().hash_bin(), self.get_data())) + # use of _data to prevent errors on non decoded stream such as JBIG2 + 
return hash((super().hash_bin(), self._data)) def get_data(self) -> bytes: return self._data diff --git a/tests/test_writer.py b/tests/test_writer.py index b6a47a18c8..3ac1f06da9 100644 --- a/tests/test_writer.py +++ b/tests/test_writer.py @@ -2354,3 +2354,39 @@ def test_utf16_metadata(): b"/Subject (\\376\\377\\000I\\000n\\000v\\000o\\000i\\000c\\000e" b"\\000 \\041\\026\\000A\\000I\\000\\137\\0000\\0004\\0007)" ) + + +def test_list_objects_in_increment(caplog): + """Tests for #2811""" + writer = PdfWriter( + RESOURCE_ROOT / "Seige_of_Vicksburg_Sample_OCR-crazyones-merged.pdf", + incremental=True, + ) + # Contains JBIG2 not decoded for the moment + assert writer.list_objects_in_increment() == [] # no flowdown of properties + # modify one object + writer.pages[0][NameObject("/MediaBox")] = ArrayObject( + [NumberObject(0), NumberObject(0), NumberObject(864), NumberObject(648)] + ) + assert writer.list_objects_in_increment() == [IndirectObject(4, 0, writer)] + b = BytesIO() + writer.write(b) + assert b.getvalue().startswith(writer._reader.stream.getvalue()) + b.seek(0) + reader = PdfReader(b) + assert reader.pages[0]["/MediaBox"] == ArrayObject( + [NumberObject(0), NumberObject(0), NumberObject(864), NumberObject(648)] + ) + with pytest.raises(PyPdfError): + writer = PdfWriter(reader, incremental=True) + b.seek(0) + writer = PdfWriter(b, incremental=True) + assert writer.list_objects_in_increment() == [] # no flowdown of properties + + writer = PdfWriter(RESOURCE_ROOT / "crazyones.pdf", incremental=True) + # 1 object is modified: page 0 inherits MediaBox so is changed + assert len(writer.list_objects_in_increment()) == 1 + + writer = PdfWriter(RESOURCE_ROOT / "crazyones.pdf", incremental=False) + # 1 object is modified: page 0 inherits MediaBox so is changed + assert len(writer.list_objects_in_increment()) == len(writer._objects) From 1067b744eeac6374344a8c63ddce742d87d49d91 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Sun, 25 Aug 2024 19:06:16 +0200 Subject: [PATCH 24/43] coverage --- pypdf/_page.py | 2 +- pypdf/_writer.py | 2 -- tests/test_generic.py | 6 ++++++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/pypdf/_page.py b/pypdf/_page.py index 79cdb7adf1..c81eeb8cd5 100644 --- a/pypdf/_page.py +++ b/pypdf/_page.py @@ -501,7 +501,7 @@ def hash_bin(self) -> int: hash considering type and value used to detect modified object Note: this function is overloaded to return the same results - as a DictionaryObject + as a DictionaryObject """ return hash( (DictionaryObject, tuple(((k, v.hash_bin()) for k, v in self.items()))) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index 24da873372..e052b94ae3 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -188,8 +188,6 @@ def __init__( if isinstance(fileobj, (str, Path)): with open(fileobj, "rb") as f: fileobj = BytesIO(f.read(-1)) - if isinstance(fileobj, IO): - fileobj = BytesIO(fileobj.read(-1)) if isinstance(fileobj, BytesIO): fileobj = PdfReader(fileobj) else: diff --git a/tests/test_generic.py b/tests/test_generic.py index 6b8ae0151c..bc83ea4fe5 100644 --- a/tests/test_generic.py +++ b/tests/test_generic.py @@ -1472,3 +1472,9 @@ def test_unitary_extract_inline(): ec.set_data(b) co = ContentStream(ec, None) assert co.operations[7][0]["data"] == b"abcdefghijklmnop" + + +def test_missing_hashbin(): + assert NullObject().hash_bin() == hash((NullObject,)) + t = ByteStringObject(b"123") + assert t.hash_bin() == hash((ByteStringObject, b"123")) From 
f1d3fbe6367e0fcc1e2efc79c1932643851dd455 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Sun, 25 Aug 2024 19:39:37 +0200 Subject: [PATCH 25/43] coverage --- pypdf/_page.py | 1 + pypdf/_writer.py | 2 ++ tests/test_writer.py | 4 ++++ 3 files changed, 7 insertions(+) diff --git a/pypdf/_page.py b/pypdf/_page.py index c81eeb8cd5..aebe9ebbd7 100644 --- a/pypdf/_page.py +++ b/pypdf/_page.py @@ -500,6 +500,7 @@ def hash_bin(self) -> int: Returns: hash considering type and value used to detect modified object + Note: this function is overloaded to return the same results as a DictionaryObject """ diff --git a/pypdf/_writer.py b/pypdf/_writer.py index e052b94ae3..a0d55e3c52 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -1396,8 +1396,10 @@ def _write_increment(self, stream: StreamType) -> int: # first write new/modified object object_positions[idnum] = stream.tell() stream.write(f"{idnum} 0 obj\n".encode()) + """ encryption is not operational if self._encryption and obj != self._encrypt_entry: obj = self._encryption.encrypt_object(obj, idnum, 0) + """ obj.write_to_stream(stream) stream.write(b"\nendobj\n") diff --git a/tests/test_writer.py b/tests/test_writer.py index 3ac1f06da9..1a172e8c35 100644 --- a/tests/test_writer.py +++ b/tests/test_writer.py @@ -2369,6 +2369,10 @@ def test_list_objects_in_increment(caplog): [NumberObject(0), NumberObject(0), NumberObject(864), NumberObject(648)] ) assert writer.list_objects_in_increment() == [IndirectObject(4, 0, writer)] + writer.pages[5][NameObject("/MediaBox")] = ArrayObject( + [NumberObject(0), NumberObject(0), NumberObject(864), NumberObject(648)] + ) + assert len(writer.list_objects_in_increment()) == 2 b = BytesIO() writer.write(b) assert b.getvalue().startswith(writer._reader.stream.getvalue()) From ae97bc73b4f6b0b2653009b47b5b6ead47e13424 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Mon, 26 Aug 2024 13:06:03 +0200 Subject: [PATCH 26/43] cope with multiple level pages --- pypdf/_doc_common.py | 41 ++++++++++++++++++++++++++++++++++++++--- pypdf/_page.py | 24 +++++++++++++++--------- pypdf/_writer.py | 43 ++++++++++++++++++++++++++++++++----------- tests/test_page.py | 4 +++- 4 files changed, 88 insertions(+), 24 deletions(-) diff --git a/pypdf/_doc_common.py b/pypdf/_doc_common.py index 12848fb8e7..ea3c93aab3 100644 --- a/pypdf/_doc_common.py +++ b/pypdf/_doc_common.py @@ -65,9 +65,7 @@ from .constants import FieldDictionaryAttributes as FA from .constants import PageAttributes as PG from .constants import PagesAttributes as PA -from .errors import ( - PdfReadError, -) +from .errors import PdfReadError, PyPdfError from .generic import ( ArrayObject, BooleanObject, @@ -372,6 +370,43 @@ def get_page(self, page_number: int) -> PageObject: assert self.flattened_pages is not None, "hint for mypy" return self.flattened_pages[page_number] + def _get_page_in_node( + self, + page_number: int, + ) -> Tuple[DictionaryObject, int]: + """ + Retrieve the node and position within the /Kids containing the page + if page_number is greater than the number of page, it returns top node, -1 + """ + top = cast(DictionaryObject, self.root_object["/Pages"]) + + def recurs(node: DictionaryObject, mi: int) -> Tuple[Optional[PdfObject], int]: + ma = cast(int, node.get("/Count", 1)) # default 1 for /Page types + if node["/Type"] == "/Page": + if page_number == mi: + return node, -1 + # else: + return None, mi + 1 + if (page_number - mi) >= ma: # not in nodes below + if node == 
top: + return top, -1 + # else + return None, mi + ma + for idx, kid in enumerate(cast(ArrayObject, node["/Kids"])): + kid = cast(DictionaryObject, kid.get_object()) + n, i = recurs(kid, mi) + if n is not None: # page has just been found ... + if i < 0: # ... just below! + return node, idx + # else: # ... at lower levels + return n, i + mi = i + raise PyPdfError("abnormal, can not find the node") + + node, idx = recurs(top, 0) + assert isinstance(node, DictionaryObject) + return node, idx + @property def named_destinations(self) -> Dict[str, Any]: """ diff --git a/pypdf/_page.py b/pypdf/_page.py index aebe9ebbd7..b9f6e012bf 100644 --- a/pypdf/_page.py +++ b/pypdf/_page.py @@ -2414,27 +2414,33 @@ def __delitem__(self, index: Union[int, slice]) -> None: raise IndexError("index out of range") ind = self[index].indirect_reference assert ind is not None - parent = cast(DictionaryObject, ind.get_object()).get("/Parent", None) + parent: Optional[PdfObject] = cast(DictionaryObject, ind.get_object()).get( + "/Parent", None + ) + first = True while parent is not None: parent = cast(DictionaryObject, parent.get_object()) try: - i = parent["/Kids"].index(ind) - del parent["/Kids"][i] + i = cast(ArrayObject, parent["/Kids"]).index(ind) + del cast(ArrayObject, parent["/Kids"])[i] + first = False try: assert ind is not None del ind.pdf.flattened_pages[index] # case of page in a Reader except Exception: # pragma: no cover pass if "/Count" in parent: - parent[NameObject("/Count")] = NumberObject(parent["/Count"] - 1) - if len(parent["/Kids"]) == 0: + parent[NameObject("/Count")] = NumberObject( + cast(int, parent["/Count"]) - 1 + ) + if len(cast(ArrayObject, parent["/Kids"])) == 0: # No more objects in this part of this sub tree ind = parent.indirect_reference - parent = cast(DictionaryObject, parent.get("/Parent", None)) - else: - parent = None + parent = parent.get("/Parent", None) except ValueError: # from index - raise PdfReadError(f"Page Not Found in Page Tree {ind}") + if first: + raise PdfReadError(f"Page Not Found in Page Tree {ind}") + break def __iter__(self) -> Iterator[PageObject]: for i in range(len(self)): diff --git a/pypdf/_writer.py b/pypdf/_writer.py index a0d55e3c52..e2747c1533 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -439,10 +439,12 @@ def _replace_object( def _add_page( self, page: PageObject, - action: Callable[[Any, Union[PageObject, IndirectObject]], None], + index: int, excluded_keys: Iterable[str] = (), ) -> PageObject: - assert cast(str, page[PA.TYPE]) == CO.PAGE + if not isinstance(page, PageObject) or page.get(PA.TYPE, None) != CO.PAGE: + raise ValueError("Invalid page Object") + assert self.flattened_pages is not None, "for mypy" page_org = page excluded_keys = list(excluded_keys) excluded_keys += [PA.PARENT, "/StructParents"] @@ -460,13 +462,23 @@ def _add_page( if page_org.pdf is not None: other = page_org.pdf.pdf_header self.pdf_header = _get_max_pdf_version_header(self.pdf_header, other) - page[NameObject(PA.PARENT)] = self._pages - pages = cast(DictionaryObject, self.get_object(self._pages)) - assert page.indirect_reference is not None - action(pages[PA.KIDS], page.indirect_reference) - action(self.flattened_pages, page) - page_count = cast(int, pages[PA.COUNT]) - pages[NameObject(PA.COUNT)] = NumberObject(page_count + 1) + node, idx = self._get_page_in_node(index) + page[NameObject(PA.PARENT)] = node.indirect_reference + if idx >= 0: # to be a + cast(ArrayObject, node[PA.KIDS]).insert(idx, page.indirect_reference) + if self.flattened_pages != 
node[PA.KIDS]: + self.flattened_pages.insert(index, page) + else: + cast(ArrayObject, node[PA.KIDS]).append(page.indirect_reference) + if self.flattened_pages != node[PA.KIDS]: + self.flattened_pages.append(page) + cpt = 1000 + while node is not None: + node[NameObject(PA.COUNT)] = NumberObject(cast(int, node[PA.COUNT]) + 1) + node = node.get(PA.PARENT, None) + cpt -= 1 + if cpt < 0: + raise PyPdfError("Recursive Error detected") return page def set_need_appearances_writer(self, state: bool = True) -> None: @@ -529,7 +541,8 @@ def add_page( Returns: The added PageObject. """ - return self._add_page(page, list.append, excluded_keys) + assert self.flattened_pages is not None + return self._add_page(page, len(self.flattened_pages), excluded_keys) def insert_page( self, @@ -549,7 +562,15 @@ def insert_page( Returns: The added PageObject. """ - return self._add_page(page, lambda kids, p: kids.insert(index, p)) + assert self.flattened_pages is not None + if index < 0: + index = len(self.flattened_pages) + index + if index < 0: + raise ValueError("invalid index value") + if index >= len(self.flattened_pages): + return self.add_page(page, excluded_keys) + else: + return self._add_page(page, index, excluded_keys) def _get_page_number_by_indirect( self, indirect_reference: Union[None, int, NullObject, IndirectObject] diff --git a/tests/test_page.py b/tests/test_page.py index 72df648e45..8bde3e82e7 100644 --- a/tests/test_page.py +++ b/tests/test_page.py @@ -1251,7 +1251,9 @@ def test_del_pages(): del pp["/Parent"].get_object()["/Kids"][i] with pytest.raises(PdfReadError): del reader.pages[2] - # reader is corrupted we have to reload it + + url = "https://github.com/py-pdf/pypdf/files/13679585/test2_P038-038.pdf" + name = "iss2343.pdf" reader = PdfReader(BytesIO(get_data_from_url(url, name=name))) del reader.pages[:] assert len(reader.pages) == 0 From d9a99d9e4415a188b45dbf37e79925e9cac9193a Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Mon, 26 Aug 2024 13:24:52 +0200 Subject: [PATCH 27/43] test + doc --- pypdf/_writer.py | 27 +++++++++++++++++++++------ tests/test_page.py | 5 +++-- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index e2747c1533..59d6b38220 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -151,6 +151,15 @@ class PdfWriter(PdfDocCommon): cloning a PDF file during initialization. Typically data is added from a :class:`PdfReader`. + + clone_from: identical to fileobj (for compatibility) + + incremental: `bool` + If true, loads the document and set the PdfWriter in incremental mode + + When writing the original document is written first and new/modified + are appened. to be used for signed document/forms to keep signature + valid. """ def __init__( @@ -161,26 +170,32 @@ def __init__( ) -> None: self.incremental = incremental """ + Returns if the PdfWriter object has been started in incremental mode + """ + + self._objects: List[Optional[PdfObject]] = [] + """ The indirect objects in the PDF. for the incremental it will be filled with None in clone_reader_document_root """ - self._objects: List[Optional[PdfObject]] = [] + self._original_hash: List[int] = [] """ list of hashes after import; used to identify changes """ - self._original_hash: List[int] = [] - """Maps hash values of indirect objects to the list of IndirectObjects. - This is used for compression. 
- """ self._idnum_hash: Dict[bytes, Tuple[IndirectObject, List[IndirectObject]]] = {} + """ + Maps hash values of indirect objects to the list of IndirectObjects. + This is used for compression. + """ + self._id_translated: Dict[int, Dict[int, int]] = {} """List of already translated IDs. dict[id(pdf)][(idnum, generation)] """ - self._id_translated: Dict[int, Dict[int, int]] = {} + self._ID: Union[ArrayObject, None] = None self._info_obj: PdfObject diff --git a/tests/test_page.py b/tests/test_page.py index 8bde3e82e7..ac9d241a73 100644 --- a/tests/test_page.py +++ b/tests/test_page.py @@ -1252,9 +1252,10 @@ def test_del_pages(): with pytest.raises(PdfReadError): del reader.pages[2] - url = "https://github.com/py-pdf/pypdf/files/13679585/test2_P038-038.pdf" - name = "iss2343.pdf" + url = "https://github.com/py-pdf/pypdf/files/13946477/panda.pdf" + name = "iss2343b.pdf" reader = PdfReader(BytesIO(get_data_from_url(url, name=name))) + del reader.pages[4] # to propagate among /Pages del reader.pages[:] assert len(reader.pages) == 0 assert len(reader.trailer["/Root"]["/Pages"]["/Kids"]) == 0 From 3c4cfdc2510587c8a75cbe6d6760362db44a2fa1 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Mon, 26 Aug 2024 13:30:20 +0200 Subject: [PATCH 28/43] coverage --- tests/test_page.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_page.py b/tests/test_page.py index ac9d241a73..d9efd4992d 100644 --- a/tests/test_page.py +++ b/tests/test_page.py @@ -1254,11 +1254,11 @@ def test_del_pages(): url = "https://github.com/py-pdf/pypdf/files/13946477/panda.pdf" name = "iss2343b.pdf" - reader = PdfReader(BytesIO(get_data_from_url(url, name=name))) + reader = PdfWriter(BytesIO(get_data_from_url(url, name=name)), incremental=True) del reader.pages[4] # to propagate among /Pages del reader.pages[:] assert len(reader.pages) == 0 - assert len(reader.trailer["/Root"]["/Pages"]["/Kids"]) == 0 + assert len(reader.root_object["/Pages"]["/Kids"]) == 0 assert len(reader.flattened_pages) == 0 From 38d4b351d81719ed774476f0cf7ee7187ff55a9e Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Mon, 26 Aug 2024 13:57:20 +0200 Subject: [PATCH 29/43] coverage --- pypdf/_writer.py | 11 +++-------- tests/test_page.py | 23 ++++++++++++++++------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index 59d6b38220..b981cb0d50 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -1,6 +1,3 @@ -# TODO : thing about pages to have a global soluce without rework; -# consider question about heritage of properties - # Copyright (c) 2006, Mathieu Fenniak # Copyright (c) 2007, Ashish Kulkarni # @@ -154,12 +151,10 @@ class PdfWriter(PdfDocCommon): clone_from: identical to fileobj (for compatibility) - incremental: `bool` - If true, loads the document and set the PdfWriter in incremental mode + incremental: If true, loads the document and set the PdfWriter in incremental mode - When writing the original document is written first and new/modified - are appened. to be used for signed document/forms to keep signature - valid. + When writing in incremental the original document is written first and new/modified + are appened. to be used for signed document/forms to keep signature valid. 
""" def __init__( diff --git a/tests/test_page.py b/tests/test_page.py index d9efd4992d..dc3ec9c558 100644 --- a/tests/test_page.py +++ b/tests/test_page.py @@ -12,7 +12,7 @@ from pypdf import PdfReader, PdfWriter, Transformation from pypdf._page import PageObject from pypdf.constants import PageAttributes as PG -from pypdf.errors import PdfReadError, PdfReadWarning +from pypdf.errors import PdfReadError, PdfReadWarning, PyPdfError from pypdf.generic import ( ArrayObject, ContentStream, @@ -887,6 +887,8 @@ def test_annotation_setter(pdf_file_path): page = reader.pages[0] writer = PdfWriter() writer.add_page(page) + with pytest.raises(ValueError): + writer.add_page(DictionaryObject()) # Act page_number = 0 @@ -1254,12 +1256,19 @@ def test_del_pages(): url = "https://github.com/py-pdf/pypdf/files/13946477/panda.pdf" name = "iss2343b.pdf" - reader = PdfWriter(BytesIO(get_data_from_url(url, name=name)), incremental=True) - del reader.pages[4] # to propagate among /Pages - del reader.pages[:] - assert len(reader.pages) == 0 - assert len(reader.root_object["/Pages"]["/Kids"]) == 0 - assert len(reader.flattened_pages) == 0 + writer = PdfWriter(BytesIO(get_data_from_url(url, name=name)), incremental=True) + node, idx = writer._get_page_in_node(53) + assert (node.indirect_reference.idnum, idx) == (11776, 1) + node, idx = writer._get_page_in_node(10000) + assert (node.indirect_reference.idnum, idx) == (11769, -1) + with pytest.raises(PyPdfError): + writer._get_page_in_node(-1) + + del writer.pages[4] # to propagate among /Pages + del writer.pages[:] + assert len(writer.pages) == 0 + assert len(writer.root_object["/Pages"]["/Kids"]) == 0 + assert len(writer.flattened_pages) == 0 def test_pdf_pages_missing_type(): From 79eca73b7774dadedac01c188681b4559e6cfcaf Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Mon, 26 Aug 2024 15:38:02 +0200 Subject: [PATCH 30/43] coverage --- pypdf/_writer.py | 5 ++--- tests/test_page.py | 13 +++++++++++++ tests/test_writer.py | 4 +++- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index b981cb0d50..b532b64469 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -484,6 +484,7 @@ def _add_page( self.flattened_pages.append(page) cpt = 1000 while node is not None: + node = cast(DictionaryObject, node.get_object()) node[NameObject(PA.COUNT)] = NumberObject(cast(int, node[PA.COUNT]) + 1) node = node.get(PA.PARENT, None) cpt -= 1 @@ -1441,9 +1442,7 @@ def _write_increment(self, stream: StreamType) -> int: [current_start, current_stop - current_start] ) current_start = idnum - current_stop = idnum + 1 - else: - current_stop = idnum + 1 + current_stop = idnum + 1 if current_start > 0: object_blocks.append([current_start, current_stop - current_start]) # write incremented xref diff --git a/tests/test_page.py b/tests/test_page.py index dc3ec9c558..39b1f4ec58 100644 --- a/tests/test_page.py +++ b/tests/test_page.py @@ -1459,3 +1459,16 @@ def test_get_contents_as_bytes(): assert writer.pages[0]._get_contents_as_bytes() == expected writer.pages[0][NameObject("/Contents")] = writer.pages[0]["/Contents"][0] assert writer.pages[0]._get_contents_as_bytes() == expected + + +def test_recursive_get_page_from_node(): + writer = PdfWriter(RESOURCE_ROOT / "crazyones.pdf", incremental=True) + writer.root_object["/Pages"].get_object()[ + NameObject("/Parent") + ] = writer.root_object["/Pages"].indirect_reference + with pytest.raises(PyPdfError): + writer.add_page(writer.pages[0]) + writer = 
PdfWriter(RESOURCE_ROOT / "crazyones.pdf", incremental=True) + writer.insert_page(writer.pages[0], -1) + with pytest.raises(ValueError): + writer.insert_page(writer.pages[0], -10) diff --git a/tests/test_writer.py b/tests/test_writer.py index 1a172e8c35..160ef40232 100644 --- a/tests/test_writer.py +++ b/tests/test_writer.py @@ -2356,7 +2356,7 @@ def test_utf16_metadata(): ) -def test_list_objects_in_increment(caplog): +def test_increment_writer(caplog): """Tests for #2811""" writer = PdfWriter( RESOURCE_ROOT / "Seige_of_Vicksburg_Sample_OCR-crazyones-merged.pdf", @@ -2369,6 +2369,8 @@ def test_list_objects_in_increment(caplog): [NumberObject(0), NumberObject(0), NumberObject(864), NumberObject(648)] ) assert writer.list_objects_in_increment() == [IndirectObject(4, 0, writer)] + b = BytesIO() + writer.write(b) writer.pages[5][NameObject("/MediaBox")] = ArrayObject( [NumberObject(0), NumberObject(0), NumberObject(864), NumberObject(648)] ) From 290c5a6f423ab1af59431bdc76243c0b3a4a63c1 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Mon, 26 Aug 2024 15:52:07 +0200 Subject: [PATCH 31/43] coverage --- tests/test_writer.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_writer.py b/tests/test_writer.py index 160ef40232..64d06d9b6f 100644 --- a/tests/test_writer.py +++ b/tests/test_writer.py @@ -1795,6 +1795,9 @@ def test_missing_info(): writer = PdfWriter(clone_from=reader) assert len(writer.pages) == len(reader.pages) + reader = PdfReader(RESOURCE_ROOT / "crazyones.pdf") + writer._info = reader._info + assert dict(writer._info) == dict(reader._info) @pytest.mark.enable_socket() From 173578d43011132197a44d5e16d225b5e7a9a3df Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Mon, 26 Aug 2024 16:24:39 +0200 Subject: [PATCH 32/43] coverage --- pypdf/_writer.py | 4 +++- tests/test_writer.py | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index b532b64469..4850f251b1 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -468,7 +468,9 @@ def _add_page( ] except Exception: pass - page = cast("PageObject", page_org.clone(self, False, excluded_keys)) + page = cast( + "PageObject", page_org.clone(self, False, excluded_keys).get_object() + ) if page_org.pdf is not None: other = page_org.pdf.pdf_header self.pdf_header = _get_max_pdf_version_header(self.pdf_header, other) diff --git a/tests/test_writer.py b/tests/test_writer.py index 64d06d9b6f..25fb306238 100644 --- a/tests/test_writer.py +++ b/tests/test_writer.py @@ -2399,3 +2399,22 @@ def test_increment_writer(caplog): writer = PdfWriter(RESOURCE_ROOT / "crazyones.pdf", incremental=False) # 1 object is modified: page 0 inherits MediaBox so is changed assert len(writer.list_objects_in_increment()) == len(writer._objects) + + # insert pages in a tree + url = "https://github.com/py-pdf/pypdf/files/13946477/panda.pdf" + name = "iss2343b.pdf" + writer = PdfWriter(BytesIO(get_data_from_url(url, name=name)), incremental=True) + reader = PdfReader(RESOURCE_ROOT / "crazyones.pdf") + pg = writer.insert_page(reader.pages[0], 4) + assert ( + pg.raw_get("/Parent") + == writer.root_object["/Pages"]["/Kids"][0].get_object()["/Kids"][0] + ) + assert pg["/Parent"]["/Count"] == 8 + assert writer.root_object["/Pages"]["/Count"] == 285 + assert len(writer.flattened_pages) == 285 + + # clone without info + writer = PdfWriter(RESOURCE_ROOT / "missing_info.pdf", incremental=True) + assert 
len(writer.list_objects_in_increment()) == 1 + assert writer._info == {} From 1a6eda51cb215eefd18619d988facf8a84c5f2ae Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Mon, 26 Aug 2024 21:16:49 +0200 Subject: [PATCH 33/43] simplification --- pypdf/_writer.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index 4850f251b1..d400cf5f03 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -476,14 +476,13 @@ def _add_page( self.pdf_header = _get_max_pdf_version_header(self.pdf_header, other) node, idx = self._get_page_in_node(index) page[NameObject(PA.PARENT)] = node.indirect_reference + if idx >= 0: # to be a cast(ArrayObject, node[PA.KIDS]).insert(idx, page.indirect_reference) - if self.flattened_pages != node[PA.KIDS]: - self.flattened_pages.insert(index, page) + self.flattened_pages.insert(index, page) else: cast(ArrayObject, node[PA.KIDS]).append(page.indirect_reference) - if self.flattened_pages != node[PA.KIDS]: - self.flattened_pages.append(page) + self.flattened_pages.append(page) cpt = 1000 while node is not None: node = cast(DictionaryObject, node.get_object()) From d43d25b6f6c4fdd09424ccb369e14177175921c8 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Tue, 27 Aug 2024 09:39:21 +0200 Subject: [PATCH 34/43] coverage --- tests/test_writer.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/test_writer.py b/tests/test_writer.py index 25fb306238..794dd04694 100644 --- a/tests/test_writer.py +++ b/tests/test_writer.py @@ -2367,6 +2367,16 @@ def test_increment_writer(caplog): ) # Contains JBIG2 not decoded for the moment assert writer.list_objects_in_increment() == [] # no flowdown of properties + + # test writing with empty increment + b = BytesIO() + writer.write(b) + b.seek(0) + writer2 = PdfWriter(b, incremental=True) + assert len([x for x in writer2._objects if x is not None]) == len( + [x for x in writer._objects if x is not None] + ) + # modify one object writer.pages[0][NameObject("/MediaBox")] = ArrayObject( [NumberObject(0), NumberObject(0), NumberObject(864), NumberObject(648)] @@ -2378,6 +2388,9 @@ def test_increment_writer(caplog): [NumberObject(0), NumberObject(0), NumberObject(864), NumberObject(648)] ) assert len(writer.list_objects_in_increment()) == 2 + # modify object IndirectObject(5,0) : for coverage + writer.get_object(5)[NameObject("/ForTestOnly")] = NameObject("/ForTestOnly") + b = BytesIO() writer.write(b) assert b.getvalue().startswith(writer._reader.stream.getvalue()) @@ -2386,6 +2399,7 @@ def test_increment_writer(caplog): assert reader.pages[0]["/MediaBox"] == ArrayObject( [NumberObject(0), NumberObject(0), NumberObject(864), NumberObject(648)] ) + assert "/ForTestOnly" in reader.get_object(5) with pytest.raises(PyPdfError): writer = PdfWriter(reader, incremental=True) b.seek(0) From f55d33274575789c16c04ce02b75d77c727db2f7 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Tue, 27 Aug 2024 16:50:20 +0200 Subject: [PATCH 35/43] ENH: Robustify on missing font for Tf operator in text_extract() (#2816) Closes #2815. 
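In damaged files the Tf operand is not guaranteed to be a string, so building the fallback cmap name with plain concatenation could raise; formatting it instead keeps text extraction running. A minimal usage sketch, assuming a damaged sample file (the file name below is illustrative; the real regression file is fetched in the test added here):

    from pypdf import PdfReader

    # extract_text() should now return text instead of raising when a Tf
    # operator references a font that is missing or malformed
    reader = PdfReader("damaged-missing-font.pdf")
    print(reader.pages[0].extract_text())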
--- pypdf/_page.py | 2 +- tests/test_workflows.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/pypdf/_page.py b/pypdf/_page.py index c51aee1ab0..17ec04477f 100644 --- a/pypdf/_page.py +++ b/pypdf/_page.py @@ -1882,7 +1882,7 @@ def process_operation(operator: bytes, operands: List[Any]) -> None: cmap = ( unknown_char_map[2], unknown_char_map[3], - "???" + operands[0], + f"???{operands[0]}", None, ) try: diff --git a/tests/test_workflows.py b/tests/test_workflows.py index f01269893d..f307271e70 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -1298,3 +1298,12 @@ def test_extract_empty_page(): name = "iss2533.pdf" reader = PdfReader(BytesIO(get_data_from_url(url, name))) assert reader.pages[1].extract_text(extraction_mode="layout") == "" + + +@pytest.mark.enable_socket() +def test_iss2815(): + """Cf #2815""" + url = "https://github.com/user-attachments/files/16760725/crash-c1920c7a064649e1191d7879952ec252473fc7e6.pdf" + name = "iss2815.pdf" + reader = PdfReader(BytesIO(get_data_from_url(url, name))) + assert reader.pages[0].extract_text() == "test command with wrong number of args" From 38ea8c5598db08b573f451cae456fa55adf6fbe0 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Wed, 28 Aug 2024 07:17:43 +0200 Subject: [PATCH 36/43] ENH: Add UniGB-UTF16 encodings (#2819) Closes #2812. --- pypdf/_cmap.py | 2 ++ tests/test_cmap.py | 14 ++++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/pypdf/_cmap.py b/pypdf/_cmap.py index 035850a4a0..6c5996703f 100644 --- a/pypdf/_cmap.py +++ b/pypdf/_cmap.py @@ -126,6 +126,8 @@ def build_char_map_from_dict( "/ETenms-B5-V": "cp950", "/UniCNS-UTF16-H": "utf-16-be", "/UniCNS-UTF16-V": "utf-16-be", + "/UniGB-UTF16-H": "gb18030", + "/UniGB-UTF16-V": "gb18030", # UCS2 in code } diff --git a/tests/test_cmap.py b/tests/test_cmap.py index 9ec55723fa..8042d306eb 100644 --- a/tests/test_cmap.py +++ b/tests/test_cmap.py @@ -214,7 +214,6 @@ def test_eten_b5(): reader.pages[0].extract_text().startswith("1/7 \n富邦新終身壽險") -@pytest.mark.enable_socket() def test_missing_entries_in_cmap(): """ Issue #2702: this issue is observed on damaged pdfs @@ -231,10 +230,21 @@ def test_missing_entries_in_cmap(): def test_null_missing_width(): - """For coverage of 2792""" + """For coverage of #2792""" writer = PdfWriter(RESOURCE_ROOT / "crazyones.pdf") page = writer.pages[0] ft = page["/Resources"]["/Font"]["/F1"] ft[NameObject("/Widths")] = ArrayObject() ft["/FontDescriptor"][NameObject("/MissingWidth")] = NullObject() page.extract_text() + + +@pytest.mark.enable_socket() +def test_unigb_utf16(): + """Cf #2812""" + url = ( + "https://github.com/user-attachments/files/16767536/W020240105322424121296.pdf" + ) + name = "iss2812.pdf" + reader = PdfReader(BytesIO(get_data_from_url(url, name=name))) + assert "《中国能源展望 2060(2024 年版)》编写委员会" in reader.pages[1].extract_text() From 82eac7e316f8f785d00ed600f8ba4aba3296a4a8 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Thu, 29 Aug 2024 18:29:23 +0200 Subject: [PATCH 37/43] ROB: Robustify .set_data() (#2821) Cope with objects where the filter is ["/FlateDecode"] and/or where data has not been read yet. 
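A minimal sketch of the repaired call path, mirroring the regression test added below; the input file and the XFA index are illustrative assumptions, not part of the change:

    from pypdf import PdfWriter

    # assumed: a PDF carrying /AcroForm with an /XFA array
    writer = PdfWriter(clone_from="form-with-xfa.pdf")
    xfa_part = writer.root_object["/AcroForm"]["/XFA"][7]  # index 7 mirrors the test case
    # the stream is decoded on demand (via get_data) before its payload is replaced,
    # and the list form ["/FlateDecode"] of the filter is now accepted
    xfa_part.set_data(b"test")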
--- pypdf/generic/_data_structures.py | 6 ++++-- tests/test_generic.py | 16 ++++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/pypdf/generic/_data_structures.py b/pypdf/generic/_data_structures.py index 2c6e20e575..9ddd28d66a 100644 --- a/pypdf/generic/_data_structures.py +++ b/pypdf/generic/_data_structures.py @@ -1013,10 +1013,12 @@ def get_data(self) -> bytes: def set_data(self, data: bytes) -> None: # deprecated from ..filters import FlateDecode - if self.get(SA.FILTER, "") == FT.FLATE_DECODE: + if self.get(SA.FILTER, "") in (FT.FLATE_DECODE, [FT.FLATE_DECODE]): if not isinstance(data, bytes): raise TypeError("data must be bytes") - assert self.decoded_self is not None + if self.decoded_self is None: + self.get_data() # to create self.decoded_self + assert self.decoded_self is not None, "mypy" self.decoded_self.set_data(data) super().set_data(FlateDecode.encode(data)) else: diff --git a/tests/test_generic.py b/tests/test_generic.py index 6b8ae0151c..c14e249fe2 100644 --- a/tests/test_generic.py +++ b/tests/test_generic.py @@ -1326,6 +1326,22 @@ def test_encodedstream_set_data(): assert cc[NameObject("/Test")] == "/MyTest" +@pytest.mark.enable_socket() +def test_set_data_2(): + """ + Modify a stream not yet loaded and + where the filter is ["/FlateDecode"] + """ + url = "https://github.com/user-attachments/files/16796095/f5471sm-2.pdf" + name = "iss2780.pdf" + writer = PdfWriter(BytesIO(get_data_from_url(url, name=name))) + writer.root_object["/AcroForm"]["/XFA"][7].set_data(b"test") + assert writer.root_object["/AcroForm"]["/XFA"][7].get_object()["/Filter"] == [ + "/FlateDecode" + ] + assert writer.root_object["/AcroForm"]["/XFA"][7].get_object().get_data() == b"test" + + @pytest.mark.enable_socket() def test_calling_indirect_objects(): """Cope with cases where attributes/items are called from indirectObject""" From 14a93f1718b40beafea976e77ca9f2e71f2a1c4b Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Sun, 1 Sep 2024 15:28:41 +0200 Subject: [PATCH 38/43] move to X-reference stream for increment this prevents "repairation" within acrobat --- pypdf/_writer.py | 57 +++++++++++++++++++++++-------- pypdf/generic/_data_structures.py | 3 +- 2 files changed, 44 insertions(+), 16 deletions(-) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index d400cf5f03..86aa120c09 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -31,6 +31,7 @@ import enum import hashlib import re +import struct import uuid from io import BytesIO, FileIO, IOBase from itertools import compress @@ -1351,8 +1352,8 @@ def write_stream(self, stream: StreamType) -> None: if self.incremental: self._reader.stream.seek(0) stream.write(self._reader.stream.read(-1)) - xref_location = self._write_increment(stream) - self._write_trailer(stream, xref_location) + if len(self.list_objects_in_increment()) > 0: + self._write_increment(stream) # writes objs, Xref stream and startx else: object_positions, free_objects = self._write_pdf_structure(stream) xref_location = self._write_xref_table( @@ -1413,7 +1414,7 @@ def list_objects_in_increment(self) -> List[IndirectObject]: ) ] - def _write_increment(self, stream: StreamType) -> int: + def _write_increment(self, stream: StreamType) -> None: object_positions = {} object_blocks = [] current_start = -1 @@ -1448,14 +1449,41 @@ def _write_increment(self, stream: StreamType) -> int: object_blocks.append([current_start, current_stop - current_start]) # write incremented xref xref_location = stream.tell() - 
stream.write(b"xref\n") - stream.write(b"0 1\n") - stream.write(b"0000000000 65535 f \n") - for block in object_blocks: - stream.write(f"{block[0]} {block[1]}\n".encode()) - for i in range(block[0], block[0] + block[1]): - stream.write(f"{object_positions[i]:0>10} {0:0>5} n \n".encode()) - return xref_location + xr_id = len(self._objects) + 1 + stream.write(f"{xr_id} 0 obj".encode()) + init_data = { + NameObject("/Type"): NameObject("/XRef"), + NameObject("/Size"): NumberObject(xr_id + 1), + NameObject("/Root"): self.root_object.indirect_reference, + NameObject("/Filter"): NameObject("/FlateDecode"), + NameObject("/Index"): ArrayObject( + [NumberObject(_it) for _su in object_blocks for _it in _su] + ), + NameObject("/W"): ArrayObject( + [NumberObject(1), NumberObject(4), NumberObject(1)] + ), + "__streamdata__": b"", + } + if self._info is not None and ( + not self.incremental + or self._info.hash_bin() # kept for future + != self._original_hash[ + cast(IndirectObject, self._info.indirect_reference).idnum - 1 + ] + ): + init_data[NameObject(TK.INFO)] = self._info.indirect_reference + if self.incremental: # kept for future + init_data[NameObject(TK.PREV)] = NumberObject(self._reader._startxref) + elif self._ID: + init_data[NameObject(TK.ID)] = self._ID + xr = StreamObject.initialize_from_dictionary(init_data) + xr.set_data( + b"".join( + [struct.pack(b">BIB", 1, _pos, 0) for _pos in object_positions.values()] + ) + ) + xr.write_to_stream(stream) + stream.write(f"\nstartxref\n{xref_location}\n%%EOF\n".encode()) # eof def _write_pdf_structure(self, stream: StreamType) -> Tuple[List[int], List[int]]: object_positions = [] @@ -1507,12 +1535,11 @@ def _write_trailer(self, stream: StreamType, xref_location: int) -> None: { NameObject(TK.SIZE): NumberObject(len(self._objects) + 1), NameObject(TK.ROOT): self.root_object.indirect_reference, - NameObject(TK.INFO): self._info_obj, } ) - if self.incremental: - trailer[NameObject(TK.PREV)] = NumberObject(self._reader._startxref) - if self._ID: + if self._info is not None: + trailer[NameObject(TK.INFO)] = self._info.indirect_reference + if self._ID is not None: trailer[NameObject(TK.ID)] = self._ID if self._encrypt_entry: trailer[NameObject(TK.ENCRYPT)] = self._encrypt_entry.indirect_reference diff --git a/pypdf/generic/_data_structures.py b/pypdf/generic/_data_structures.py index d048da8cb6..fc71bf5bf0 100644 --- a/pypdf/generic/_data_structures.py +++ b/pypdf/generic/_data_structures.py @@ -948,7 +948,8 @@ def initialize_from_dictionary( retval = DecodedStreamObject() retval._data = data["__streamdata__"] del data["__streamdata__"] - del data[SA.LENGTH] + if SA.LENGTH in data: + del data[SA.LENGTH] retval.update(data) return retval From 53e141fe12f05b633f0289bbb5d3ad35d51a3e13 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Sun, 1 Sep 2024 15:54:09 +0200 Subject: [PATCH 39/43] coverage --- pypdf/_writer.py | 11 +++++------ tests/test_writer.py | 8 ++++++++ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index 86aa120c09..ad48882dcd 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -1464,17 +1464,16 @@ def _write_increment(self, stream: StreamType) -> None: ), "__streamdata__": b"", } - if self._info is not None and ( - not self.incremental - or self._info.hash_bin() # kept for future + if ( + self._info is not None + and self._info.hash_bin() # kept for future != self._original_hash[ cast(IndirectObject, self._info.indirect_reference).idnum - 1 ] ): 
init_data[NameObject(TK.INFO)] = self._info.indirect_reference - if self.incremental: # kept for future - init_data[NameObject(TK.PREV)] = NumberObject(self._reader._startxref) - elif self._ID: + init_data[NameObject(TK.PREV)] = NumberObject(self._reader._startxref) + if self._ID: init_data[NameObject(TK.ID)] = self._ID xr = StreamObject.initialize_from_dictionary(init_data) xr.set_data( diff --git a/tests/test_writer.py b/tests/test_writer.py index 794dd04694..6cedc9443a 100644 --- a/tests/test_writer.py +++ b/tests/test_writer.py @@ -2371,11 +2371,19 @@ def test_increment_writer(caplog): # test writing with empty increment b = BytesIO() writer.write(b) + with open( + RESOURCE_ROOT / "Seige_of_Vicksburg_Sample_OCR-crazyones-merged.pdf", "rb" + ) as f: + assert b.getvalue() == f.read(-1) b.seek(0) writer2 = PdfWriter(b, incremental=True) assert len([x for x in writer2._objects if x is not None]) == len( [x for x in writer._objects if x is not None] ) + writer2.add_metadata({"/Author": "test"}) + assert len(writer2.list_objects_in_increment()) == 1 + b = BytesIO() + writer2.write(b) # modify one object writer.pages[0][NameObject("/MediaBox")] = ArrayObject( From b4b7c1bf96cd468fdb7687f391c0238cfc38ad57 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Sun, 1 Sep 2024 16:09:25 +0200 Subject: [PATCH 40/43] coverage --- tests/test_writer.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_writer.py b/tests/test_writer.py index 6cedc9443a..7b9cbf003a 100644 --- a/tests/test_writer.py +++ b/tests/test_writer.py @@ -2440,3 +2440,5 @@ def test_increment_writer(caplog): writer = PdfWriter(RESOURCE_ROOT / "missing_info.pdf", incremental=True) assert len(writer.list_objects_in_increment()) == 1 assert writer._info == {} + b = BytesIO() + writer.write(b) From 7bc3abddae4fa04f4e8d416bb4280c1d0444bc38 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Sun, 1 Sep 2024 16:12:13 +0200 Subject: [PATCH 41/43] coverage --- pypdf/_writer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index ad48882dcd..74c066e507 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -1445,8 +1445,8 @@ def _write_increment(self, stream: StreamType) -> None: ) current_start = idnum current_stop = idnum + 1 - if current_start > 0: - object_blocks.append([current_start, current_stop - current_start]) + assert current_start > 0, "for pytest only" + object_blocks.append([current_start, current_stop - current_start]) # write incremented xref xref_location = stream.tell() xr_id = len(self._objects) + 1 From ffa2f0c5506a0aeae6139f606006df85cf05c421 Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Sun, 1 Sep 2024 16:55:09 +0200 Subject: [PATCH 42/43] fix --- pypdf/_writer.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index 74c066e507..409244727e 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -1464,11 +1464,16 @@ def _write_increment(self, stream: StreamType) -> None: ), "__streamdata__": b"", } - if ( - self._info is not None - and self._info.hash_bin() # kept for future + # below just to trick mypy for code simplification : will be reworked in next PR + assert isinstance( + cast(IndirectObject, self._info).indirect_reference, IndirectObject + ), "for mypy" + if self._info is not None and ( + cast(IndirectObject, self._info).indirect_reference.idnum - 1 + 
>= len(self._original_hash) + or cast(IndirectObject, self._info).hash_bin() # kept for future != self._original_hash[ - cast(IndirectObject, self._info.indirect_reference).idnum - 1 + cast(IndirectObject, self._info).indirect_reference.idnum - 1 ] ): init_data[NameObject(TK.INFO)] = self._info.indirect_reference From b072952b9c101a3530d07b1d4c1c975f1153352f Mon Sep 17 00:00:00 2001 From: pubpub-zz <4083478+pubpub-zz@users.noreply.github.com> Date: Sun, 1 Sep 2024 16:57:48 +0200 Subject: [PATCH 43/43] mypy --- pypdf/_writer.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pypdf/_writer.py b/pypdf/_writer.py index 409244727e..886fcbca70 100644 --- a/pypdf/_writer.py +++ b/pypdf/_writer.py @@ -1464,16 +1464,12 @@ def _write_increment(self, stream: StreamType) -> None: ), "__streamdata__": b"", } - # below just to trick mypy for code simplification : will be reworked in next PR - assert isinstance( - cast(IndirectObject, self._info).indirect_reference, IndirectObject - ), "for mypy" if self._info is not None and ( - cast(IndirectObject, self._info).indirect_reference.idnum - 1 + self._info.indirect_reference.idnum - 1 # type: ignore >= len(self._original_hash) or cast(IndirectObject, self._info).hash_bin() # kept for future != self._original_hash[ - cast(IndirectObject, self._info).indirect_reference.idnum - 1 + self._info.indirect_reference.idnum - 1 # type: ignore ] ): init_data[NameObject(TK.INFO)] = self._info.indirect_reference
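
Note: the cross-reference stream that _write_increment now appends packs one entry per written object using the /W [1 4 1] field widths declared in its dictionary: a 1-byte entry type, a 4-byte big-endian byte offset, and a 1-byte generation number, with type 1 marking an in-use object at that offset. The following is a minimal, self-contained sketch of that packing; the object numbers and offsets are invented for illustration and this is not pypdf's internal code:

    import struct
    import zlib

    # Hypothetical object numbers and byte offsets, purely for illustration.
    object_positions = {12: 0x0A10, 13: 0x0C40, 14: 0x0F00}

    # One entry per object, packed per /W [1 4 1]: type (1 byte), offset
    # (4 bytes, big-endian), generation (1 byte). Type 1 = in-use object.
    entries = b"".join(
        struct.pack(">BIB", 1, offset, 0) for offset in object_positions.values()
    )
    assert len(entries) == 6 * len(object_positions)  # 1 + 4 + 1 bytes each

    # The stream data is deflate-compressed before writing, matching the
    # /Filter /FlateDecode entry of the XRef stream dictionary.
    compressed = zlib.compress(entries)

    # /Index holds one (first object number, count) pair per contiguous block
    # of object numbers; here the three objects form a single block.
    index = [min(object_positions), len(object_positions)]
    print(index, len(entries), len(compressed))

A 4-byte big-endian offset field covers files up to 4 GiB, which is sufficient for typical documents, and the fixed-width binary entries compress well under /FlateDecode. Because the increment is appended after the original file bytes and terminated by its own startxref/%%EOF, Acrobat no longer offers to repair the document when re-saving it.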