From 3087a98bc7f52c4d29e1142d18a0a15fb640dbae Mon Sep 17 00:00:00 2001 From: jsvisa Date: Tue, 23 Sep 2025 15:55:17 +0800 Subject: [PATCH 1/5] feat(decode): check the data length in non-strict mode --- eth_abi/decoding.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/eth_abi/decoding.py b/eth_abi/decoding.py index 0874f7a313..3595af9a08 100644 --- a/eth_abi/decoding.py +++ b/eth_abi/decoding.py @@ -607,6 +607,13 @@ def read_data_from_stream(self, stream): raise NonEmptyPaddingBytes( f"Padding bytes were not empty: {repr(padding_bytes)}" ) + else: + # In non-strict mode, be more flexible with truncated data + if len(data) < data_length: + # Not even enough data for the declared content length + raise InsufficientDataBytes( + f"Tried to read {data_length} bytes of content, only got {len(data)} bytes" + ) return data[:data_length] From 9c9a81ee90b21670c0b36a540c3386533d147986 Mon Sep 17 00:00:00 2001 From: jsvisa Date: Tue, 23 Sep 2025 15:56:01 +0800 Subject: [PATCH 2/5] fix(registry): recursive set strict for nested decoders --- eth_abi/registry.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/eth_abi/registry.py b/eth_abi/registry.py index a145c1f44a..17a8b66a34 100644 --- a/eth_abi/registry.py +++ b/eth_abi/registry.py @@ -481,8 +481,42 @@ def _get_decoder_uncached(self, type_str, strict=True): # other decoders should keep the default value of ``True``. decoder.strict = strict + # Recursively set strict on nested decoders + self._set_strict_on_nested_decoders(decoder, strict) + return decoder + def _set_strict_on_nested_decoders(self, decoder, strict): + """ + Recursively set the strict attribute on nested decoders.
+ """ + if hasattr(decoder, "item_decoder") and decoder.item_decoder: + if hasattr(decoder.item_decoder, "strict"): + decoder.item_decoder.strict = strict + if ( + hasattr(decoder.item_decoder, "tail_decoder") + and decoder.item_decoder.tail_decoder + ): + if hasattr(decoder.item_decoder.tail_decoder, "strict"): + decoder.item_decoder.tail_decoder.strict = strict + self._set_strict_on_nested_decoders( + decoder.item_decoder.tail_decoder, strict + ) + + if hasattr(decoder, "decoders") and decoder.decoders: + for nested_decoder in decoder.decoders: + if hasattr(nested_decoder, "strict"): + nested_decoder.strict = strict + if ( + hasattr(nested_decoder, "tail_decoder") + and nested_decoder.tail_decoder + ): + if hasattr(nested_decoder.tail_decoder, "strict"): + nested_decoder.tail_decoder.strict = strict + self._set_strict_on_nested_decoders( + nested_decoder.tail_decoder, strict + ) + def copy(self): """ Copies a registry such that new registrations can be made or existing From aff70adeb4381e3f6d7d18db372b4f1cff24d1a8 Mon Sep 17 00:00:00 2001 From: jsvisa Date: Tue, 23 Sep 2025 15:56:42 +0800 Subject: [PATCH 3/5] tests: add decode missing padding test --- tests/core/abi_tests/test_decode.py | 50 +++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/tests/core/abi_tests/test_decode.py b/tests/core/abi_tests/test_decode.py index 3734b4a5ed..477dfa8b37 100644 --- a/tests/core/abi_tests/test_decode.py +++ b/tests/core/abi_tests/test_decode.py @@ -198,6 +198,56 @@ def test_abi_decode_with_shorter_data_than_32_bytes(types, hex_data, expected): decode(types, bytes.fromhex(hex_data)) +def test_abi_decode_bytes_array_with_truncated_padding(): + """ + Test that bytes[] arrays with complete content but missing padding + can be decoded in non-strict mode but raise an error in strict mode. + + This is a regression test for the fix that allows decoding ABI-encoded + bytes[] data where content is complete but trailing padding is missing.
+ """ + # Data from decode_multicall.py - represents bytes[] with truncated padding + data = bytes.fromhex( + "0000000000000000000000000000000000000000000000000000000000000020" # offset to bytes[] array + "0000000000000000000000000000000000000000000000000000000000000001" # array length (1) + "0000000000000000000000000000000000000000000000000000000000000020" # offset to first element + "0000000000000000000000000000000000000000000000000000000000000104" # element length (260 bytes) + # 260 bytes of content (truncated from original due to missing padding) + "b858183f0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000080" + "00000000000000000000000078ba4c2b0cc3385ca967d250b2313f187d5002f3" + "000000000000000000000000000000000000000000000000000000000a59d6f2" + "000000000000000000000000000000000000000000000000008f3316b7531da4" + "000000000000000000000000000000000000000000000000000000000000002b" + "a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48000064c02aaa39b223fe8d0a" + "0e5c4f27ead9083c756cc2000000000000000000000000000000000000000000" + # Note: Missing ~26 bytes of padding here that would normally pad to 288 total bytes + ) + + with pytest.raises(InsufficientDataBytes, match="Tried to read 288 bytes"): + decode(["bytes[]"], data, strict=True) + + result = decode(["bytes[]"], data, strict=False) + + # Validate the structure + assert len(result) == 1 # One top-level element (the bytes[] array) + assert len(result[0]) == 1 # One element in the bytes[] array + assert len(result[0][0]) == 260 # The element should be 260 bytes + + # Validate the decoded content matches expected hex + expected_hex = ( + "b858183f0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000080" + "00000000000000000000000078ba4c2b0cc3385ca967d250b2313f187d5002f3" + "000000000000000000000000000000000000000000000000000000000a59d6f2" +
"000000000000000000000000000000000000000000000000008f3316b7531da4" + "000000000000000000000000000000000000000000000000000000000000002b" + "a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48000064c02aaa39b223fe8d0a" + "0e5c4f27ead9083c756cc2000000000000000000000000000000000000000000" + ) + assert result[0][0].hex() == expected_hex + + @pytest.mark.parametrize( "typestring,malformed_payload", ( From 2100b65d8cb9aca320ed0ea681235299a195e394 Mon Sep 17 00:00:00 2001 From: jsvisa Date: Tue, 23 Sep 2025 16:32:32 +0800 Subject: [PATCH 4/5] fix: copy a decoder --- eth_abi/registry.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/eth_abi/registry.py b/eth_abi/registry.py index 17a8b66a34..7f6b6a623a 100644 --- a/eth_abi/registry.py +++ b/eth_abi/registry.py @@ -476,6 +476,10 @@ def _get_decoder_uncached(self, type_str, strict=True): decoder = self._get_registration(self._decoders, type_str) if hasattr(decoder, "is_dynamic") and decoder.is_dynamic: + # Create a copy of the decoder to avoid mutating shared cached instances + # This prevents issues where strict=False calls affect subsequent strict=True calls + decoder = copy.deepcopy(decoder) + # Set a transient flag each time a call is made to ``get_decoder()``.
From ae31aa356a4ba7e1fb57e7365113764d417d445f Mon Sep 17 00:00:00 2001 From: jsvisa Date: Tue, 23 Sep 2025 16:50:17 +0800 Subject: [PATCH 5/5] flake8 --- eth_abi/decoding.py | 2 +- eth_abi/registry.py | 3 ++- tests/core/abi_tests/test_decode.py | 10 +++++----- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/eth_abi/decoding.py b/eth_abi/decoding.py index 3595af9a08..1fcf16e559 100644 --- a/eth_abi/decoding.py +++ b/eth_abi/decoding.py @@ -612,7 +612,7 @@ def read_data_from_stream(self, stream): if len(data) < data_length: # Not even enough data for the declared content length raise InsufficientDataBytes( - f"Tried to read {data_length} bytes of content, only got {len(data)} bytes" + f"Tried to read {data_length} bytes, only got {len(data)} bytes" ) return data[:data_length] diff --git a/eth_abi/registry.py b/eth_abi/registry.py index 7f6b6a623a..36a09c859a 100644 --- a/eth_abi/registry.py +++ b/eth_abi/registry.py @@ -477,7 +477,8 @@ def _get_decoder_uncached(self, type_str, strict=True): if hasattr(decoder, "is_dynamic") and decoder.is_dynamic: # Create a copy of the decoder to avoid mutating shared cached instances - # This prevents issues where strict=False calls affect subsequent strict=True calls + # This prevents issues where strict=False calls affect subsequent + # strict=True calls decoder = copy.deepcopy(decoder) # Set a transient flag each time a call is made to ``get_decoder()``.
diff --git a/tests/core/abi_tests/test_decode.py b/tests/core/abi_tests/test_decode.py index 477dfa8b37..432af1d217 100644 --- a/tests/core/abi_tests/test_decode.py +++ b/tests/core/abi_tests/test_decode.py @@ -208,10 +208,10 @@ def test_abi_decode_bytes_array_with_truncated_padding(): """ # Data from decode_multicall.py - represents bytes[] with truncated padding data = bytes.fromhex( - "0000000000000000000000000000000000000000000000000000000000000020" # offset to bytes[] array - "0000000000000000000000000000000000000000000000000000000000000001" # array length (1) - "0000000000000000000000000000000000000000000000000000000000000020" # offset to first element - "0000000000000000000000000000000000000000000000000000000000000104" # element length (260 bytes) + "0000000000000000000000000000000000000000000000000000000000000020" # offset to bytes[] array # noqa: E501 + "0000000000000000000000000000000000000000000000000000000000000001" # array length (1) # noqa: E501 + "0000000000000000000000000000000000000000000000000000000000000020" # offset to first element # noqa: E501 + "0000000000000000000000000000000000000000000000000000000000000104" # element length (260 bytes) # noqa: E501 # 260 bytes of content (truncated from original due to missing padding) "b858183f0000000000000000000000000000000000000000000000000000000000000020" "0000000000000000000000000000000000000000000000000000000000000080" @@ -221,7 +221,7 @@ def test_abi_decode_bytes_array_with_truncated_padding(): "000000000000000000000000000000000000000000000000000000000000002b" "a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48000064c02aaa39b223fe8d0a" "0e5c4f27ead9083c756cc2000000000000000000000000000000000000000000" - # Note: Missing ~26 bytes of padding here that would normally pad to 288 total bytes + # Note: Missing ~26 bytes of padding here that would normally pad to 288 total bytes # noqa: E501 ) with pytest.raises(InsufficientDataBytes, match="Tried to read 288 bytes"):