diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index 27184ff6e..a30fa96a8 100644 --- a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,6 +4,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [0.1.4] - 2023-01-28 +- Fixed unnecessary decoding and allocations for bounded types when the decoded length is greater than the allowed bound. +- Added `Hash` derivation (when `feature = "std"`) for bounded types. + ## [0.1.3] - 2023-01-27 - Removed non-existent `bounded` mod reference. [#715](https://github.com/paritytech/parity-common/pull/715) @@ -16,4 +20,4 @@ The format is based on [Keep a Changelog]. ## [0.1.0] - 2023-01-26 - Wrote better description for `bounded-collections`. [#709](https://github.com/paritytech/parity-common/pull/709) -- Added `bounded-collections` crate. [#708](https://github.com/paritytech/parity-common/pull/708) \ No newline at end of file +- Added `bounded-collections` crate. 
[#708](https://github.com/paritytech/parity-common/pull/708) diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index 7f40fe474..ffd739673 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.1.3" +version = "0.1.4" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -10,7 +10,7 @@ rust-version = "1.60.0" [dependencies] serde = { version = "1.0.101", default-features = false, optional = true } -codec = { version = "3.0.0", default-features = false, features = ["max-encoded-len"], package = "parity-scale-codec" } +codec = { version = "3.3.0", default-features = false, features = ["max-encoded-len"], package = "parity-scale-codec" } scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false } log = { version = "0.4.17", default-features = false } diff --git a/bounded-collections/src/bounded_btree_map.rs b/bounded-collections/src/bounded_btree_map.rs index e298bc60e..f306fe956 100644 --- a/bounded-collections/src/bounded_btree_map.rs +++ b/bounded-collections/src/bounded_btree_map.rs @@ -19,7 +19,7 @@ use crate::{Get, TryCollect}; use alloc::collections::BTreeMap; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Compact, Decode, Encode, MaxEncodedLen}; use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; /// A bounded map based on a B-Tree. @@ -29,6 +29,7 @@ use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; /// /// Unlike a standard `BTreeMap`, there is an enforced upper limit to the number of items in the /// map. All internal operations ensure this bound is respected. 
+#[cfg_attr(feature = "std", derive(Hash))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] pub struct BoundedBTreeMap(BTreeMap, PhantomData); @@ -40,10 +41,15 @@ where S: Get, { fn decode(input: &mut I) -> Result { - let inner = BTreeMap::::decode(input)?; - if inner.len() > S::get() as usize { + // Same as the underlying implementation for `Decode` on `BTreeMap`, except we fail early if + // the len is too big. + let len: u32 = >::decode(input)?.into(); + if len > S::get() { return Err("BoundedBTreeMap exceeds its limit".into()) } + input.descend_ref()?; + let inner = Result::from_iter((0..len).map(|_| Decode::decode(input)))?; + input.ascend_ref(); Ok(Self(inner, PhantomData)) } @@ -394,6 +400,7 @@ mod test { use super::*; use crate::ConstU32; use alloc::{vec, vec::Vec}; + use codec::CompactLen; fn map_from_keys(keys: &[K]) -> BTreeMap where @@ -410,6 +417,14 @@ mod test { map_from_keys(keys).try_into().unwrap() } + #[test] + fn encoding_same_as_unbounded_map() { + let b = boundedmap_from_keys::>(&[1, 2, 3, 4, 5, 6]); + let m = map_from_keys(&[1, 2, 3, 4, 5, 6]); + + assert_eq!(b.encode(), m.encode()); + } + #[test] fn try_insert_works() { let mut bounded = boundedmap_from_keys::>(&[1, 2, 3]); @@ -460,6 +475,16 @@ mod test { ); } + #[test] + fn dont_consume_more_data_than_bounded_len() { + let m = map_from_keys(&[1, 2, 3, 4, 5, 6]); + let data = m.encode(); + let data_input = &mut &data[..]; + + BoundedBTreeMap::>::decode(data_input).unwrap_err(); + assert_eq!(data_input.len(), data.len() - Compact::::compact_len(&(data.len() as u32))); + } + #[test] fn unequal_eq_impl_insert_works() { // given a struct with a strange notion of equality diff --git a/bounded-collections/src/bounded_btree_set.rs b/bounded-collections/src/bounded_btree_set.rs index a4876fcd3..654ae1b3b 100644 --- a/bounded-collections/src/bounded_btree_set.rs +++ b/bounded-collections/src/bounded_btree_set.rs @@ -19,7 +19,7 @@ use crate::{Get, TryCollect}; use 
alloc::collections::BTreeSet; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Compact, Decode, Encode, MaxEncodedLen}; use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; /// A bounded set based on a B-Tree. @@ -29,6 +29,7 @@ use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; /// /// Unlike a standard `BTreeSet`, there is an enforced upper limit to the number of items in the /// set. All internal operations ensure this bound is respected. +#[cfg_attr(feature = "std", derive(Hash))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] pub struct BoundedBTreeSet(BTreeSet, PhantomData); @@ -39,10 +40,15 @@ where S: Get, { fn decode(input: &mut I) -> Result { - let inner = BTreeSet::::decode(input)?; - if inner.len() > S::get() as usize { + // Same as the underlying implementation for `Decode` on `BTreeSet`, except we fail early if + // the len is too big. + let len: u32 = >::decode(input)?.into(); + if len > S::get() { return Err("BoundedBTreeSet exceeds its limit".into()) } + input.descend_ref()?; + let inner = Result::from_iter((0..len).map(|_| Decode::decode(input)))?; + input.ascend_ref(); Ok(Self(inner, PhantomData)) } @@ -324,6 +330,7 @@ mod test { use super::*; use crate::ConstU32; use alloc::{vec, vec::Vec}; + use codec::CompactLen; fn set_from_keys(keys: &[T]) -> BTreeSet where @@ -340,6 +347,14 @@ mod test { set_from_keys(keys).try_into().unwrap() } + #[test] + fn encoding_same_as_unbounded_set() { + let b = boundedset_from_keys::>(&[1, 2, 3, 4, 5, 6]); + let m = set_from_keys(&[1, 2, 3, 4, 5, 6]); + + assert_eq!(b.encode(), m.encode()); + } + #[test] fn try_insert_works() { let mut bounded = boundedset_from_keys::>(&[1, 2, 3]); @@ -390,6 +405,16 @@ mod test { ); } + #[test] + fn dont_consume_more_data_than_bounded_len() { + let s = set_from_keys(&[1, 2, 3, 4, 5, 6]); + let data = s.encode(); + let data_input = &mut &data[..]; + + BoundedBTreeSet::>::decode(data_input).unwrap_err(); + 
assert_eq!(data_input.len(), data.len() - Compact::::compact_len(&(data.len() as u32))); + } + #[test] fn unequal_eq_impl_insert_works() { // given a struct with a strange notion of equality diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs index 5122ed3f4..4bbe389f3 100644 --- a/bounded-collections/src/bounded_vec.rs +++ b/bounded-collections/src/bounded_vec.rs @@ -21,7 +21,7 @@ use super::WeakBoundedVec; use crate::{Get, TryCollect}; use alloc::{boxed::Box, vec::Vec}; -use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; +use codec::{decode_vec_with_len, Compact, Decode, Encode, EncodeLike, MaxEncodedLen}; use core::{ marker::PhantomData, ops::{Deref, Index, IndexMut, RangeBounds}, @@ -40,7 +40,7 @@ use serde::{ /// /// As the name suggests, the length of the queue is always bounded. All internal operations ensure /// this bound is respected. -#[cfg_attr(feature = "std", derive(Serialize), serde(transparent))] +#[cfg_attr(feature = "std", derive(Hash, Serialize), serde(transparent))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] pub struct BoundedVec(pub(super) Vec, #[cfg_attr(feature = "std", serde(skip_serializing))] PhantomData); @@ -108,6 +108,7 @@ where /// A bounded slice. /// /// Similar to a `BoundedVec`, but not owned and cannot be decoded. +#[cfg_attr(feature = "std", derive(Hash))] #[derive(Encode)] pub struct BoundedSlice<'a, T, S>(pub(super) &'a [T], PhantomData); @@ -290,10 +291,13 @@ impl<'a, T, S: Get> BoundedSlice<'a, T, S> { impl> Decode for BoundedVec { fn decode(input: &mut I) -> Result { - let inner = Vec::::decode(input)?; - if inner.len() > S::get() as usize { + // Same as the underlying implementation for `Decode` on `Vec`, except we fail early if the + // len is too big. 
+ let len: u32 = >::decode(input)?.into(); + if len > S::get() { return Err("BoundedVec exceeds its limit".into()) } + let inner = decode_vec_with_len(input, len as usize)?; Ok(Self(inner, PhantomData)) } @@ -904,6 +908,15 @@ where mod test { use super::*; use crate::{bounded_vec, ConstU32}; + use codec::CompactLen; + + #[test] + fn encoding_same_as_unbounded_vec() { + let b: BoundedVec> = bounded_vec![0, 1, 2, 3, 4, 5]; + let v: Vec = vec![0, 1, 2, 3, 4, 5]; + + assert_eq!(b.encode(), v.encode()); + } #[test] fn slice_truncate_from_works() { @@ -1101,6 +1114,16 @@ mod test { ); } + #[test] + fn dont_consume_more_data_than_bounded_len() { + let v: Vec = vec![1, 2, 3, 4, 5]; + let data = v.encode(); + let data_input = &mut &data[..]; + + BoundedVec::>::decode(data_input).unwrap_err(); + assert_eq!(data_input.len(), data.len() - Compact::::compact_len(&(data.len() as u32))); + } + #[test] fn eq_works() { // of same type diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml index 50c5d2180..5f41774c1 100644 --- a/primitive-types/impls/codec/Cargo.toml +++ b/primitive-types/impls/codec/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" rust-version = "1.56.1" [dependencies] -parity-scale-codec = { version = "3.0.0", default-features = false, features = ["max-encoded-len"] } +parity-scale-codec = { version = "3.3.0", default-features = false, features = ["max-encoded-len"] } [features] default = ["std"]