diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml
index 54951d7668..41d85f5d6f 100644
--- a/crates/core/Cargo.toml
+++ b/crates/core/Cargo.toml
@@ -40,8 +40,9 @@ web-time = { workspace = true }
 webpki-roots = { workspace = true }
 
 [dev-dependencies]
-rstest = { workspace = true }
+bincode = { workspace = true }
 hex = { workspace = true }
+rstest = { workspace = true }
 tlsn-data-fixtures = { workspace = true }
 
 [[test]]
diff --git a/crates/core/src/connection.rs b/crates/core/src/connection.rs
index 30a30324b2..9c4f1a1bd3 100644
--- a/crates/core/src/connection.rs
+++ b/crates/core/src/connection.rs
@@ -314,11 +314,11 @@ impl ServerCertData {
             return Err(CertificateVerificationError::InvalidServerEphemeralKey);
         }
 
-        // Verify server name
+        // Verify server name.
         let server_name = tls_core::dns::ServerName::try_from(server_name.as_ref())
             .map_err(|_| CertificateVerificationError::InvalidIdentity(server_name.clone()))?;
 
-        // Verify server certificate
+        // Verify server certificate.
         let cert_chain = self
             .certs
             .clone()
diff --git a/crates/core/src/merkle.rs b/crates/core/src/merkle.rs
index ba48fd0d02..e4a9fe0a70 100644
--- a/crates/core/src/merkle.rs
+++ b/crates/core/src/merkle.rs
@@ -19,7 +19,7 @@ impl MerkleError {
 #[derive(Clone, Serialize, Deserialize)]
 pub(crate) struct MerkleProof {
     alg: HashAlgId,
-    tree_len: usize,
+    leaf_count: usize,
     proof: rs_merkle::MerkleProof<Hash>,
 }
 
@@ -55,13 +55,24 @@
             root.value,
             &indices,
             &leaves,
-            self.tree_len,
+            self.leaf_count,
         ) {
             return Err(MerkleError::new("invalid merkle proof"));
         }
 
         Ok(())
     }
+
+    /// Returns the leaf count of the Merkle tree associated with the proof.
+    pub(crate) fn leaf_count(&self) -> usize {
+        self.leaf_count
+    }
+
+    #[cfg(test)]
+    /// Sets the leaf count. Testing only.
+    pub(crate) fn set_leaf_count(&mut self, count: usize) {
+        self.leaf_count = count;
+    }
 }
 
 #[derive(Clone)]
@@ -118,15 +129,21 @@
     /// # Panics
     ///
     /// - If the provided indices are not unique and sorted.
+    /// - If the provided indices are out of bounds.
     pub(crate) fn proof(&self, indices: &[usize]) -> MerkleProof {
         assert!(
             indices.windows(2).all(|w| w[0] < w[1]),
             "indices must be unique and sorted"
         );
 
+        assert!(
+            *indices.last().unwrap() < self.tree.leaves_len(),
+            "one or more provided indices are out of bounds"
+        );
+
         MerkleProof {
             alg: self.alg,
-            tree_len: self.tree.leaves_len(),
+            leaf_count: self.tree.leaves_len(),
             proof: self.tree.proof(indices),
         }
     }
@@ -213,6 +230,21 @@ mod test {
         _ = tree.proof(&[2, 4, 3]);
     }
 
+    #[rstest]
+    #[case::sha2(Sha256::default())]
+    #[case::blake3(Blake3::default())]
+    #[case::keccak(Keccak256::default())]
+    #[should_panic]
+    fn test_proof_fail_index_out_of_bounds<H: HashAlgorithm>(#[case] hasher: H) {
+        let mut tree = MerkleTree::new(hasher.id());
+
+        let leaves = leaves(&hasher, [T(0), T(1), T(2), T(3), T(4)]);
+
+        tree.insert(&hasher, leaves.clone());
+
+        _ = tree.proof(&[2, 3, 4, 6]);
+    }
+
     #[rstest]
     #[case::sha2(Sha256::default())]
     #[case::blake3(Blake3::default())]
@@ -257,12 +289,20 @@
 
         tree.insert(&hasher, leaves.clone());
 
-        let mut proof = tree.proof(&[2, 3, 4]);
+        let mut proof1 = tree.proof(&[2, 3, 4]);
+        let mut proof2 = proof1.clone();
 
-        proof.tree_len += 1;
+        // Increment the `leaf_count` field.
+        proof1.leaf_count += 1;
+        // Decrement the `leaf_count` field.
+        proof2.leaf_count -= 1;
 
         // Fail because leaf count is wrong.
-        assert!(proof
+        assert!(proof1
+            .verify(&hasher, &tree.root(), choose_leaves([2, 3, 4], &leaves))
+            .is_err());
+
+        assert!(proof2
             .verify(&hasher, &tree.root(), choose_leaves([2, 3, 4], &leaves))
             .is_err());
     }
diff --git a/crates/core/src/transcript/commit.rs b/crates/core/src/transcript/commit.rs
index 3106d39960..fdc585b0a1 100644
--- a/crates/core/src/transcript/commit.rs
+++ b/crates/core/src/transcript/commit.rs
@@ -10,6 +10,15 @@ use crate::{
     transcript::{Direction, Idx, Transcript},
 };
 
+/// The maximum allowed total bytelength of committed data for a single
+/// commitment kind. Used to prevent DoS during verification. (May cause the
+/// verifier to hash up to a max of 1GB * 128 = 128GB of data for certain kinds
+/// of encoding commitments.)
+///
+/// This value must not exceed bcs's MAX_SEQUENCE_LENGTH limit (which is (1 <<
+/// 31) - 1 by default)
+pub(crate) const MAX_TOTAL_COMMITTED_DATA: usize = 1_000_000_000;
+
 /// Kind of transcript commitment.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
 pub enum TranscriptCommitmentKind {
diff --git a/crates/core/src/transcript/encoding.rs b/crates/core/src/transcript/encoding.rs
index 9041eba75c..b9d3cbbb3b 100644
--- a/crates/core/src/transcript/encoding.rs
+++ b/crates/core/src/transcript/encoding.rs
@@ -17,15 +17,6 @@ use serde::{Deserialize, Serialize};
 
 use crate::hash::{impl_domain_separator, TypedHash};
 
-/// The maximum allowed total bytelength of committed data for a single
-/// commitment kind. Used to prevent DoS during verification. (May cause the
-/// verifier to hash up to a max of 1GB * 128 = 128GB of data for certain kinds
-/// of encoding commitments.)
-///
-/// This value must not exceed bcs's MAX_SEQUENCE_LENGTH limit (which is (1 <<
-/// 31) - 1 by default)
-const MAX_TOTAL_COMMITTED_DATA: usize = 1_000_000_000;
-
 /// Transcript encoding commitment.
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct EncodingCommitment {
diff --git a/crates/core/src/transcript/encoding/proof.rs b/crates/core/src/transcript/encoding/proof.rs
index 231340c4b2..53ac7a13fe 100644
--- a/crates/core/src/transcript/encoding/proof.rs
+++ b/crates/core/src/transcript/encoding/proof.rs
@@ -7,9 +7,8 @@ use crate::{
     hash::{Blinded, Blinder, HashAlgorithmExt, HashProviderError},
     merkle::{MerkleError, MerkleProof},
     transcript::{
-        encoding::{
-            new_encoder, tree::EncodingLeaf, Encoder, EncodingCommitment, MAX_TOTAL_COMMITTED_DATA,
-        },
+        commit::MAX_TOTAL_COMMITTED_DATA,
+        encoding::{new_encoder, tree::EncodingLeaf, Encoder, EncodingCommitment},
         Direction, PartialTranscript, Subsequence,
     },
     CryptoProvider,
@@ -27,6 +26,7 @@ opaque_debug::implement!(Opening);
 
 /// An encoding commitment proof.
 #[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(try_from = "validation::EncodingProofUnchecked")]
 pub struct EncodingProof {
     /// The proof of inclusion of the commitment(s) in the Merkle tree of
     /// commitments.
@@ -182,3 +182,92 @@ impl From<MerkleError> for EncodingProofError {
         Self::new(ErrorKind::Proof, error)
     }
 }
+
+/// Invalid encoding proof error.
+#[derive(Debug, thiserror::Error)]
+#[error("invalid encoding proof: {0}")]
+pub struct InvalidEncodingProof(&'static str);
+
+mod validation {
+    use super::*;
+
+    /// The maximum allowed height of the Merkle tree of encoding commitments.
+    ///
+    /// The statistical security parameter (SSP) of the encoding commitment
+    /// protocol is calculated as "the number of uniformly random bits in a
+    /// single bit's encoding minus `MAX_HEIGHT`".
+    ///
+    /// For example, a bit encoding used in garbled circuits typically has 127
+    /// uniformly random bits, hence when using it in the encoding
+    /// commitment protocol, the SSP is 117 bits.
+    ///
+    /// DO NOT use bit encodings which have less than 50 uniformly random bits,
+    /// since the SSP < 40 bits is widely considered inadequate.
+    const MAX_HEIGHT: usize = 10;
+
+    #[derive(Debug, Deserialize)]
+    pub(super) struct EncodingProofUnchecked {
+        inclusion_proof: MerkleProof,
+        openings: HashMap<usize, Opening>,
+    }
+
+    impl TryFrom<EncodingProofUnchecked> for EncodingProof {
+        type Error = InvalidEncodingProof;
+
+        fn try_from(unchecked: EncodingProofUnchecked) -> Result<Self, Self::Error> {
+            if unchecked.inclusion_proof.leaf_count() > 1 << MAX_HEIGHT {
+                return Err(InvalidEncodingProof(
+                    "the height of the tree exceeds the maximum allowed",
+                ));
+            }
+
+            Ok(Self {
+                inclusion_proof: unchecked.inclusion_proof,
+                openings: unchecked.openings,
+            })
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{
+        hash::{impl_domain_separator, Hash, HashAlgorithm, Sha256},
+        merkle::MerkleTree,
+    };
+
+    #[derive(Serialize)]
+    struct T(u64);
+
+    impl_domain_separator!(T);
+
+    fn leaves<H: HashAlgorithm>(hasher: &H, leaves: impl IntoIterator<Item = T>) -> Vec<Hash> {
+        leaves
+            .into_iter()
+            .map(|x| hasher.hash_canonical(&x))
+            .collect()
+    }
+
+    #[test]
+    // Expect to fail since EncodingProof did not pass validation.
+    fn test_proof_validation_fail() {
+        let hasher = Sha256::default();
+
+        let mut tree = MerkleTree::new(hasher.id());
+        tree.insert(&hasher, leaves(&hasher, [T(0)]));
+
+        let mut proof = tree.proof(&[0]);
+        proof.set_leaf_count((1 << 20) + 1);
+
+        let proof = EncodingProof {
+            inclusion_proof: proof,
+            openings: HashMap::default(),
+        };
+
+        let bytes = bincode::serialize(&proof).expect("proof should be serializable");
+
+        let proof: Result<EncodingProof, Box<bincode::ErrorKind>> = bincode::deserialize(&bytes);
+        assert!(proof.is_err())
+    }
+}
diff --git a/crates/core/src/transcript/proof.rs b/crates/core/src/transcript/proof.rs
index 980987dd68..d3317cc096 100644
--- a/crates/core/src/transcript/proof.rs
+++ b/crates/core/src/transcript/proof.rs
@@ -10,7 +10,7 @@ use crate::{
     hash::Blinded,
     index::Index,
     transcript::{
-        commit::TranscriptCommitmentKind,
+        commit::{TranscriptCommitmentKind, MAX_TOTAL_COMMITTED_DATA},
         encoding::{EncodingProof, EncodingProofError, EncodingTree},
         hash::{PlaintextHashProof, PlaintextHashProofError, PlaintextHashSecret},
         Direction, Idx, PartialTranscript, Transcript,
@@ -61,6 +61,8 @@
         }
 
         // Verify hash openings.
+        let mut total_opened = 0u128;
+
         for proof in self.hash_proofs {
             let commitment = attestation_body
                 .plaintext_hashes()
@@ -73,6 +75,15 @@
                     )
                 })?;
 
+            // Make sure the amount of data being proved is bounded.
+            total_opened += commitment.idx.len() as u128;
+            if total_opened > MAX_TOTAL_COMMITTED_DATA as u128 {
+                return Err(TranscriptProofError::new(
+                    ErrorKind::Hash,
+                    "exceeded maximum allowed data",
+                ))?;
+            }
+
             let (direction, seq) = proof.verify(&provider.hash, commitment)?;
             transcript.union_subsequence(direction, &seq);
         }
diff --git a/crates/verifier/src/verify.rs b/crates/verifier/src/verify.rs
index e3ebf893c3..b822295f3d 100644
--- a/crates/verifier/src/verify.rs
+++ b/crates/verifier/src/verify.rs
@@ -94,7 +94,7 @@ impl Verifier<Verify> {
             data,
         } = mux_fut
            .poll_with(async {
-                // Finalize all MPC
+                // Finalize all MPC.
                 ot_send.reveal(&mut ctx).await?;
 
                 vm.finalize().await?;
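For reviewers unfamiliar with serde's `try_from` container attribute, here is a minimal standalone sketch of the validation-on-deserialize pattern the new `EncodingProofUnchecked` type relies on. This is not code from this PR: the `Proof`/`ProofUnchecked` names, the `String` error type, and the use of serde_json are illustrative placeholders; the idea is only that a proof claiming too many leaves is rejected at deserialization time, before the verifier does any hashing work.

```rust
use serde::Deserialize;

// Mirrors the PR's bound: trees taller than MAX_HEIGHT (i.e. more than
// 2^10 = 1024 leaves) are rejected.
const MAX_HEIGHT: usize = 10;

// Deserialization is routed through the unchecked type and then converted
// via TryFrom, so an invalid proof never materializes as `Proof`.
#[derive(Debug, Deserialize)]
#[serde(try_from = "ProofUnchecked")]
struct Proof {
    leaf_count: usize,
}

#[derive(Debug, Deserialize)]
struct ProofUnchecked {
    leaf_count: usize,
}

impl TryFrom<ProofUnchecked> for Proof {
    type Error = String;

    fn try_from(unchecked: ProofUnchecked) -> Result<Self, Self::Error> {
        // Reject any tree with more than 2^MAX_HEIGHT leaves.
        if unchecked.leaf_count > 1 << MAX_HEIGHT {
            return Err("the height of the tree exceeds the maximum allowed".to_string());
        }
        Ok(Self {
            leaf_count: unchecked.leaf_count,
        })
    }
}

fn main() {
    // A proof claiming 2^20 + 1 leaves (as in the PR's test) fails to deserialize.
    let oversized = r#"{ "leaf_count": 1048577 }"#;
    assert!(serde_json::from_str::<Proof>(oversized).is_err());

    // A proof within the bound deserializes normally.
    let ok = r#"{ "leaf_count": 8 }"#;
    assert!(serde_json::from_str::<Proof>(ok).is_ok());
}
```

The same conversion runs regardless of the wire format (the PR's test exercises it through bincode), because the `try_from` hook lives in the derived `Deserialize` impl rather than in any particular serializer.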