diff --git a/Cargo.toml b/Cargo.toml
index 012984c..30116fa 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,10 +11,19 @@ repository = "https://github.com/diba-io/carbonado.git"
 [dependencies]
 anyhow = "1"
 bao = "0.12.1"
+bech32 = "0.9"
+bitmask-enum = "2.1.0"
 combination = "0.2.2"
 ecies = { version = "0.2.2", default-features = false, features = ["pure"] }
+hex = "0.4"
 log = "0.4"
 pretty_env_logger = "0.4"
+secp256k1 = { version = "0.25.0", features = [
+    "global-context",
+    "rand-std",
+    "bitcoin-hashes-std",
+    "serde",
+] }
 serde = "1"
 snap = "1"
 zfec-rs = "0.1.0"
diff --git a/README.md b/README.md
index 9da379f..e106c75 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,10 @@ Carbonado has features to make it resistant against:
 
 All without needing a blockchain, however, they can be useful for periodically checkpointing data in a durable place.
 
+### Documentation
+
+More detailed information on formats and operations can be found in the [carbonado crate docs](https://docs.rs/carbonado), hosted on docs.rs.
+
 ### Checkpoints
 
 Carbonado supports an optional Bitcoin-compatible HD wallet with a specific derivation path that can be used to secure timestamped Carbonado Checkpoints using an on-chain OP_RETURN.
diff --git a/src/constants.rs b/src/constants.rs
index e8e3c87..98c572c 100644
--- a/src/constants.rs
+++ b/src/constants.rs
@@ -1,3 +1,53 @@
-pub const SLICE_LEN: usize = 1024; // Bao slice length
+use bitmask_enum::bitmask;
+
+pub const SLICE_LEN: u16 = 1024; // Bao slice length
 pub const FEC_K: usize = 4; // Zfec chunks needed
 pub const FEC_M: usize = 8; // Zfec chunks produced
+
+/// ## Bitmask for Carbonado formats c0-c15
+///
+/// | Format | Encryption | Compression | Verifiability | Error correction | Use-cases |
+/// |-----|----|----|----|----|----|
+/// | c0  |    |    |    |    | Marks a file as scanned by Carbonado |
+/// | c1  | ✅ |    |    |    | Encrypted incompressible throwaway append-only data streams such as CCTV footage |
+/// | c2  |    | ✅ |    |    | Rotating public logs |
+/// | c3  | ✅ | ✅ |    |    | Private archives |
+/// | c4  |    |    | ✅ |    | Unencrypted incompressible data such as NFT/UDA image assets |
+/// | c5  | ✅ |    | ✅ |    | Private media backups |
+/// | c6  |    | ✅ | ✅ |    | Compiled binaries |
+/// | c7  | ✅ | ✅ | ✅ |    | Full drive backups |
+/// | c8  |    |    |    | ✅ | ??? |
+/// | c9  | ✅ |    |    | ✅ | ??? |
+/// | c10 |    | ✅ |    | ✅ | ??? |
+/// | c11 | ✅ | ✅ |    | ✅ | Encrypted device-local Catalogs |
+/// | c12 |    |    | ✅ | ✅ | Publicly-available archival media |
+/// | c13 | ✅ |    | ✅ | ✅ | Georedundant private media backups |
+/// | c14 |    | ✅ | ✅ | ✅ | Source code, token genesis |
+/// | c15 | ✅ | ✅ | ✅ | ✅ | Contract data |
+///
+/// These operations correspond to the following implementations:
+///
+/// | Implementation | Operation |
+/// |-------|-------|
+/// | ecies | Encryption |
+/// | snap  | Compression |
+/// | bao   | Verifiability |
+/// | zfec  | Error correction |
+///
+/// While the implementations are called in a different order, as outlined in [encoding::encode](crate::encode), operations are ordered this way in the bitmask to make the format more intuitive.
+///
+/// Verifiability is needed to pay others for storing or hosting your files, but it inhibits use-cases for mutable or append-only data other than snapshots, since the hash will change so frequently. Bao encoding does not have a large overhead, about 5% at most.
+///
+/// Any data that is verifiable but also unencrypted is instead signed by the local key. This is good for signed compiled binaries or hosted webpages.
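+///
+/// A rough sketch of how format bits combine (variant names are defined on the enum below):
+///
+/// ```ignore
+/// // c15 sets all four bits: Ecies | Snappy | Bao | Zfec.
+/// let c15 = Format::try_from(15)?;
+/// assert!(c15.contains(Format::Ecies | Format::Snappy | Format::Bao | Format::Zfec));
+/// ```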
+#[bitmask(u8)]
+pub enum Format {
+    Ecies,
+    Snappy,
+    Bao,
+    Zfec,
+}
+
+/// "Magic number" used by the Carbonado file format.
+pub const MAGICNO: [u8; 12] = [
+    b'C', b'A', b'R', b'B', b'O', b'N', b'A', b'D', b'O', b'0', b'0', b'\n',
+];
diff --git a/src/decode.rs b/src/decoding.rs
similarity index 67%
rename from src/decode.rs
rename to src/decoding.rs
index b18a334..1b0b3fb 100644
--- a/src/decode.rs
+++ b/src/decoding.rs
@@ -12,24 +12,24 @@
 use snap::read::FrameDecoder;
 use zfec_rs::{Chunk, Fec};
 
 use crate::{
-    constants::{FEC_K, FEC_M, SLICE_LEN},
-    encode,
+    constants::{Format, FEC_K, FEC_M, SLICE_LEN},
+    encoding,
     structs::EncodeInfo,
     utils::decode_bao_hash,
 };
 
-fn zfec_chunks(chunked_bytes: Vec<Vec<u8>>, padding: usize) -> Result<Vec<u8>> {
+fn zfec_chunks(chunked_bytes: Vec<Vec<u8>>, padding: u32) -> Result<Vec<u8>> {
     let mut zfec_chunks = vec![];
     for (i, chunk) in chunked_bytes.into_iter().enumerate() {
         zfec_chunks.push(Chunk::new(chunk, i));
     }
     let fec = Fec::new(FEC_K, FEC_M)?;
-    let decoded = fec.decode(&zfec_chunks, padding)?;
+    let decoded = fec.decode(&zfec_chunks, padding as usize)?;
     Ok(decoded)
 }
 
 /// Zfec forward error correction decoding
-pub fn zfec(input: &[u8], padding: usize) -> Result<Vec<u8>> {
+pub fn zfec(input: &[u8], padding: u32) -> Result<Vec<u8>> {
     let input_len = input.len();
     if input_len % FEC_M != 0 {
         return Err(anyhow!(
@@ -48,41 +48,68 @@ pub fn zfec(input: &[u8], padding: u32) -> Result<Vec<u8>> {
 }
 
 /// Bao stream extraction
-pub fn bao(decoded: &[u8], hash: &[u8]) -> Result<Vec<u8>> {
+pub fn bao(input: &[u8], hash: &[u8]) -> Result<Vec<u8>> {
     let hash = decode_bao_hash(hash)?;
-    let decoded = bao_decode(decoded, &hash)?;
+    let decoded = bao_decode(input, &hash)?;
     Ok(decoded)
 }
 
 /// Ecies decryption
-pub fn ecies(decoded: &[u8], privkey: &[u8]) -> Result<Vec<u8>> {
-    let decrypted = decrypt(privkey, decoded)?;
+pub fn ecies(input: &[u8], secret_key: &[u8]) -> Result<Vec<u8>> {
+    let decrypted = decrypt(secret_key, input)?;
     Ok(decrypted)
 }
 
 /// Snappy decompression
-pub fn snap(decrypted: &[u8]) -> Result<Vec<u8>> {
+pub fn snap(input: &[u8]) -> Result<Vec<u8>> {
     let mut decompressed = vec![];
-    FrameDecoder::new(decrypted).read_to_end(&mut decompressed)?;
+    FrameDecoder::new(input).read_to_end(&mut decompressed)?;
     Ok(decompressed)
 }
 
 /// Decode data from Carbonado format in reverse order:
 /// bao -> zfec -> ecies -> snap
-pub fn decode(privkey: &[u8], hash: &[u8], input: &[u8], padding: usize) -> Result<Vec<u8>> {
-    let verified = bao(input, hash)?;
-    let decoded = zfec(&verified, padding)?;
-    let decrypted = ecies(&decoded, privkey)?;
-    let decompressed = snap(&decrypted)?;
+pub fn decode(
+    secret_key: &[u8],
+    hash: &[u8],
+    input: &[u8],
+    padding: u32,
+    format: u8,
+) -> Result<Vec<u8>> {
+    let format = Format::try_from(format)?;
+
+    let verified = if format.contains(Format::Bao) {
+        bao(input, hash)?
+    } else {
+        input.to_owned()
+    };
+
+    let decoded = if format.contains(Format::Zfec) {
+        zfec(&verified, padding)?
+    } else {
+        verified
+    };
+
+    let decrypted = if format.contains(Format::Ecies) {
+        ecies(&decoded, secret_key)?
+    } else {
+        decoded
+    };
+
+    let decompressed = if format.contains(Format::Snappy) {
+        snap(&decrypted)?
+    } else {
+        decrypted
+    };
 
     Ok(decompressed)
 }
 
 /// Extract a 1KB slice of a Bao stream at a specific index, after decoding it from zfec
-pub fn extract_slice(encoded: &[u8], index: usize) -> Result<Vec<u8>> {
+pub fn extract_slice(encoded: &[u8], index: u16) -> Result<Vec<u8>> {
     let slice_start = index * SLICE_LEN;
     let encoded_cursor = Cursor::new(&encoded);
     let mut extractor = SliceExtractor::new(encoded_cursor, slice_start as u64, SLICE_LEN as u64);
@@ -93,7 +120,7 @@ pub fn extract_slice(encoded: &[u8], index: usize) -> Result<Vec<u8>> {
 }
 
 /// Verify a number of 1KB slices of a Bao stream starting at a specific index
-pub fn verify_slice(hash: &Hash, input: &[u8], index: usize, count: usize) -> Result<Vec<u8>> {
+pub fn verify_slice(hash: &Hash, input: &[u8], index: u16, count: u16) -> Result<Vec<u8>> {
     let slice_start = index * SLICE_LEN;
     let slice_len = count * SLICE_LEN;
     trace!("Verify slice start: {slice_start} len: {slice_len}");
@@ -113,7 +140,7 @@ pub fn scrub(input: &[u8], hash: &[u8], encode_info: &EncodeInfo) -> Result<Vec<u8>> {
         Err(anyhow!("Data does not need to be scrubbed.")),
@@ -122,7 +149,7 @@ pub fn scrub(input: &[u8], hash: &[u8], encode_info: &EncodeInfo) -> Result<Vec<u8>> {
             chunks.push(chunk),
             Err(e) => {
@@ -134,20 +161,20 @@ pub fn scrub(input: &[u8], hash: &[u8], encode_info: &EncodeInfo) -> Result<Vec<u8>> {
diff --git a/src/encode.rs b/src/encoding.rs
rename from src/encode.rs
rename to src/encoding.rs
--- a/src/encode.rs
+++ b/src/encoding.rs
@@ ... @@ pub fn snap(input: &[u8]) -> Result<Vec<u8>> {
 }
 
 /// Ecies encryption
-pub fn ecies(pubkey: &[u8], compressed: &[u8]) -> Result<Vec<u8>> {
-    let encrypted = encrypt(pubkey, compressed)?;
+pub fn ecies(pubkey: &[u8], input: &[u8]) -> Result<Vec<u8>> {
+    let encrypted = encrypt(pubkey, input)?;
     Ok(encrypted)
 }
 
@@ -39,11 +39,11 @@ pub fn bao(input: &[u8]) -> Result<(Vec<u8>, Hash)> {
 }
 
 /// Zfec forward error correction encoding
-pub fn zfec(input: &[u8]) -> Result<(Vec<u8>, usize, usize)> {
+pub fn zfec(input: &[u8]) -> Result<(Vec<u8>, u32, u32)> {
     let input_len = input.len();
     let (padding_len, chunk_size) = calc_padding_len(input_len);
     // TODO: CSPRNG padding
-    let mut padding_bytes = vec![0u8; padding_len];
+    let mut padding_bytes = vec![0u8; padding_len as usize];
     let mut padded_input = Vec::from(input);
     padded_input.append(&mut padding_bytes);
     debug!(
@@ -64,7 +64,7 @@ pub fn zfec(input: &[u8]) -> Result<(Vec<u8>, usize, usize)> {
     for chunk in &mut encoded_chunks {
         assert_eq!(
             chunk_size,
-            chunk.data.len(),
+            chunk.data.len() as u32,
            "Chunk size should be as calculated"
         );
         encoded.append(&mut chunk.data);
@@ -74,22 +74,61 @@ pub fn zfec(input: &[u8]) -> Result<(Vec<u8>, usize, usize)> {
 }
 
 /// Encode data into Carbonado format in this order:
-/// snap -> ecies -> zfec -> bao
+///
+/// `snap -> ecies -> zfec -> bao`
+///
 /// It performs compression, encryption, stream encoding, and adds error correction codes, in that order.
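+///
+/// A minimal usage sketch (key generation via the `ecies` crate, as in the crate's tests; format 15 enables all four steps):
+///
+/// ```ignore
+/// let (_sk, pk) = ecies::utils::generate_keypair();
+/// let (encoded, hash, info) = carbonado::encode(&pk.serialize(), b"input data", 15)?;
+/// ```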
-pub fn encode(pubkey: &[u8], input: &[u8]) -> Result<(Vec<u8>, Hash, EncodeInfo)> {
-    let input_len = input.len();
-
-    let compressed = snap(input)?;
-    let bytes_compressed = compressed.len();
+pub fn encode(pubkey: &[u8], input: &[u8], format: u8) -> Result<(Vec<u8>, Hash, EncodeInfo)> {
+    let input_len = input.len() as u32;
+    let format = Format::try_from(format)?;
+
+    let compressed;
+    let encrypted;
+    let encoded;
+    let padding;
+    let chunk_size;
+    let verifiable;
+    let hash;
+
+    let bytes_compressed;
+    let bytes_encrypted;
+    let bytes_ecc;
+    let bytes_verifiable;
+
+    if format.contains(Format::Snappy) {
+        compressed = snap(input)?;
+        bytes_compressed = compressed.len() as u32;
+    } else {
+        compressed = input.to_owned();
+        bytes_compressed = 0;
+    }
 
-    let encrypted = ecies(pubkey, &compressed)?;
-    let bytes_encrypted = encrypted.len();
+    if format.contains(Format::Ecies) {
+        encrypted = ecies(pubkey, &compressed)?;
+        bytes_encrypted = encrypted.len() as u32;
+    } else {
+        encrypted = compressed;
+        bytes_encrypted = 0;
+    }
 
-    let (encoded, padding, chunk_size) = zfec(&encrypted)?;
-    let bytes_encoded = encoded.len();
+    if format.contains(Format::Zfec) {
+        (encoded, padding, chunk_size) = zfec(&encrypted)?;
+        bytes_ecc = encoded.len() as u32;
+    } else {
+        encoded = encrypted;
+        padding = 0;
+        chunk_size = 0;
+        bytes_ecc = 0;
+    }
 
-    let (verifiable, hash) = bao(&encoded)?;
-    let bytes_verifiable = verifiable.len();
+    if format.contains(Format::Bao) {
+        (verifiable, hash) = bao(&encoded)?;
+        bytes_verifiable = verifiable.len() as u32;
+    } else {
+        verifiable = encoded;
+        hash = Hash::from([0; 32]);
+        bytes_verifiable = 0;
+    }
 
     // Calculate totals
     let compression_factor = bytes_compressed as f32 / input_len as f32;
@@ -102,7 +141,7 @@ pub fn encode(pubkey: &[u8], input: &[u8]) -> Result<(Vec<u8>, Hash, EncodeInfo)> {
         input_len,
         bytes_compressed,
         bytes_encrypted,
-        bytes_encoded,
+        bytes_ecc,
         bytes_verifiable,
         compression_factor,
         amplification_factor,
diff --git a/src/fs.rs b/src/fs.rs
new file mode 100644
index 0000000..0368f67
--- /dev/null
+++ b/src/fs.rs
@@ -0,0 +1,131 @@
+use std::{
+    convert::TryFrom,
+    fs::File,
+    io::{Read, Seek, SeekFrom},
+};
+
+use anyhow::{anyhow, Error, Result};
+use bao::Hash;
+use secp256k1::{schnorr::Signature, KeyPair, Message, PublicKey, Secp256k1};
+
+use crate::{
+    constants::{Format, MAGICNO},
+    utils::{decode_bao_hash, encode_bao_hash},
+};
+
+#[derive(Debug)]
+pub struct Header {
+    pub pubkey: PublicKey,
+    pub hash: Hash,
+    pub signature: Signature,
+    pub format: Format,
+    pub encoded_len: u32,
+    pub padding_len: u32,
+}
+
+impl TryFrom<File> for Header {
+    type Error = Error;
+
+    fn try_from(mut file: File) -> Result<Self, Self::Error> {
+        let mut magic_no = [0_u8; 12];
+        let mut pubkey = [0_u8; 33];
+        let mut hash = [0_u8; 32];
+        let mut signature = [0_u8; 64];
+        let mut format = [0_u8; 1];
+        let mut encoded_len = [0_u8; 4];
+        let mut padding_len = [0_u8; 4];
+
+        file.seek(SeekFrom::Start(0))?;
+
+        let mut handle = file.take(12 + 33 + 32 + 64 + 1 + 4 + 4);
+        handle.read_exact(&mut magic_no)?;
+        handle.read_exact(&mut pubkey)?;
+        handle.read_exact(&mut hash)?;
+        handle.read_exact(&mut signature)?;
+        handle.read_exact(&mut format)?;
+        handle.read_exact(&mut encoded_len)?;
+        handle.read_exact(&mut padding_len)?;
+
+        if magic_no != MAGICNO {
+            return Err(anyhow!(
+                "File header lacks Carbonado magic number and may not be a proper Carbonado file"
+            ));
+        }
+
+        let pubkey = PublicKey::from_slice(&pubkey)?;
+        let hash = bao::Hash::try_from(hash)?;
+        let signature = Signature::from_slice(&signature)?;
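+        // What remains after the signature is the format bitmask byte and two
+        // little-endian u32 lengths; the full header spans 12 + 33 + 32 + 64 + 1 + 4 + 4 = 150 bytes.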
+        let format = Format::try_from(format[0])?;
+        let encoded_len = u32::from_le_bytes(encoded_len);
+        let padding_len = u32::from_le_bytes(padding_len);
+
+        Ok(Header {
+            pubkey,
+            hash,
+            signature,
+            format,
+            encoded_len,
+            padding_len,
+        })
+    }
+}
+
+impl Header {
+    pub fn new(
+        sk: &[u8],
+        hash: &[u8],
+        format: Format,
+        encoded_len: u32,
+        padding_len: u32,
+    ) -> Result<Self> {
+        let secp = Secp256k1::new();
+        let keypair = KeyPair::from_seckey_slice(&secp, sk)?;
+        let msg = Message::from_slice(hash)?;
+        let signature = keypair.sign_schnorr(msg);
+        let pubkey = PublicKey::from_keypair(&keypair);
+        let hash = decode_bao_hash(hash)?;
+
+        Ok(Header {
+            pubkey,
+            signature,
+            hash,
+            format,
+            encoded_len,
+            padding_len,
+        })
+    }
+
+    /// Creates a header to be prepended to files.
+    pub fn to_vec(&self) -> Vec<u8> {
+        let mut pubkey_bytes = self.pubkey.serialize().to_vec(); // 33 bytes
+        assert_eq!(pubkey_bytes.len(), 33);
+        let mut hash_bytes = self.hash.as_bytes().to_vec(); // 32 bytes
+        assert_eq!(hash_bytes.len(), 32);
+        let mut signature_bytes = hex::decode(self.signature.to_string()).expect("hex encoded"); // 64 bytes
+        assert_eq!(signature_bytes.len(), 64);
+        let mut format_bytes = self.format.bits().to_le_bytes().to_vec(); // 1 byte
+        let mut encoded_len_bytes = self.encoded_len.to_le_bytes().to_vec(); // 4 bytes
+        let mut padding_bytes = self.padding_len.to_le_bytes().to_vec(); // 4 bytes
+
+        let mut header = Vec::new();
+
+        header.append(&mut MAGICNO.to_vec()); // 12 bytes
+        header.append(&mut pubkey_bytes);
+        header.append(&mut hash_bytes);
+        header.append(&mut signature_bytes);
+        header.append(&mut format_bytes);
+        header.append(&mut encoded_len_bytes);
+        header.append(&mut padding_bytes);
+
+        header
+    }
+
+    pub fn filename(&self) -> String {
+        let hash = encode_bao_hash(&self.hash);
+        let fmt = self.format.bits();
+        format!("{hash}.c{fmt}")
+    }
+}
+
+// fn create_file(header_bytes: &[u8], encoded_bytes: &[u8]) {
+//     todo!();
+// }
diff --git a/src/lib.rs b/src/lib.rs
index 9c593eb..e673143 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,8 +1,32 @@
-mod constants;
-mod decode;
-mod encode;
-mod structs;
+/// For details on Carbonado formats and their uses, see [the Carbonado Format bitmask constant](constants::Format)
+pub mod constants;
+/// Filesystem helper methods
+pub mod fs;
+/// See [structs::EncodeInfo](structs::EncodeInfo) for various statistics gathered in the encoding step.
+pub mod structs;
+/// Various utilities to assist with Carbonado encoding steps
 pub mod utils;
 
-pub use decode::{decode, extract_slice, scrub, verify_slice};
-pub use encode::encode;
+mod decoding;
+mod encoding;
+
+/// Encode data into Carbonado format in this order:
+///
+/// `snap -> ecies -> zfec -> bao`
+///
+/// It performs compression, encryption, stream encoding, and adds error correction codes, in that order.
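+///
+/// A round-trip sketch, mirroring the crate's codec test:
+///
+/// ```ignore
+/// let (sk, pk) = ecies::utils::generate_keypair();
+/// let (encoded, hash, info) = carbonado::encode(&pk.serialize(), &input, 15)?;
+/// let decoded = carbonado::decode(&sk.serialize(), hash.as_bytes(), &encoded, info.padding, 15)?;
+/// assert_eq!(decoded, input);
+/// ```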
+pub use encoding::encode;
+
+/// Decode data from Carbonado format in reverse order:
+/// bao -> zfec -> ecies -> snap
+pub use decoding::decode;
+
+/// Extract a 1KB slice of a Bao stream at a specific index, after decoding it from zfec
+pub use decoding::extract_slice;
+
+/// Verify a number of 1KB slices of a Bao stream starting at a specific index
+pub use decoding::verify_slice;
+
+/// Scrub zfec-encoded data, correcting flipped bits using error correction codes.
+/// Returns an error either when valid data cannot be provided, or when the data is already valid.
+pub use decoding::scrub;
diff --git a/src/structs.rs b/src/structs.rs
index 9fcd433..77711bb 100644
--- a/src/structs.rs
+++ b/src/structs.rs
@@ -1,14 +1,27 @@
 use serde::{Deserialize, Serialize};
 
+/// Information from the encoding step, some of which is needed for decoding.
 #[derive(Serialize, Deserialize, Clone, Debug)]
 pub struct EncodeInfo {
-    pub input_len: usize,
-    pub bytes_compressed: usize, // snappy
-    pub bytes_encrypted: usize,  // ecies
-    pub bytes_encoded: usize,    // zfec
-    pub bytes_verifiable: usize, // bao
+    /// How many bytes were input into the encoding step
+    pub input_len: u32,
+    /// How large the data is after Snappy compression
+    pub bytes_compressed: u32,
+    /// Compression factor.
+    /// Values below 1.0 are desirable; 0.2 is typical of contracts, and 0.8 is typical of code.
+    /// A value above 1.0 indicates the file grew in size, which occurs when used on incompressible file formats.
     pub compression_factor: f32,
+    /// How large the data is after Ecies secp256k1 and AES-GCM authenticated encryption.
+    /// This is not expected to add much overhead, typically around a hundred bytes.
+    pub bytes_encrypted: u32,
+    /// How large the data is after adding Zfec error correction codes
+    pub bytes_ecc: u32,
+    /// How large the data is after Bao encoding, for remote slice verification and integrity-checking
+    pub bytes_verifiable: u32,
+    /// The total amount of file amplification. 2.0x is typical for 4/8 Zfec encoding; the other steps are fairly minimal, at roughly 1.1x.
     pub amplification_factor: f32,
-    pub padding: usize,
-    pub chunk_size: usize,
+    /// The amount of padding added to input data in order to align it with the Bao slice size (1KB) and 4/8 Zfec chunk size (4KB).
+    pub padding: u32,
+    /// How many bytes are in each Zfec chunk
+    pub chunk_size: u32,
 }
diff --git a/src/utils.rs b/src/utils.rs
index 6bbaaa8..becba81 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -2,38 +2,56 @@ use std::sync::Once;
 
 use anyhow::Result;
 use bao::Hash;
+use bech32::{decode, encode, FromBase32, ToBase32, Variant};
 use log::trace;
 
 use crate::constants::{FEC_K, SLICE_LEN};
 
-pub fn decode_bao_hash(hash: &[u8]) -> Result<Hash> {
-    let hash_array: [u8; bao::HASH_SIZE] = hash[..].try_into()?;
-
-    Ok(hash_array.into())
-}
-
 static INIT: Once = Once::new();
 
+/// Helper function only used in tests
 pub fn init_logging() {
     INIT.call_once(|| {
         use std::env::{set_var, var};
 
         if var("RUST_LOG").is_err() {
-            set_var("RUST_LOG", "carbonado=trace,codec=trace,apocalypse=trace");
+            set_var(
+                "RUST_LOG",
+                "carbonado=trace,codec=trace,apocalypse=trace,format=trace",
+            );
         }
 
         pretty_env_logger::init();
     });
 }
 
+pub fn encode_bao_hash(hash: &Hash) -> String {
+    let hash_hex = hash.to_hex();
+    hash_hex.to_string()
+}
+
+pub fn decode_bao_hash(hash: &[u8]) -> Result<Hash> {
+    let hash_array: [u8; bao::HASH_SIZE] = hash[..].try_into()?;
+    Ok(hash_array.into())
+}
+
 /// Calculate padding (find a length that divides evenly both by Zfec FEC_K and Bao SLICE_LEN, then find the difference).
 /// Returns (padding_len, chunk_size)
-pub fn calc_padding_len(input_len: usize) -> (usize, usize) {
-    let overlap_constant = SLICE_LEN as usize * FEC_K;
-    let target_size =
-        (input_len as f64 / overlap_constant as f64).ceil() as usize * overlap_constant;
+pub fn calc_padding_len(input_len: usize) -> (u32, u32) {
+    let input_len = input_len as f64;
+    let overlap_constant = SLICE_LEN as f64 * FEC_K as f64;
+    let target_size = (input_len / overlap_constant).ceil() * overlap_constant;
     let padding_len = target_size - input_len;
-    let chunk_size = target_size / FEC_K;
-    trace!("input_len: {input_len}, target_size: {target_size}, padding_len: {padding_len}, chunk_size: {chunk_size}");
-    (padding_len, chunk_size)
+    let chunk_size = target_size / FEC_K as f64;
+    trace!("input_len: {input_len:.0}, target_size: {target_size:.0}, padding_len: {padding_len:.0}, chunk_size: {chunk_size:.0}");
+    (padding_len as u32, chunk_size as u32)
+}
+
+pub fn bech32m_encode(hrp: &str, bytes: &[u8]) -> Result<String> {
+    Ok(encode(hrp, bytes.to_base32(), Variant::Bech32m)?)
+}
+
+pub fn bech32_decode(bech32_str: &str) -> Result<(String, Vec<u8>, Variant)> {
+    let (hrp, words, variant) = decode(bech32_str)?;
+    Ok((hrp, Vec::<u8>::from_base32(&words)?, variant))
+}
diff --git a/tests/apocalypse.rs b/tests/apocalypse.rs
index 4f04027..a85057f 100644
--- a/tests/apocalypse.rs
+++ b/tests/apocalypse.rs
@@ -65,9 +65,9 @@ fn wasm_code() -> Result<()> {
 fn act_of_god(path: &str) -> Result<()> {
     let input = read(path)?;
-    let (_privkey, pubkey) = generate_keypair();
+    let (_sk, pk) = generate_keypair();
 
     info!("Encoding {path}...");
-    let (orig_encoded, hash, encode_info) = encode(&pubkey.serialize(), &input)?;
+    let (orig_encoded, hash, encode_info) = encode(&pk.serialize(), &input, 15)?;
     debug!("Encoding Info: {encode_info:#?}");
     let mut new_encoded = Vec::new();
     new_encoded.clone_from(&orig_encoded);
diff --git a/tests/codec.rs b/tests/codec.rs
index 5afbf06..e3f99ca 100644
--- a/tests/codec.rs
+++ b/tests/codec.rs
@@ -65,14 +65,14 @@ fn wasm_code() -> Result<()> {
 fn codec(path: &str) -> Result<()> {
     let input = read(path)?;
-    let (privkey, pubkey) = generate_keypair();
+    let (sk, pk) = generate_keypair();
 
     info!("Encoding {path}...");
-    let (encoded, hash, encode_info) = encode(&pubkey.serialize(), &input)?;
+    let (encoded, hash, encode_info) = encode(&pk.serialize(), &input, 15)?;
     debug!("Encoding Info: {encode_info:#?}");
 
     assert_eq!(
-        encoded.len(),
+        encoded.len() as u32,
         encode_info.bytes_verifiable,
         "Length of encoded bytes matches bytes_verifiable field"
     );
@@ -82,10 +82,11 @@ fn codec(path: &str) -> Result<()> {
 
     info!("Decoding Carbonado bytes");
     let decoded = decode(
-        &privkey.serialize(),
+        &sk.serialize(),
         hash.as_bytes(),
         &encoded,
         encode_info.padding,
+        15,
     )?;
 
     assert_eq!(decoded, input, "Decoded output is same as encoded input");
diff --git a/tests/format.rs b/tests/format.rs
new file mode 100644
index 0000000..acb033b
--- /dev/null
+++ b/tests/format.rs
@@ -0,0 +1,78 @@
+use std::{fs::OpenOptions, io::Write, path::PathBuf};
+
+use anyhow::Result;
+use carbonado::{constants::Format, decode, encode, fs::Header, utils::init_logging};
+use ecies::utils::generate_keypair;
+use log::{debug, info, trace};
+use secp256k1::PublicKey;
+use wasm_bindgen_test::wasm_bindgen_test_configure;
+
+wasm_bindgen_test_configure!(run_in_browser);
+
+#[test]
+fn format() -> Result<()> {
+    init_logging();
+
+    let input = "Hello world!".as_bytes();
+    let (sk, pk) = generate_keypair();
+    let format = Format::try_from(15)?;
+
+    info!("Encoding input: {input:?}...");
+    let (encoded, hash, encode_info) = encode(&pk.serialize(), input, 15)?;
+
+    debug!("Encoding Info: {encode_info:#?}");
+    assert_eq!(
+        encoded.len() as u32,
+        encode_info.bytes_verifiable,
+        "Length of encoded bytes matches bytes_verifiable field"
+    );
+
+    let header = Header::new(
+        &sk.serialize(),
+        hash.as_bytes(),
+        format,
+        encode_info.bytes_verifiable,
+        encode_info.padding,
+    )?;
+    trace!("Header: {header:#?}");
+
+    let header_bytes = header.to_vec();
+
+    let file_path = PathBuf::from("/tmp").join(header.filename());
+    info!("Writing test file to: {file_path:?}");
+    let mut file = OpenOptions::new()
+        .read(true)
+        .write(true)
+        .create(true)
+        .open(file_path)?;
+    file.write_all(&header_bytes)?;
+    file.write_all(&encoded)?;
+    info!("Test file successfully written.");
+
+    info!("Parsing file headers...");
+    let header = Header::try_from(file)?;
+
+    assert_eq!(
+        header.pubkey,
+        PublicKey::from_slice(&pk.serialize_compressed())?
+    );
+    assert_eq!(header.hash, hash);
+    assert_eq!(header.format, format);
+    assert_eq!(header.encoded_len, encode_info.bytes_verifiable);
+    assert_eq!(header.padding_len, encode_info.padding);
+
+    info!("Decoding Carbonado bytes");
+    let decoded = decode(
+        &sk.serialize(),
+        hash.as_bytes(),
+        &encoded,
+        encode_info.padding,
+        15,
+    )?;
+
+    assert_eq!(decoded, input, "Decoded output is same as encoded input");
+
+    info!("All good!");
+
+    Ok(())
+}