From 95a2e3ed18d044f44267348941d2b994154cd535 Mon Sep 17 00:00:00 2001
From: Corentin REGAL
Date: Thu, 26 Dec 2024 11:42:29 +0100
Subject: [PATCH] check

---
 .../src/cas_chunk_format/deserialize_async.rs | 49 ++++++++++---------
 1 file changed, 26 insertions(+), 23 deletions(-)

diff --git a/cas_object/src/cas_chunk_format/deserialize_async.rs b/cas_object/src/cas_chunk_format/deserialize_async.rs
index dc4c2927..2cc95ae1 100644
--- a/cas_object/src/cas_chunk_format/deserialize_async.rs
+++ b/cas_object/src/cas_chunk_format/deserialize_async.rs
@@ -116,43 +116,46 @@ pub fn deserialize_chunk_to_writer_stream<R: Read>(reader: &mut R) -> Result<
     reader: R,
     writer: Sender<Result<Bytes, CasObjectError>>,
-    start_offset: Option<u64>,
-    end_offset: Option<u64>,
+    _start_offset: Option<u64>,
+    _end_offset: Option<u64>,
 ) -> JoinHandle<()> {
     let mut reader = SyncIoBridge::new(reader);
-    let mut start_offset = start_offset.unwrap_or(0);
-    let mut end_offset = end_offset.unwrap_or(std::u64::MAX);
+    // let mut start_offset = start_offset.unwrap_or(0);
+    // let mut end_offset = end_offset.unwrap_or(std::u64::MAX);
 
     // The deserialization of the chunks is done synchronously and thus needs to happen on a blocking thread
     // Moreover we expect to return from this function right away to be able to read the other end of the stream
     spawn_blocking(move || {
-        let mut uncompressed_len = 0;
+        // let mut uncompressed_len = 0;
         loop {
             match deserialize_chunk_to_writer_stream(&mut reader) {
                 Ok(uncompressed_bytes) => {
-                    let uncompressed_bytes = if start_offset >= uncompressed_bytes.len() as u64 {
-                        // Skip this chunk, it's entirely before the start offset
-                        start_offset -= uncompressed_bytes.len() as u64;
-                        continue;
-                    } else if start_offset > 0 {
-                        // Skip the part of the chunk before the start offset
-                        let offset = start_offset as usize;
-                        start_offset = 0;
-                        uncompressed_bytes.slice(offset..)
-                    } else if end_offset < uncompressed_len + uncompressed_bytes.len() as u64 {
-                        // Skip the part of the chunk after the end offset
-                        let offset = (end_offset - uncompressed_len) as usize;
-                        end_offset = 0;
-                        uncompressed_bytes.slice(..offset)
-                    } else {
-                        uncompressed_bytes
-                    };
+                    // let uncompressed_bytes = if start_offset >= uncompressed_bytes.len() as u64 {
+                    //     // Skip this chunk, it's entirely before the start offset
+                    //     start_offset -= uncompressed_bytes.len() as u64;
+                    //     continue;
+                    // } else if start_offset > 0 {
+                    //     // Skip the part of the chunk before the start offset
+                    //     let offset = start_offset as usize;
+                    //     start_offset = 0;
+                    //     uncompressed_bytes.slice(offset..)
+                    // } else if end_offset < uncompressed_len + uncompressed_bytes.len() as u64 {
+                    //     // Skip the part of the chunk after the end offset
+                    //     let offset = (end_offset - uncompressed_len) as usize;
+                    //     end_offset = 0;
+                    //     uncompressed_bytes.slice(..offset)
+                    // } else {
+                    //     uncompressed_bytes
+                    // };
 
-                    uncompressed_len += uncompressed_bytes.len() as u64;
+                    // uncompressed_len += uncompressed_bytes.len() as u64;
 
                     if writer.blocking_send(Ok(uncompressed_bytes)).is_err() {
                         // Other end of the channel is closed, we can return
                         break;
                     }
+                    // if end_offset == 0 {
+                    //     break;
+                    // }
                 },
                 Err(CasObjectError::InternalIOError(e)) => {
                     if e.kind() == std::io::ErrorKind::UnexpectedEof {