add todos for error transitions between layers
Signed-off-by: simonsan <14062932+simonsan@users.noreply.github.com>
simonsan committed Oct 17, 2024
1 parent bccfad4 commit 4860cf3
Showing 27 changed files with 422 additions and 282 deletions.
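
The recurring change across these files replaces implicit `?` conversions and explicit `ErrorKind` mappings with `.map_err(|_err| todo!("Error transition"))` placeholders, while result types are narrowed from the crate-wide `RusticResult` to per-module aliases. A minimal sketch of the pattern, and of what a finished transition could look like — `LowerError`, `UpperError`, and `lower_op` are illustrative assumptions, not code from this diff:

use thiserror::Error;

/// A lower-layer error (illustrative name, not from this commit).
#[derive(Error, Debug)]
#[error("lower layer failed: {0}")]
pub struct LowerError(String);

/// An upper-layer error (illustrative name, not from this commit).
#[derive(Error, Debug)]
pub enum UpperError {
    /// A finished transition could wrap the lower error transparently.
    #[error(transparent)]
    Lower(#[from] LowerError),
}

fn lower_op() -> Result<(), LowerError> {
    Err(LowerError("disk full".into()))
}

// The placeholder this commit inserts: todo! has type `!`, so the
// closure type-checks against any error type, but it panics if the
// error path is ever taken. It only marks where a real conversion
// still has to be designed.
fn upper_op_placeholder() -> Result<(), UpperError> {
    lower_op().map_err(|_err| todo!("Error transition"))
}

// One way the finished transition could look, using the #[from] impl:
fn upper_op_finished() -> Result<(), UpperError> {
    lower_op().map_err(UpperError::from)
}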
25 changes: 16 additions & 9 deletions crates/core/src/archiver.rs
@@ -6,10 +6,8 @@ pub(crate) mod tree_archiver;
use std::path::{Path, PathBuf, StripPrefixError};

Check warning on line 6 in crates/core/src/archiver.rs

GitHub Actions / Cross checking (all targets: x86_64-unknown-linux-gnu, x86_64-unknown-linux-musl, x86_64-apple-darwin, aarch64-apple-darwin, x86_64-pc-windows-msvc, x86_64-pc-windows-gnu)

unused import: `StripPrefixError`

use chrono::Local;
use displaydoc::Display;
use log::warn;
use pariter::{scope, IteratorExt};
use thiserror::Error;

use crate::{
archiver::{
@@ -23,11 +21,11 @@ use crate::{
ReadGlobalIndex,
},
repofile::{configfile::ConfigFile, snapshotfile::SnapshotFile},
Progress, RusticResult,
Progress,
};

/// [`ArchiverErrorKind`] describes the errors that can be returned from the archiver
#[derive(Error, Debug, Display)]
#[derive(thiserror::Error, Debug, displaydoc::Display)]
pub enum ArchiverErrorKind {
/// tree stack empty
TreeStackEmpty,
@@ -165,7 +163,7 @@ impl<'a, BE: DecryptFullBackend, I: ReadGlobalIndex> Archiver<'a, BE, I> {
<R as ReadSource>::Open: Send,
<R as ReadSource>::Iter: Send,
{
std::thread::scope(|s| -> RusticResult<_> {
std::thread::scope(|s| -> ArchiverResult<_> {
// determine backup size in parallel to running backup
let src_size_handle = s.spawn(|| {
if !no_scan && !p.is_hidden() {
@@ -208,7 +206,7 @@ impl<'a, BE: DecryptFullBackend, I: ReadGlobalIndex> Archiver<'a, BE, I> {
// handle beginning and ending of trees
let iter = TreeIterator::new(iter);

scope(|scope| -> RusticResult<_> {
scope(|scope| -> ArchiverResult<_> {
// use parent snapshot
iter.filter_map(
|item| match self.parent.process(&self.be, self.index, item) {
@@ -241,13 +239,22 @@ impl<'a, BE: DecryptFullBackend, I: ReadGlobalIndex> Archiver<'a, BE, I> {
stats.apply(&mut summary, BlobType::Data);
self.snap.tree = id;

self.indexer.write().unwrap().finalize()?;
self.indexer
.write()
.unwrap()
.finalize()
.map_err(|_err| todo!("Error transition"))?;

summary.finalize(self.snap.time)?;
summary
.finalize(self.snap.time)
.map_err(|_err| todo!("Error transition"))?;
self.snap.summary = Some(summary);

if !skip_identical_parent || Some(self.snap.tree) != self.parent.tree_id() {
let id = self.be.save_file(&self.snap)?;
let id = self
.be
.save_file(&self.snap)
.map_err(|_err| todo!("Error transition"))?;
self.snap.id = id.into();
}
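
The two scope closures above now return `ArchiverResult<_>` instead of the crate-wide `RusticResult<_>`. The alias itself is not part of this diff; judging by the naming convention it is presumably something along these lines:

// Presumed shape of the per-module result alias (assumption, the
// definition is not shown in this commit):
pub(crate) type ArchiverResult<T> = Result<T, ArchiverErrorKind>;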

22 changes: 14 additions & 8 deletions crates/core/src/archiver/file_archiver.rs
@@ -66,15 +66,16 @@ impl<'a, BE: DecryptWriteBackend, I: ReadGlobalIndex> FileArchiver<'a, BE, I> {
indexer: SharedIndexer<BE>,
config: &ConfigFile,
) -> ArchiverResult<Self> {
let poly = config.poly()?;
let poly = config.poly().map_err(|_err| todo!("Error transition"))?;

let data_packer = Packer::new(
be,
BlobType::Data,
indexer,
config,
index.total_size(BlobType::Data),
)?;
)
.map_err(|_err| todo!("Error transition"))?;

let rabin = Rabin64::new_with_polynom(6, poly);

@@ -121,7 +122,8 @@ impl<'a, BE: DecryptWriteBackend, I: ReadGlobalIndex> FileArchiver<'a, BE, I> {
} else if node.node_type == NodeType::File {
let r = open
.ok_or(ArchiverErrorKind::UnpackingTreeTypeOptionalFailed)?
.open()?;
.open()
.map_err(|_err| todo!("Error transition"))?;
self.backup_reader(r, node, p)?
} else {
(node, 0)
@@ -140,17 +142,18 @@ impl<'a, BE: DecryptWriteBackend, I: ReadGlobalIndex> FileArchiver<'a, BE, I> {
) -> ArchiverResult<(Node, u64)> {
let chunks: Vec<_> = ChunkIter::new(
r,
usize::try_from(node.meta.size)
.map_err(ArchiverErrorKind::ConversionFromU64ToUsizeFailed)?,
usize::try_from(node.meta.size).map_err(|_err| todo!("Error transition"))?,
self.rabin.clone(),
)
.map(|chunk| {
let chunk = chunk.map_err(ArchiverErrorKind::FromStdIo)?;
let chunk = chunk.map_err(|_err| todo!("Error transition"))?;
let id = hash(&chunk);
let size = chunk.len() as u64;

if !self.index.has_data(&DataId::from(id)) {
self.data_packer.add(chunk.into(), BlobId::from(id))?;
self.data_packer
.add(chunk.into(), BlobId::from(id))
.map_err(|_err| todo!("Error transition"))?;
}
p.inc(size);
Ok((DataId::from(id), size))
@@ -175,6 +178,9 @@ impl<'a, BE: DecryptWriteBackend, I: ReadGlobalIndex> FileArchiver<'a, BE, I> {
///
/// If the channel could not be dropped
pub(crate) fn finalize(self) -> ArchiverResult<PackerStats> {
Ok(self.data_packer.finalize()?)
Ok(self
.data_packer
.finalize()
.map_err(|_err| todo!("Error transition"))?)
}
}
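
The chunking pipeline above maps each `io::Result` chunk through hashing and packing, with every fallible step parked behind a placeholder. A simplified, self-contained sketch of that error flow — generic names, not rustic's actual `ChunkIter` API:

use std::io;

// Each chunk read may fail with io::Error; until the transition into
// the archiver's error type is designed, the closure panics via todo!,
// whose `!` type satisfies any expected error type E.
fn collect_chunk_sizes<E>(
    chunks: impl Iterator<Item = io::Result<Vec<u8>>>,
) -> Result<Vec<u64>, E> {
    chunks
        .map(|chunk| {
            let chunk = chunk.map_err(|_err| todo!("Error transition"))?;
            Ok(chunk.len() as u64)
        })
        .collect()
}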
27 changes: 14 additions & 13 deletions crates/core/src/backend.rs
@@ -13,10 +13,8 @@ pub(crate) mod warm_up;
use std::{io::Read, num::TryFromIntError, ops::Deref, path::PathBuf, sync::Arc};

use bytes::Bytes;
use displaydoc::Display;
use enum_map::Enum;
use log::trace;
use thiserror::Error;

#[cfg(test)]
use mockall::mock;
@@ -28,12 +26,12 @@ use crate::{
id::{Id, IdResult},
};

#[derive(Error, Debug, displaydoc::Display)]
#[derive(thiserror::Error, Debug, displaydoc::Display)]
/// Experienced an error in the backend: `{0}`
pub struct BackendDynError(pub Box<dyn std::error::Error + Send + Sync>);

/// [`BackendAccessErrorKind`] describes the errors that can be returned by the various Backends
#[derive(Error, Debug, Display)]
#[derive(thiserror::Error, Debug, displaydoc::Display)]
pub enum BackendAccessErrorKind {
/// General Backend Error: {0:?}
#[error(transparent)]
@@ -59,7 +57,7 @@ pub enum BackendAccessErrorKind {
}

/// [`CryptBackendErrorKind`] describes the errors that can be returned by a Decryption action in Backends
#[derive(Error, Debug, Display)]
#[derive(thiserror::Error, Debug, displaydoc::Display)]
pub enum CryptBackendErrorKind {
/// decryption not supported for backend
DecryptionNotSupportedForBackend,
@@ -266,7 +264,11 @@ pub trait FindInBackend: ReadBackend {
///
/// [`BackendAccessErrorKind::NoSuitableIdFound`]: crate::error::BackendAccessErrorKind::NoSuitableIdFound
/// [`BackendAccessErrorKind::IdNotUnique`]: crate::error::BackendAccessErrorKind::IdNotUnique
fn find_starts_with<T: AsRef<str>>(&self, tpe: FileType, vec: &[T]) -> BackendResult<Vec<Id>> {
fn find_starts_with<T: AsRef<str>>(
&self,
tpe: FileType,
vec: &[T],
) -> BackendAccessResult<Vec<Id>> {
#[derive(Clone, Copy, PartialEq, Eq)]
enum MapResult<T> {
None,
@@ -294,11 +296,10 @@ pub trait FindInBackend: ReadBackend {
MapResult::Some(id) => Ok(id),
MapResult::None => Err(BackendAccessErrorKind::NoSuitableIdFound(
(vec[i]).as_ref().to_string(),
)
.into()),
MapResult::NonUnique => {
Err(BackendAccessErrorKind::IdNotUnique((vec[i]).as_ref().to_string()).into())
}
)),
MapResult::NonUnique => Err(BackendAccessErrorKind::IdNotUnique(
(vec[i]).as_ref().to_string(),
)),
})
.collect()
}
@@ -319,7 +320,7 @@ pub trait FindInBackend: ReadBackend {
/// [`IdErrorKind::HexError`]: crate::error::IdErrorKind::HexError
/// [`BackendAccessErrorKind::NoSuitableIdFound`]: crate::error::BackendAccessErrorKind::NoSuitableIdFound
/// [`BackendAccessErrorKind::IdNotUnique`]: crate::error::BackendAccessErrorKind::IdNotUnique
fn find_id(&self, tpe: FileType, id: &str) -> BackendResult<Id> {
fn find_id(&self, tpe: FileType, id: &str) -> BackendAccessResult<Id> {
Ok(self.find_ids(tpe, &[id.to_string()])?.remove(0))
}

@@ -343,7 +344,7 @@ pub trait FindInBackend: ReadBackend {
/// [`IdErrorKind::HexError`]: crate::error::IdErrorKind::HexError
/// [`BackendAccessErrorKind::NoSuitableIdFound`]: crate::error::BackendAccessErrorKind::NoSuitableIdFound
/// [`BackendAccessErrorKind::IdNotUnique`]: crate::error::BackendAccessErrorKind::IdNotUnique
fn find_ids<T: AsRef<str>>(&self, tpe: FileType, ids: &[T]) -> BackendResult<Vec<Id>> {
fn find_ids<T: AsRef<str>>(&self, tpe: FileType, ids: &[T]) -> BackendAccessResult<Vec<Id>> {
ids.iter()
.map(|id| id.as_ref().parse())
.collect::<IdResult<Vec<_>>>()
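
`find_starts_with`, `find_id`, and `find_ids` now return `BackendAccessResult` and construct `BackendAccessErrorKind` values directly, so the `.into()` conversions into the crate-wide error type disappear. The alias is defined outside this diff; presumably:

// Presumed shape of the alias (assumption, not shown in this commit):
pub type BackendAccessResult<T> = Result<T, BackendAccessErrorKind>;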
70 changes: 44 additions & 26 deletions crates/core/src/backend/cache.rs
@@ -8,21 +8,17 @@ use std::{

use bytes::Bytes;
use dirs::cache_dir;
use displaydoc::Display;
use log::{trace, warn};
use thiserror::Error;
use walkdir::WalkDir;

use crate::{
backend::{FileType, ReadBackend, WriteBackend},
error::RusticErrorKind,
id::Id,
repofile::configfile::RepositoryId,
RusticError, RusticResult,
};

/// [`CacheBackendErrorKind`] describes the errors that can be returned by a Caching action in Backends
#[derive(Error, Debug, Display)]
#[derive(thiserror::Error, Debug, displaydoc::Display)]
pub enum CacheBackendErrorKind {
/// no cache dir
NoCacheDirectory,
@@ -97,8 +93,11 @@ impl ReadBackend for CachedBackend {
/// # Returns
///
/// A vector of tuples containing the id and size of the files.
fn list_with_size(&self, tpe: FileType) -> Result<Vec<(Id, u32)>> {
let list = self.be.list_with_size(tpe)?;
fn list_with_size(&self, tpe: FileType) -> CacheBackendResult<Vec<(Id, u32)>> {
let list = self
.be
.list_with_size(tpe)
.map_err(|_err| todo!("Error transition"))?;

if tpe.is_cacheable() {
if let Err(err) = self.cache.remove_not_in_list(tpe, &list) {
@@ -125,22 +124,27 @@ impl ReadBackend for CachedBackend {
/// The data read.
///
/// [`CacheBackendErrorKind::FromIoError`]: crate::error::CacheBackendErrorKind::FromIoError
fn read_full(&self, tpe: FileType, id: &Id) -> Result<Bytes> {
fn read_full(&self, tpe: FileType, id: &Id) -> CacheBackendResult<Bytes> {
if tpe.is_cacheable() {
match self.cache.read_full(tpe, id) {
Ok(Some(data)) => return Ok(data),
Ok(None) => {}
Err(err) => warn!("Error in cache backend reading {tpe:?},{id}: {err}"),
}
let res = self.be.read_full(tpe, id);
let res = self
.be
.read_full(tpe, id)
.map_err(|_err| todo!("Error transition"));
if let Ok(data) = &res {
if let Err(err) = self.cache.write_bytes(tpe, id, data) {
warn!("Error in cache backend writing {tpe:?},{id}: {err}");
}
}
res
} else {
self.be.read_full(tpe, id)
self.be
.read_full(tpe, id)
.map_err(|_err| todo!("Error transition"))
}
}

@@ -170,7 +174,7 @@ impl ReadBackend for CachedBackend {
cacheable: bool,
offset: u32,
length: u32,
) -> Result<Bytes> {
) -> CacheBackendResult<Bytes> {
if cacheable || tpe.is_cacheable() {
match self.cache.read_partial(tpe, id, offset, length) {
Ok(Some(data)) => return Ok(data),
@@ -186,25 +190,29 @@ impl ReadBackend for CachedBackend {
}
Ok(Bytes::copy_from_slice(&data.slice(range)))
}
error => error,
error => error.map_err(|_err| todo!("Error transition")),
}
} else {
self.be.read_partial(tpe, id, cacheable, offset, length)
self.be
.read_partial(tpe, id, cacheable, offset, length)
.map_err(|_err| todo!("Error transition"))
}
}
fn needs_warm_up(&self) -> bool {
self.be.needs_warm_up()
}

fn warm_up(&self, tpe: FileType, id: &Id) -> Result<()> {
self.be.warm_up(tpe, id)
fn warm_up(&self, tpe: FileType, id: &Id) -> CacheBackendResult<()> {
self.be
.warm_up(tpe, id)
.map_err(|_err| todo!("Error transition"))
}
}

impl WriteBackend for CachedBackend {
/// Creates the backend.
fn create(&self) -> Result<()> {
self.be.create()
fn create(&self) -> CacheBackendResult<()> {
self.be.create().map_err(|_err| todo!("Error transition"))
}

/// Writes the given data to the given file.
@@ -217,13 +225,21 @@ impl WriteBackend for CachedBackend {
/// * `id` - The id of the file.
/// * `cacheable` - Whether the file is cacheable.
/// * `buf` - The data to write.
fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> Result<()> {
fn write_bytes(
&self,
tpe: FileType,
id: &Id,
cacheable: bool,
buf: Bytes,
) -> CacheBackendResult<()> {
if cacheable || tpe.is_cacheable() {
if let Err(err) = self.cache.write_bytes(tpe, id, &buf) {
warn!("Error in cache backend writing {tpe:?},{id}: {err}");
}
}
self.be.write_bytes(tpe, id, cacheable, buf)
self.be
.write_bytes(tpe, id, cacheable, buf)
.map_err(|_err| todo!("Error transition"))
}

/// Removes the given file.
@@ -234,13 +250,15 @@ impl WriteBackend for CachedBackend {
///
/// * `tpe` - The type of the file.
/// * `id` - The id of the file.
fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> Result<()> {
fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> CacheBackendResult<()> {
if cacheable || tpe.is_cacheable() {
if let Err(err) = self.cache.remove(tpe, id) {
warn!("Error in cache backend removing {tpe:?},{id}: {err}");
}
}
self.be.remove(tpe, id, cacheable)
self.be
.remove(tpe, id, cacheable)
.map_err(|_err| todo!("Error transition"))
}
}

@@ -276,10 +294,10 @@ impl Cache {
dir.push("rustic");
dir
};
fs::create_dir_all(&path).map_err(CacheBackendErrorKind::FromIoError)?;
cachedir::ensure_tag(&path).map_err(CacheBackendErrorKind::FromIoError)?;
fs::create_dir_all(&path).map_err(|_err| todo!("Error transition"))?;
cachedir::ensure_tag(&path).map_err(|_err| todo!("Error transition"))?;
path.push(id.to_hex());
fs::create_dir_all(&path).map_err(CacheBackendErrorKind::FromIoError)?;
fs::create_dir_all(&path).map_err(|_err| todo!("Error transition"))?;
Ok(Self { path })
}

@@ -454,10 +472,10 @@ impl Cache {
};
_ = file
.seek(SeekFrom::Start(u64::from(offset)))
.map_err(CacheBackendErrorKind::FromIoError)?;
.map_err(|_err| todo!("Error transition"))?;
let mut vec = vec![0; length as usize];
file.read_exact(&mut vec)
.map_err(CacheBackendErrorKind::FromIoError)?;
.map_err(|_err| todo!("Error transition"))?;
trace!("cache hit!");
Ok(Some(vec.into()))
}
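
`CachedBackend` now answers its `ReadBackend`/`WriteBackend` methods with `CacheBackendResult`, delegating to the wrapped backend and parking the conversion of its error behind the same placeholder. A presumed shape for the alias (assumption, not shown in this commit):

// Presumed alias; the inner backend's error type would need a
// conversion (e.g. a `From` impl) into CacheBackendErrorKind before
// the todo! placeholders can be replaced.
pub type CacheBackendResult<T> = Result<T, CacheBackendErrorKind>;

Once such a `From` impl exists, each `map_err(|_err| todo!("Error transition"))` can collapse into `map_err(Into::into)` or plain `?` propagation.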