From 6ee6ef805922d8b9cd05fa26b12362697600a281 Mon Sep 17 00:00:00 2001 From: simonsan <14062932+simonsan@users.noreply.github.com> Date: Wed, 23 Oct 2024 18:44:51 +0200 Subject: [PATCH] Rebase --- Cargo.lock | 1 - crates/backend/Cargo.toml | 1 - crates/backend/src/choose.rs | 26 +- crates/backend/src/error.rs | 134 --- crates/backend/src/lib.rs | 21 +- crates/backend/src/local.rs | 91 +- crates/backend/src/opendal.rs | 47 +- crates/backend/src/rclone.rs | 47 +- crates/backend/src/rest.rs | 52 +- crates/backend/src/util.rs | 7 +- crates/core/CHANGELOG.md | 7 +- crates/core/Cargo.toml | 2 +- crates/core/src/archiver.rs | 40 +- crates/core/src/archiver/file_archiver.rs | 19 +- crates/core/src/archiver/parent.rs | 7 +- crates/core/src/archiver/tree_archiver.rs | 13 +- crates/core/src/backend.rs | 129 +- crates/core/src/backend/cache.rs | 136 ++- crates/core/src/backend/childstdout.rs | 15 +- crates/core/src/backend/decrypt.rs | 120 +- crates/core/src/backend/dry_run.rs | 28 +- crates/core/src/backend/hotcold.rs | 16 +- crates/core/src/backend/ignore.rs | 99 +- crates/core/src/backend/local_destination.rs | 250 +++- crates/core/src/backend/node.rs | 134 ++- crates/core/src/backend/warm_up.rs | 16 +- crates/core/src/blob/packer.rs | 130 +- crates/core/src/blob/tree.rs | 123 +- crates/core/src/chunker.rs | 13 +- crates/core/src/commands.rs | 44 + crates/core/src/commands/backup.rs | 16 +- crates/core/src/commands/cat.rs | 5 +- crates/core/src/commands/check.rs | 109 +- crates/core/src/commands/config.rs | 130 +- crates/core/src/commands/dump.rs | 8 +- crates/core/src/commands/forget.rs | 4 +- crates/core/src/commands/init.rs | 4 +- crates/core/src/commands/key.rs | 6 +- crates/core/src/commands/merge.rs | 16 +- crates/core/src/commands/prune.rs | 40 +- crates/core/src/commands/repair/index.rs | 17 +- crates/core/src/commands/repair/snapshots.rs | 4 +- crates/core/src/commands/repoinfo.rs | 4 +- crates/core/src/commands/restore.rs | 31 +- crates/core/src/crypto.rs | 20 +- 
crates/core/src/crypto/aespoly1305.rs | 8 +- crates/core/src/error.rs | 1128 ++++++------------ crates/core/src/id.rs | 20 +- crates/core/src/index.rs | 25 +- crates/core/src/lib.rs | 2 +- crates/core/src/repofile/configfile.rs | 44 +- crates/core/src/repofile/keyfile.rs | 129 +- crates/core/src/repofile/packfile.rs | 66 +- crates/core/src/repofile/snapshotfile.rs | 76 +- crates/core/src/repository.rs | 325 ++--- crates/core/src/repository/command_input.rs | 74 +- crates/core/src/repository/warm_up.rs | 26 +- crates/core/src/vfs.rs | 66 +- crates/core/tests/integration.rs | 2 +- 59 files changed, 2468 insertions(+), 1705 deletions(-) delete mode 100644 crates/backend/src/error.rs diff --git a/Cargo.lock b/Cargo.lock index 0f5d85190..71745aa79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3270,7 +3270,6 @@ name = "rustic_backend" version = "0.4.1" dependencies = [ "aho-corasick", - "anyhow", "backoff", "bytes", "bytesize", diff --git a/crates/backend/Cargo.toml b/crates/backend/Cargo.toml index b35686b94..25b70d8bd 100644 --- a/crates/backend/Cargo.toml +++ b/crates/backend/Cargo.toml @@ -44,7 +44,6 @@ rclone = ["rest", "dep:rand", "dep:semver"] rustic_core = { workspace = true } # errors -anyhow = "1.0.89" displaydoc = "0.2.5" thiserror = "1.0.64" diff --git a/crates/backend/src/choose.rs b/crates/backend/src/choose.rs index ab5007ca7..3fed2b2c5 100644 --- a/crates/backend/src/choose.rs +++ b/crates/backend/src/choose.rs @@ -1,14 +1,12 @@ //! This module contains [`BackendOptions`] and helpers to choose a backend from a given url. 
-use anyhow::{anyhow, Result}; use derive_setters::Setters; use std::{collections::HashMap, sync::Arc}; use strum_macros::{Display, EnumString}; #[allow(unused_imports)] -use rustic_core::{RepositoryBackends, WriteBackend}; +use rustic_core::{RepositoryBackends, RusticResult, WriteBackend}; use crate::{ - error::BackendAccessErrorKind, local::LocalBackend, util::{location_to_type_and_path, BackendLocation}, }; @@ -25,6 +23,13 @@ use crate::rest::RestBackend; #[cfg(feature = "clap")] use clap::ValueHint; +/// [`ChooseBackendErrorKind`] describes the errors that can be returned by the choose backend +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum ChooseBackendErrorKind {} + +pub(crate) type ChooseBackendResult = Result; + /// Options for a backend. #[cfg_attr(feature = "clap", derive(clap::Parser))] #[cfg_attr(feature = "merge", derive(conflate::Merge))] @@ -75,12 +80,12 @@ impl BackendOptions { /// # Returns /// /// The backends for the repository. - pub fn to_backends(&self) -> Result { + pub fn to_backends(&self) -> RusticResult { let mut options = self.options.clone(); options.extend(self.options_cold.clone()); let be = self .get_backend(self.repository.as_ref(), options)? 
- .ok_or_else(|| anyhow!("No repository given."))?; + .ok_or_else(|| Err("No repository given.".to_string()).into())?; let mut options = self.options.clone(); options.extend(self.options_hot.clone()); let be_hot = self.get_backend(self.repo_hot.as_ref(), options)?; @@ -108,12 +113,15 @@ impl BackendOptions { &self, repo_string: Option<&String>, options: HashMap, - ) -> Result>> { + ) -> BackendResult>> { repo_string .map(|string| { let (be_type, location) = location_to_type_and_path(string)?; be_type.to_backend(location, options.into()).map_err(|err| { - BackendAccessErrorKind::BackendLoadError(be_type.to_string(), err).into() + BackendErrorKind::BackendLoadError { + name: be_type, + source: err, + } }) }) .transpose() @@ -138,7 +146,7 @@ pub trait BackendChoice { &self, location: BackendLocation, options: Option>, - ) -> Result>; + ) -> RusticResult>; } /// The supported backend types. @@ -176,7 +184,7 @@ impl BackendChoice for SupportedBackend { &self, location: BackendLocation, options: Option>, - ) -> Result> { + ) -> RusticResult> { let options = options.unwrap_or_default(); Ok(match self { diff --git a/crates/backend/src/error.rs b/crates/backend/src/error.rs deleted file mode 100644 index f2aa3477d..000000000 --- a/crates/backend/src/error.rs +++ /dev/null @@ -1,134 +0,0 @@ -#![allow(clippy::doc_markdown)] -use std::{num::TryFromIntError, process::ExitStatus, str::Utf8Error}; - -use displaydoc::Display; -use thiserror::Error; - -/// [`BackendAccessErrorKind`] describes the errors that can be returned by the various Backends -#[derive(Error, Debug, Display)] -#[non_exhaustive] -pub enum BackendAccessErrorKind { - /// backend {0:?} is not supported! 
- BackendNotSupported(String), - /// backend {0} cannot be loaded: {1:?} - BackendLoadError(String, anyhow::Error), - /// no suitable id found for {0} - NoSuitableIdFound(String), - /// id {0} is not unique - IdNotUnique(String), - /// {0:?} - #[error(transparent)] - FromIoError(#[from] std::io::Error), - /// {0:?} - #[error(transparent)] - FromTryIntError(#[from] TryFromIntError), - #[cfg(feature = "rest")] - /// backoff failed: {0:?} - BackoffError(#[from] backoff::Error), - /// parsing failed for url: `{0:?}` - UrlParsingFailed(#[from] url::ParseError), - /// creating data in backend failed - CreatingDataOnBackendFailed, - /// writing bytes to backend failed - WritingBytesToBackendFailed, - /// removing data from backend failed - RemovingDataFromBackendFailed, - /// failed to list files on Backend - ListingFilesOnBackendFailed, -} - -/// [`RcloneErrorKind`] describes the errors that can be returned by a backend provider -#[derive(Error, Debug, Display)] -#[non_exhaustive] -pub enum RcloneErrorKind { - /// 'rclone version' doesn't give any output - NoOutputForRcloneVersion, - /// cannot get stdout of rclone - NoStdOutForRclone, - /// rclone exited with `{0:?}` - RCloneExitWithBadStatus(ExitStatus), - /// url must start with http:\/\/! url: {0:?} - UrlNotStartingWithHttp(String), - /// StdIo Error: `{0:?}` - #[error(transparent)] - FromIoError(#[from] std::io::Error), - /// utf8 error: `{0:?}` - #[error(transparent)] - FromUtf8Error(#[from] Utf8Error), - /// error parsing verision number from `{0:?}` - FromParseVersion(String), - /// Using rclone without authentication! Upgrade to rclone >= 1.52.2 (current version: `{0}`)! - RCloneWithoutAuthentication(String), -} - -/// [`RestErrorKind`] describes the errors that can be returned while dealing with the REST API -#[derive(Error, Debug, Display)] -#[non_exhaustive] -pub enum RestErrorKind { - /// value `{0:?}` not supported for option retry! 
- NotSupportedForRetry(String), - /// parsing failed for url: `{0:?}` - UrlParsingFailed(#[from] url::ParseError), - #[cfg(feature = "rest")] - /// requesting resource failed: `{0:?}` - RequestingResourceFailed(#[from] reqwest::Error), - /// couldn't parse duration in humantime library: `{0:?}` - CouldNotParseDuration(#[from] humantime::DurationError), - #[cfg(feature = "rest")] - /// backoff failed: {0:?} - BackoffError(#[from] backoff::Error), - #[cfg(feature = "rest")] - /// Failed to build HTTP client: `{0:?}` - BuildingClientFailed(reqwest::Error), - /// joining URL failed on: {0:?} - JoiningUrlFailed(url::ParseError), -} - -/// [`LocalBackendErrorKind`] describes the errors that can be returned by an action on the filesystem in Backends -#[derive(Error, Debug, Display)] -#[non_exhaustive] -pub enum LocalBackendErrorKind { - /// directory creation failed: `{0:?}` - DirectoryCreationFailed(#[from] std::io::Error), - /// querying metadata failed: `{0:?}` - QueryingMetadataFailed(std::io::Error), - /// querying WalkDir metadata failed: `{0:?}` - QueryingWalkDirMetadataFailed(walkdir::Error), - /// executtion of command failed: `{0:?}` - CommandExecutionFailed(std::io::Error), - /// command was not successful for filename {file_name}, type {file_type}, id {id}: {status} - CommandNotSuccessful { - /// File name - file_name: String, - /// File type - file_type: String, - /// Item ID - id: String, - /// Exit status - status: ExitStatus, - }, - /// error building automaton `{0:?}` - FromAhoCorasick(#[from] aho_corasick::BuildError), - /// {0:?} - #[error(transparent)] - FromTryIntError(#[from] TryFromIntError), - /// {0:?} - #[error(transparent)] - FromWalkdirError(#[from] walkdir::Error), - /// removing file failed: `{0:?}` - FileRemovalFailed(std::io::Error), - /// opening file failed: `{0:?}` - OpeningFileFailed(std::io::Error), - /// setting file length failed: `{0:?}` - SettingFileLengthFailed(std::io::Error), - /// can't jump to position in file: `{0:?}` - 
CouldNotSeekToPositionInFile(std::io::Error), - /// couldn't write to buffer: `{0:?}` - CouldNotWriteToBuffer(std::io::Error), - /// reading file contents failed: `{0:?}` - ReadingContentsOfFileFailed(std::io::Error), - /// reading exact length of file contents failed: `{0:?}` - ReadingExactLengthOfFileFailed(std::io::Error), - /// failed to sync OS Metadata to disk: `{0:?}` - SyncingOfOsMetadataFailed(std::io::Error), -} diff --git a/crates/backend/src/lib.rs b/crates/backend/src/lib.rs index e24da48f1..60a364401 100644 --- a/crates/backend/src/lib.rs +++ b/crates/backend/src/lib.rs @@ -53,27 +53,22 @@ This crate exposes a few features for controlling dependency usage: */ pub mod choose; -/// Error types for the backend. -pub mod error; /// Local backend for Rustic. pub mod local; +/// Utility functions for the backend. +pub mod util; + /// `OpenDAL` backend for Rustic. #[cfg(feature = "opendal")] pub mod opendal; + /// `Rclone` backend for Rustic. #[cfg(feature = "rclone")] pub mod rclone; + /// REST backend for Rustic. #[cfg(feature = "rest")] pub mod rest; -/// Utility functions for the backend. 
-pub mod util; - -// rustic_backend Public API -pub use crate::{ - choose::{BackendOptions, SupportedBackend}, - local::LocalBackend, -}; #[cfg(feature = "opendal")] pub use crate::opendal::OpenDALBackend; @@ -83,3 +78,9 @@ pub use crate::rclone::RcloneBackend; #[cfg(feature = "rest")] pub use crate::rest::RestBackend; + +// rustic_backend Public API +pub use crate::{ + choose::{BackendOptions, SupportedBackend}, + local::LocalBackend, +}; diff --git a/crates/backend/src/local.rs b/crates/backend/src/local.rs index df52ade57..83994881a 100644 --- a/crates/backend/src/local.rs +++ b/crates/backend/src/local.rs @@ -6,14 +6,62 @@ use std::{ }; use aho_corasick::AhoCorasick; -use anyhow::Result; use bytes::Bytes; use log::{debug, trace, warn}; use walkdir::WalkDir; -use rustic_core::{CommandInput, FileType, Id, ReadBackend, WriteBackend, ALL_FILE_TYPES}; +use rustic_core::{ + CommandInput, FileType, Id, ReadBackend, RusticResult, WriteBackend, ALL_FILE_TYPES, +}; -use crate::error::LocalBackendErrorKind; +/// [`LocalBackendErrorKind`] describes the errors that can be returned by an action on the filesystem in Backends +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum LocalBackendErrorKind { + /// directory creation failed: `{0:?}` + DirectoryCreationFailed(std::io::Error), + /// querying metadata failed: `{0:?}` + QueryingMetadataFailed(std::io::Error), + /// querying WalkDir metadata failed: `{0:?}` + QueryingWalkDirMetadataFailed(walkdir::Error), + /// execution of command failed: `{0:?}` + CommandExecutionFailed(std::io::Error), + /// command was not successful for filename {file_name}, type {file_type}, id {id}: {status} + CommandNotSuccessful { + /// File name + file_name: String, + /// File type + file_type: String, + /// Item ID + id: String, + /// Exit status + status: ExitStatus, + }, + /// error building automaton `{0:?}` + FromAhoCorasick(aho_corasick::BuildError), + /// {0:?} + #[error(transparent)] + 
FromTryIntError(TryFromIntError), + /// {0:?} + #[error(transparent)] + FromWalkdirError(walkdir::Error), + /// removing file failed: `{0:?}` + FileRemovalFailed(std::io::Error), + /// opening file failed: `{0:?}` + OpeningFileFailed(std::io::Error), + /// setting file length failed: `{0:?}` + SettingFileLengthFailed(std::io::Error), + /// can't jump to position in file: `{0:?}` + CouldNotSeekToPositionInFile(std::io::Error), + /// couldn't write to buffer: `{0:?}` + CouldNotWriteToBuffer(std::io::Error), + /// reading file contents failed: `{0:?}` + ReadingContentsOfFileFailed(std::io::Error), + /// reading exact length of file contents failed: `{0:?}` + ReadingExactLengthOfFileFailed(std::io::Error), + /// failed to sync OS Metadata to disk: `{0:?}` + SyncingOfOsMetadataFailed(std::io::Error), +} /// A local backend. #[derive(Clone, Debug)] @@ -41,7 +89,7 @@ impl LocalBackend { pub fn new( path: impl AsRef, options: impl IntoIterator, - ) -> Result { + ) -> RusticResult { let path = path.as_ref().into(); let mut post_create_command = None; let mut post_delete_command = None; @@ -58,6 +106,7 @@ impl LocalBackend { } } } + Ok(Self { path, post_create_command, @@ -113,7 +162,7 @@ impl LocalBackend { /// [`LocalBackendErrorKind::FromSplitError`]: LocalBackendErrorKind::FromSplitError /// [`LocalBackendErrorKind::CommandExecutionFailed`]: LocalBackendErrorKind::CommandExecutionFailed /// [`LocalBackendErrorKind::CommandNotSuccessful`]: LocalBackendErrorKind::CommandNotSuccessful - fn call_command(tpe: FileType, id: &Id, filename: &Path, command: &str) -> Result<()> { + fn call_command(tpe: FileType, id: &Id, filename: &Path, command: &str) -> RusticResult<()> { let id = id.to_hex(); let patterns = &["%file", "%type", "%id"]; let ac = AhoCorasick::new(patterns).map_err(LocalBackendErrorKind::FromAhoCorasick)?; @@ -157,7 +206,7 @@ impl ReadBackend for LocalBackend { /// # Notes /// /// If the file type is `FileType::Config`, this will return a list with a single default 
id. - fn list(&self, tpe: FileType) -> Result> { + fn list(&self, tpe: FileType) -> RusticResult> { trace!("listing tpe: {tpe:?}"); if tpe == FileType::Config { return Ok(if self.path.join("config").exists() { @@ -190,7 +239,7 @@ impl ReadBackend for LocalBackend { /// [`LocalBackendErrorKind::QueryingMetadataFailed`]: LocalBackendErrorKind::QueryingMetadataFailed /// [`LocalBackendErrorKind::FromTryIntError`]: LocalBackendErrorKind::FromTryIntError /// [`LocalBackendErrorKind::QueryingWalkDirMetadataFailed`]: LocalBackendErrorKind::QueryingWalkDirMetadataFailed - fn list_with_size(&self, tpe: FileType) -> Result> { + fn list_with_size(&self, tpe: FileType) -> RusticResult> { trace!("listing tpe: {tpe:?}"); let path = self.path.join(tpe.dirname()); @@ -213,17 +262,19 @@ impl ReadBackend for LocalBackend { .into_iter() .filter_map(walkdir::Result::ok) .filter(|e| e.file_type().is_file()) - .map(|e| -> Result<_> { + .map(|e| -> RusticResult<_> { Ok(( e.file_name().to_string_lossy().parse()?, e.metadata() - .map_err(LocalBackendErrorKind::QueryingWalkDirMetadataFailed)? + .map_err(LocalBackendErrorKind::QueryingWalkDirMetadataFailed) + .map_err(|_err| todo!("Error transition"))? .len() .try_into() - .map_err(LocalBackendErrorKind::FromTryIntError)?, + .map_err(LocalBackendErrorKind::FromTryIntError) + .map_err(|_err| todo!("Error transition"))?, )) }) - .filter_map(Result::ok); + .filter_map(RusticResult::ok); Ok(walker.collect()) } @@ -240,7 +291,7 @@ impl ReadBackend for LocalBackend { /// * [`LocalBackendErrorKind::ReadingContentsOfFileFailed`] - If the file could not be read. /// /// [`LocalBackendErrorKind::ReadingContentsOfFileFailed`]: LocalBackendErrorKind::ReadingContentsOfFileFailed - fn read_full(&self, tpe: FileType, id: &Id) -> Result { + fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { trace!("reading tpe: {tpe:?}, id: {id}"); Ok(fs::read(self.path(tpe, id)) .map_err(LocalBackendErrorKind::ReadingContentsOfFileFailed)? 
@@ -260,7 +311,7 @@ impl ReadBackend for LocalBackend { /// # Errors /// /// * [`LocalBackendErrorKind::OpeningFileFailed`] - If the file could not be opened. - /// * [`LocalBackendErrorKind::CouldNotSeekToPositionInFile`] - If the file could not be seeked to the given position. + /// * [`LocalBackendErrorKind::CouldNotSeekToPositionInFile`] - If the file could not be sought to the given position. /// * [`LocalBackendErrorKind::FromTryIntError`] - If the length of the file could not be converted to u32. /// * [`LocalBackendErrorKind::ReadingExactLengthOfFileFailed`] - If the length of the file could not be read. /// @@ -275,7 +326,7 @@ impl ReadBackend for LocalBackend { _cacheable: bool, offset: u32, length: u32, - ) -> Result { + ) -> RusticResult { trace!("reading tpe: {tpe:?}, id: {id}, offset: {offset}, length: {length}"); let mut file = File::open(self.path(tpe, id)).map_err(LocalBackendErrorKind::OpeningFileFailed)?; @@ -302,7 +353,7 @@ impl WriteBackend for LocalBackend { /// * [`LocalBackendErrorKind::DirectoryCreationFailed`] - If the directory could not be created. 
/// /// [`LocalBackendErrorKind::DirectoryCreationFailed`]: LocalBackendErrorKind::DirectoryCreationFailed - fn create(&self) -> Result<()> { + fn create(&self) -> RusticResult<()> { trace!("creating repo at {:?}", self.path); fs::create_dir_all(&self.path).map_err(LocalBackendErrorKind::DirectoryCreationFailed)?; @@ -339,7 +390,13 @@ impl WriteBackend for LocalBackend { /// [`LocalBackendErrorKind::SettingFileLengthFailed`]: LocalBackendErrorKind::SettingFileLengthFailed /// [`LocalBackendErrorKind::CouldNotWriteToBuffer`]: LocalBackendErrorKind::CouldNotWriteToBuffer /// [`LocalBackendErrorKind::SyncingOfOsMetadataFailed`]: LocalBackendErrorKind::SyncingOfOsMetadataFailed - fn write_bytes(&self, tpe: FileType, id: &Id, _cacheable: bool, buf: Bytes) -> Result<()> { + fn write_bytes( + &self, + tpe: FileType, + id: &Id, + _cacheable: bool, + buf: Bytes, + ) -> RusticResult<()> { trace!("writing tpe: {:?}, id: {}", &tpe, &id); let filename = self.path(tpe, id); let mut file = fs::OpenOptions::new() @@ -379,7 +436,7 @@ impl WriteBackend for LocalBackend { /// * [`LocalBackendErrorKind::FileRemovalFailed`] - If the file could not be removed. /// /// [`LocalBackendErrorKind::FileRemovalFailed`]: LocalBackendErrorKind::FileRemovalFailed - fn remove(&self, tpe: FileType, id: &Id, _cacheable: bool) -> Result<()> { + fn remove(&self, tpe: FileType, id: &Id, _cacheable: bool) -> RusticResult<()> { trace!("removing tpe: {:?}, id: {}", &tpe, &id); let filename = self.path(tpe, id); fs::remove_file(&filename).map_err(LocalBackendErrorKind::FileRemovalFailed)?; diff --git a/crates/backend/src/opendal.rs b/crates/backend/src/opendal.rs index 915b1ec98..4cd300879 100644 --- a/crates/backend/src/opendal.rs +++ b/crates/backend/src/opendal.rs @@ -1,7 +1,6 @@ /// `OpenDAL` backend for rustic. 
use std::{collections::HashMap, path::PathBuf, str::FromStr, sync::OnceLock}; -use anyhow::{anyhow, Error, Result}; use bytes::Bytes; use bytesize::ByteSize; use log::trace; @@ -12,7 +11,7 @@ use opendal::{ use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use tokio::runtime::Runtime; -use rustic_core::{FileType, Id, ReadBackend, WriteBackend, ALL_FILE_TYPES}; +use rustic_core::{FileType, Id, ReadBackend, RusticResult, WriteBackend, ALL_FILE_TYPES}; mod constants { /// Default number of retries @@ -45,16 +44,20 @@ pub struct Throttle { } impl FromStr for Throttle { - type Err = Error; - fn from_str(s: &str) -> Result { + type Err = RusticError; + fn from_str(s: &str) -> RusticResult { let mut values = s .split(',') - .map(|s| ByteSize::from_str(s.trim()).map_err(|err| anyhow!("Error: {err}"))) - .map(|b| -> Result { Ok(b?.as_u64().try_into()?) }); + .map(|s| { + ByteSize::from_str(s.trim()).map_err(|err| Err(format!("Error: {err}")).into()) + }) + .map(|b| -> RusticResult { Ok(b?.as_u64().try_into()?) }); let bandwidth = values .next() - .ok_or_else(|| anyhow!("no bandwidth given"))??; - let burst = values.next().ok_or_else(|| anyhow!("no burst given"))??; + .ok_or_else(|| Err("no bandwidth given".to_string()).into())??; + let burst = values + .next() + .ok_or_else(|| Err("no burst given".to_string()).into())??; Ok(Self { bandwidth, burst }) } } @@ -74,7 +77,7 @@ impl OpenDALBackend { /// # Returns /// /// A new `OpenDAL` backend. - pub fn new(path: impl AsRef, options: HashMap) -> Result { + pub fn new(path: impl AsRef, options: HashMap) -> RusticResult { let max_retries = match options.get("retry").map(String::as_str) { Some("false" | "off") => 0, None | Some("default") => constants::DEFAULT_RETRY, @@ -154,7 +157,7 @@ impl ReadBackend for OpenDALBackend { /// # Notes /// /// If the file type is `FileType::Config`, this will return a list with a single default id. 
- fn list(&self, tpe: FileType) -> Result> { + fn list(&self, tpe: FileType) -> RusticResult> { trace!("listing tpe: {tpe:?}"); if tpe == FileType::Config { return Ok(if self.operator.is_exist("config")? { @@ -181,7 +184,7 @@ impl ReadBackend for OpenDALBackend { /// /// * `tpe` - The type of the files to list. /// - fn list_with_size(&self, tpe: FileType) -> Result> { + fn list_with_size(&self, tpe: FileType) -> RusticResult> { trace!("listing tpe: {tpe:?}"); if tpe == FileType::Config { return match self.operator.stat("config") { @@ -199,14 +202,14 @@ impl ReadBackend for OpenDALBackend { .call()? .into_iter() .filter(|e| e.metadata().is_file()) - .map(|e| -> Result<(Id, u32)> { + .map(|e| -> RusticResult<(Id, u32)> { Ok((e.name().parse()?, e.metadata().content_length().try_into()?)) }) - .filter_map(Result::ok) + .filter_map(RusticResult::ok) .collect()) } - fn read_full(&self, tpe: FileType, id: &Id) -> Result { + fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { trace!("reading tpe: {tpe:?}, id: {id}"); Ok(self.operator.read(&self.path(tpe, id))?.to_bytes()) @@ -219,7 +222,7 @@ impl ReadBackend for OpenDALBackend { _cacheable: bool, offset: u32, length: u32, - ) -> Result { + ) -> RusticResult { trace!("reading tpe: {tpe:?}, id: {id}, offset: {offset}, length: {length}"); let range = u64::from(offset)..u64::from(offset + length); Ok(self @@ -233,7 +236,7 @@ impl ReadBackend for OpenDALBackend { impl WriteBackend for OpenDALBackend { /// Create a repository on the backend. - fn create(&self) -> Result<()> { + fn create(&self) -> RusticResult<()> { trace!("creating repo at {:?}", self.location()); for tpe in ALL_FILE_TYPES { @@ -262,7 +265,13 @@ impl WriteBackend for OpenDALBackend { /// * `id` - The id of the file. /// * `cacheable` - Whether the file is cacheable. /// * `buf` - The bytes to write. 
- fn write_bytes(&self, tpe: FileType, id: &Id, _cacheable: bool, buf: Bytes) -> Result<()> { + fn write_bytes( + &self, + tpe: FileType, + id: &Id, + _cacheable: bool, + buf: Bytes, + ) -> RusticResult<()> { trace!("writing tpe: {:?}, id: {}", &tpe, &id); let filename = self.path(tpe, id); self.operator.write(&filename, buf)?; @@ -276,7 +285,7 @@ impl WriteBackend for OpenDALBackend { /// * `tpe` - The type of the file. /// * `id` - The id of the file. /// * `cacheable` - Whether the file is cacheable. - fn remove(&self, tpe: FileType, id: &Id, _cacheable: bool) -> Result<()> { + fn remove(&self, tpe: FileType, id: &Id, _cacheable: bool) -> RusticResult<()> { trace!("removing tpe: {:?}, id: {}", &tpe, &id); let filename = self.path(tpe, id); self.operator.delete(&filename)?; @@ -314,7 +323,7 @@ mod tests { #[rstest] fn new_opendal_backend( #[files("tests/fixtures/opendal/*.toml")] test_case: PathBuf, - ) -> Result<()> { + ) -> RusticResult<()> { #[derive(Deserialize)] struct TestCase { path: String, diff --git a/crates/backend/src/rclone.rs b/crates/backend/src/rclone.rs index 84e72a862..8d66e434a 100644 --- a/crates/backend/src/rclone.rs +++ b/crates/backend/src/rclone.rs @@ -5,7 +5,6 @@ use std::{ thread::JoinHandle, }; -use anyhow::Result; use bytes::Bytes; use constants::DEFAULT_COMMAND; use log::{debug, info}; @@ -15,9 +14,33 @@ use rand::{ }; use semver::{BuildMetadata, Prerelease, Version, VersionReq}; -use crate::{error::RcloneErrorKind, rest::RestBackend}; - -use rustic_core::{CommandInput, FileType, Id, ReadBackend, WriteBackend}; +use crate::rest::RestBackend; + +use rustic_core::{CommandInput, FileType, Id, ReadBackend, RusticResult, WriteBackend}; + +/// [`RcloneErrorKind`] describes the errors that can be returned by a backend provider +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum RcloneErrorKind { + /// 'rclone version' doesn't give any output + NoOutputForRcloneVersion, + /// cannot get stdout of rclone + 
NoStdOutForRclone, + /// rclone exited with `{0:?}` + RCloneExitWithBadStatus(ExitStatus), + /// url must start with http:\/\/! url: {0:?} + UrlNotStartingWithHttp(String), + /// StdIo Error: `{0:?}` + #[error(transparent)] + FromIoError(std::io::Error), + /// utf8 error: `{0:?}` + #[error(transparent)] + FromUtf8Error(Utf8Error), + /// error parsing version number from `{0:?}` + FromParseVersion(String), + /// Using rclone without authentication! Upgrade to rclone >= 1.52.2 (current version: `{0}`)! + RCloneWithoutAuthentication(String), +} pub(super) mod constants { /// The default command called if no other is specified @@ -70,7 +93,7 @@ impl Drop for RcloneBackend { /// [`RcloneErrorKind::FromUtf8Error`]: RcloneErrorKind::FromUtf8Error /// [`RcloneErrorKind::NoOutputForRcloneVersion`]: RcloneErrorKind::NoOutputForRcloneVersion /// [`RcloneErrorKind::FromParseVersion`]: RcloneErrorKind::FromParseVersion -fn check_clone_version(rclone_version_output: &[u8]) -> Result<()> { +fn check_clone_version(rclone_version_output: &[u8]) -> RusticResult<()> { let rclone_version = std::str::from_utf8(rclone_version_output) .map_err(RcloneErrorKind::FromUtf8Error)? .lines() @@ -127,7 +150,7 @@ impl RcloneBackend { /// /// If the rclone command is not found. // TODO: This should be an error, not a panic. - pub fn new(url: impl AsRef, options: HashMap) -> Result { + pub fn new(url: impl AsRef, options: HashMap) -> RusticResult { let rclone_command = options.get("rclone-command"); let use_password = options .get("use-password") @@ -247,7 +270,7 @@ impl ReadBackend for RcloneBackend { /// * `tpe` - The type of the file. /// /// If the size could not be determined. - fn list_with_size(&self, tpe: FileType) -> Result> { + fn list_with_size(&self, tpe: FileType) -> RusticResult> { self.rest.list_with_size(tpe) } @@ -261,7 +284,7 @@ impl ReadBackend for RcloneBackend { /// # Returns /// /// The data read. 
- fn read_full(&self, tpe: FileType, id: &Id) -> Result { + fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { self.rest.read_full(tpe, id) } @@ -285,14 +308,14 @@ impl ReadBackend for RcloneBackend { cacheable: bool, offset: u32, length: u32, - ) -> Result { + ) -> RusticResult { self.rest.read_partial(tpe, id, cacheable, offset, length) } } impl WriteBackend for RcloneBackend { /// Creates a new file. - fn create(&self) -> Result<()> { + fn create(&self) -> RusticResult<()> { self.rest.create() } @@ -304,7 +327,7 @@ impl WriteBackend for RcloneBackend { /// * `id` - The id of the file. /// * `cacheable` - Whether the data should be cached. /// * `buf` - The data to write. - fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> Result<()> { + fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()> { self.rest.write_bytes(tpe, id, cacheable, buf) } @@ -315,7 +338,7 @@ impl WriteBackend for RcloneBackend { /// * `tpe` - The type of the file. /// * `id` - The id of the file. /// * `cacheable` - Whether the file is cacheable. 
- fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> Result<()> { + fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()> { self.rest.remove(tpe, id, cacheable) } } diff --git a/crates/backend/src/rest.rs b/crates/backend/src/rest.rs index 3b0b5683a..46bbb0390 100644 --- a/crates/backend/src/rest.rs +++ b/crates/backend/src/rest.rs @@ -1,9 +1,9 @@ use std::str::FromStr; use std::time::Duration; -use anyhow::Result; use backoff::{backoff::Backoff, ExponentialBackoff, ExponentialBackoffBuilder}; use bytes::Bytes; +use displaydoc::Display; use log::{trace, warn}; use reqwest::{ blocking::{Client, ClientBuilder, Response}, @@ -11,11 +11,33 @@ use reqwest::{ Url, }; use serde::Deserialize; - -use crate::error::RestErrorKind; +use thiserror::Error; use rustic_core::{FileType, Id, ReadBackend, WriteBackend}; +/// [`RestErrorKind`] describes the errors that can be returned while dealing with the REST API +#[derive(Error, Debug, Display)] +#[non_exhaustive] +pub enum RestErrorKind { + /// value `{0:?}` not supported for option retry! 
+ NotSupportedForRetry(String), + /// parsing failed for url: `{0:?}` + UrlParsingFailed(url::ParseError), + #[cfg(feature = "rest")] + /// requesting resource failed: `{0:?}` + RequestingResourceFailed(reqwest::Error), + /// couldn't parse duration in humantime library: `{0:?}` + CouldNotParseDuration(humantime::DurationError), + #[cfg(feature = "rest")] + /// backoff failed: {0:?} + BackoffError(backoff::Error), + #[cfg(feature = "rest")] + /// Failed to build HTTP client: `{0:?}` + BuildingClientFailed(reqwest::Error), + /// joining URL failed on: {0:?} + JoiningUrlFailed(url::ParseError), +} + mod consts { /// Default number of retries pub(super) const DEFAULT_RETRY: usize = 5; @@ -138,7 +160,7 @@ impl RestBackend { pub fn new( url: impl AsRef, options: impl IntoIterator, - ) -> Result { + ) -> RusticResult { let url = url.as_ref(); let url = if url.ends_with('/') { Url::parse(url).map_err(RestErrorKind::UrlParsingFailed)? @@ -197,7 +219,7 @@ impl RestBackend { /// # Errors /// /// If the url could not be created. - fn url(&self, tpe: FileType, id: &Id) -> Result { + fn url(&self, tpe: FileType, id: &Id) -> Result { let id_path = if tpe == FileType::Config { "config".to_string() } else { @@ -245,7 +267,7 @@ impl ReadBackend for RestBackend { /// A vector of tuples containing the id and size of the files. /// /// [`RestErrorKind::JoiningUrlFailed`]: RestErrorKind::JoiningUrlFailed - fn list_with_size(&self, tpe: FileType) -> Result> { + fn list_with_size(&self, tpe: FileType) -> RusticResult> { // format which is delivered by the REST-service #[derive(Deserialize)] struct ListEntry { @@ -313,7 +335,7 @@ impl ReadBackend for RestBackend { /// * [`RestErrorKind::BackoffError`] - If the backoff failed. 
/// /// [`RestErrorKind::BackoffError`]: RestErrorKind::BackoffError - fn read_full(&self, tpe: FileType, id: &Id) -> Result { + fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { trace!("reading tpe: {tpe:?}, id: {id}"); let url = self.url(tpe, id)?; Ok(backoff::retry_notify( @@ -353,7 +375,7 @@ impl ReadBackend for RestBackend { _cacheable: bool, offset: u32, length: u32, - ) -> Result { + ) -> RusticResult { trace!("reading tpe: {tpe:?}, id: {id}, offset: {offset}, length: {length}"); let offset2 = offset + length - 1; let header_value = format!("bytes={offset}-{offset2}"); @@ -383,7 +405,7 @@ impl WriteBackend for RestBackend { /// * [`RestErrorKind::BackoffError`] - If the backoff failed. /// /// [`RestErrorKind::BackoffError`]: RestErrorKind::BackoffError - fn create(&self) -> Result<()> { + fn create(&self) -> RusticResult<()> { let url = self .url .join("?create=true") @@ -413,13 +435,19 @@ impl WriteBackend for RestBackend { /// * [`RestErrorKind::BackoffError`] - If the backoff failed. /// /// [`RestErrorKind::BackoffError`]: RestErrorKind::BackoffError - fn write_bytes(&self, tpe: FileType, id: &Id, _cacheable: bool, buf: Bytes) -> Result<()> { + fn write_bytes( + &self, + tpe: FileType, + id: &Id, + _cacheable: bool, + buf: Bytes, + ) -> RusticResult<()> { trace!("writing tpe: {:?}, id: {}", &tpe, &id); let req_builder = self.client.post(self.url(tpe, id)?).body(buf); Ok(backoff::retry_notify( self.backoff.clone(), || { - // Note: try_clone() always gives Some(_) as the body is Bytes which is clonable + // Note: try_clone() always gives Some(_) as the body is Bytes which is cloneable _ = req_builder.try_clone().unwrap().send()?.check_error()?; Ok(()) }, @@ -441,7 +469,7 @@ impl WriteBackend for RestBackend { /// * [`RestErrorKind::BackoffError`] - If the backoff failed. 
/// /// [`RestErrorKind::BackoffError`]: RestErrorKind::BackoffError - fn remove(&self, tpe: FileType, id: &Id, _cacheable: bool) -> Result<()> { + fn remove(&self, tpe: FileType, id: &Id, _cacheable: bool) -> RusticResult<()> { trace!("removing tpe: {:?}, id: {}", &tpe, &id); let url = self.url(tpe, id)?; Ok(backoff::retry_notify( diff --git a/crates/backend/src/util.rs b/crates/backend/src/util.rs index ecae470b3..642b722d5 100644 --- a/crates/backend/src/util.rs +++ b/crates/backend/src/util.rs @@ -1,5 +1,5 @@ use crate::SupportedBackend; -use anyhow::Result; +use rustic_core::{BackendErrorKind, BackendResult}; /// A backend location. This is a string that represents the location of the backend. #[derive(PartialEq, Eq, Debug)] @@ -45,7 +45,7 @@ impl std::fmt::Display for BackendLocation { /// If the url is a windows path, the type will be "local". pub fn location_to_type_and_path( raw_location: &str, -) -> Result<(SupportedBackend, BackendLocation)> { +) -> RusticResult<(SupportedBackend, BackendLocation)> { match raw_location.split_once(':') { #[cfg(windows)] Some((drive_letter, _)) if drive_letter.len() == 1 && !raw_location.contains('/') => Ok(( @@ -65,6 +65,9 @@ pub fn location_to_type_and_path( SupportedBackend::Local, BackendLocation(raw_location.to_string()), )), + _ => Err(BackendErrorKind::BackendLocationNotConvertible { + location: raw_location.to_string(), + }), } } diff --git a/crates/core/CHANGELOG.md b/crates/core/CHANGELOG.md index 642f9d483..2fc5ee5b0 100644 --- a/crates/core/CHANGELOG.md +++ b/crates/core/CHANGELOG.md @@ -73,9 +73,11 @@ All notable changes to this project will be documented in this file. 
## [0.3.1](https://github.com/rustic-rs/rustic_core/compare/rustic_core-v0.3.0...rustic_core-v0.3.1) - 2024-09-06 ### Added + - Add autocompletion hints ([#257](https://github.com/rustic-rs/rustic_core/pull/257)) ### Fixed + - don't give invalid password error for other keyfile errors ([#247](https://github.com/rustic-rs/rustic_core/pull/247)) - adjust tests to new Rust version ([#259](https://github.com/rustic-rs/rustic_core/pull/259)) - fix FromStr for SnapshotGroupCriterion ([#254](https://github.com/rustic-rs/rustic_core/pull/254)) @@ -86,6 +88,7 @@ All notable changes to this project will be documented in this file. ## [0.3.0](https://github.com/rustic-rs/rustic_core/compare/rustic_core-v0.2.0...rustic_core-v0.3.0) - 2024-08-18 ### Added + - *(forget)* [**breaking**] Make keep-* Options and add keep-none ([#238](https://github.com/rustic-rs/rustic_core/pull/238)) - add search methods to Repository ([#212](https://github.com/rustic-rs/rustic_core/pull/212)) - [**breaking**] Allow specifying many options in config profile without array ([#211](https://github.com/rustic-rs/rustic_core/pull/211)) @@ -98,6 +101,7 @@ All notable changes to this project will be documented in this file. - Add append-only repository mode ([#164](https://github.com/rustic-rs/rustic_core/pull/164)) ### Fixed + - parse commands given by arg or env using shell_words ([#240](https://github.com/rustic-rs/rustic_core/pull/240)) - Allow non-value/null xattr ([#235](https://github.com/rustic-rs/rustic_core/pull/235)) - ensure Rust 1.76.0 compiles @@ -109,7 +113,7 @@ All notable changes to this project will be documented in this file. - clippy lints ([#220](https://github.com/rustic-rs/rustic_core/pull/220)) - *(errors)* Show filenames in error message coming from ignore source ([#215](https://github.com/rustic-rs/rustic_core/pull/215)) - *(paths)* Handle paths starting with "." 
correctly ([#213](https://github.com/rustic-rs/rustic_core/pull/213)) -- Add warning about unsorted files and sort where neccessary ([#205](https://github.com/rustic-rs/rustic_core/pull/205)) +- Add warning about unsorted files and sort where necessary ([#205](https://github.com/rustic-rs/rustic_core/pull/205)) - *(deps)* update rust crate thiserror to 1.0.58 ([#192](https://github.com/rustic-rs/rustic_core/pull/192)) - *(deps)* update rust crate anyhow to 1.0.81 ([#191](https://github.com/rustic-rs/rustic_core/pull/191)) - *(deps)* update rust crate serde_with to 3.7.0 ([#189](https://github.com/rustic-rs/rustic_core/pull/189)) @@ -123,6 +127,7 @@ All notable changes to this project will be documented in this file. - updated msrv and fix clippy lints ([#160](https://github.com/rustic-rs/rustic_core/pull/160)) ### Other + - dependency updates - Ensure that MSRV 1.76 works - *(deps)* more version updates ([#237](https://github.com/rustic-rs/rustic_core/pull/237)) diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index 374f5fdef..52a3f12ec 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -95,7 +95,6 @@ futures = { version = "0.3.30", optional = true } runtime-format = "0.1.3" # other dependencies -anyhow = { workspace = true } bytes = { workspace = true } bytesize = "1.3.0" chrono = { version = "0.4.38", default-features = false, features = ["clock", "serde"] } @@ -123,6 +122,7 @@ sha2 = "0.10.8" xattr = "1" [dev-dependencies] +anyhow = { workspace = true } expect-test = "1.5.0" flate2 = "1.0.34" globset = "0.4.15" diff --git a/crates/core/src/archiver.rs b/crates/core/src/archiver.rs index 9a304a7bd..4dedeb47b 100644 --- a/crates/core/src/archiver.rs +++ b/crates/core/src/archiver.rs @@ -16,11 +16,34 @@ use crate::{ }, backend::{decrypt::DecryptFullBackend, ReadSource, ReadSourceEntry}, blob::BlobType, - index::{indexer::Indexer, indexer::SharedIndexer, ReadGlobalIndex}, + error::RusticResult, + index::{ + indexer::{Indexer, 
SharedIndexer}, + ReadGlobalIndex, + }, repofile::{configfile::ConfigFile, snapshotfile::SnapshotFile}, - Progress, RusticResult, + Progress, }; +/// [`ArchiverErrorKind`] describes the errors that can be returned from the archiver +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum ArchiverErrorKind { + /// tree stack empty + TreeStackEmpty, + /// cannot open file or directory `{path}` + OpeningFileFailed { + /// path of the file + path: PathBuf, + }, + /// option should contain a value, but contained `None` + UnpackingTreeTypeOptionalFailed, + /// couldn't determine size for item in Archiver + CouldNotDetermineSize, +} + +pub(crate) type ArchiverResult = Result; + /// The `Archiver` is responsible for archiving files and trees. /// It will read the file, chunk it, and write the chunks to the backend. /// @@ -84,6 +107,7 @@ impl<'a, BE: DecryptFullBackend, I: ReadGlobalIndex> Archiver<'a, BE, I> { let file_archiver = FileArchiver::new(be.clone(), index, indexer.clone(), config)?; let tree_archiver = TreeArchiver::new(be.clone(), index, indexer.clone(), config, summary)?; + Ok(Self { file_archiver, tree_archiver, @@ -201,8 +225,12 @@ impl<'a, BE: DecryptFullBackend, I: ReadGlobalIndex> Archiver<'a, BE, I> { }) .try_for_each(|item| self.tree_archiver.add(item)) }) - .unwrap()?; - src_size_handle.join().unwrap(); + .expect("Scoped Archiver thread should not panic!")?; + + src_size_handle + .join() + .expect("Scoped Size Handler thread should not panic!"); + Ok(()) })?; @@ -213,7 +241,9 @@ impl<'a, BE: DecryptFullBackend, I: ReadGlobalIndex> Archiver<'a, BE, I> { self.indexer.write().unwrap().finalize()?; - summary.finalize(self.snap.time)?; + summary + .finalize(self.snap.time) + .map_err(|_err| todo!("Error transition"))?; self.snap.summary = Some(summary); if !skip_identical_parent || Some(self.snap.tree) != self.parent.tree_id() { diff --git a/crates/core/src/archiver/file_archiver.rs b/crates/core/src/archiver/file_archiver.rs 
index 74920dda6..eb428784e 100644 --- a/crates/core/src/archiver/file_archiver.rs +++ b/crates/core/src/archiver/file_archiver.rs @@ -5,6 +5,7 @@ use crate::{ parent::{ItemWithParent, ParentResult}, tree::TreeType, tree_archiver::TreeItem, + ArchiverErrorKind, }, backend::{ decrypt::DecryptWriteBackend, @@ -18,7 +19,7 @@ use crate::{ cdc::rolling_hash::Rabin64, chunker::ChunkIter, crypto::hasher::hash, - error::{ArchiverErrorKind, RusticResult}, + error::RusticResult, index::{indexer::SharedIndexer, ReadGlobalIndex}, progress::Progress, repofile::configfile::ConfigFile, @@ -60,7 +61,7 @@ impl<'a, BE: DecryptWriteBackend, I: ReadGlobalIndex> FileArchiver<'a, BE, I> { /// /// [`PackerErrorKind::SendingCrossbeamMessageFailed`]: crate::error::PackerErrorKind::SendingCrossbeamMessageFailed /// [`PackerErrorKind::IntConversionFailed`]: crate::error::PackerErrorKind::IntConversionFailed - pub(crate) fn new( + pub fn new( be: BE, index: &'a I, indexer: SharedIndexer, @@ -75,7 +76,9 @@ impl<'a, BE: DecryptWriteBackend, I: ReadGlobalIndex> FileArchiver<'a, BE, I> { config, index.total_size(BlobType::Data), )?; + let rabin = Rabin64::new_with_polynom(6, poly); + Ok(Self { index, data_packer, @@ -118,8 +121,11 @@ impl<'a, BE: DecryptWriteBackend, I: ReadGlobalIndex> FileArchiver<'a, BE, I> { (node, size) } else if node.node_type == NodeType::File { let r = open - .ok_or(ArchiverErrorKind::UnpackingTreeTypeOptionalFailed)? - .open()?; + .ok_or(ArchiverErrorKind::UnpackingTreeTypeOptionalFailed) + .map_err(|_err| todo!("Error transition"))? + .open() + .map_err(|_| ArchiverErrorKind::OpeningFileFailed { path: path.clone() }) + .map_err(|_err| todo!("Error transition"))?; self.backup_reader(r, node, p)? 
} else { (node, 0) @@ -138,12 +144,11 @@ impl<'a, BE: DecryptWriteBackend, I: ReadGlobalIndex> FileArchiver<'a, BE, I> { ) -> RusticResult<(Node, u64)> { let chunks: Vec<_> = ChunkIter::new( r, - usize::try_from(node.meta.size) - .map_err(ArchiverErrorKind::ConversionFromU64ToUsizeFailed)?, + usize::try_from(node.meta.size).map_err(|_err| todo!("Error transition"))?, self.rabin.clone(), ) .map(|chunk| { - let chunk = chunk.map_err(ArchiverErrorKind::FromStdIo)?; + let chunk = chunk.map_err(|_err| todo!("Error transition"))?; let id = hash(&chunk); let size = chunk.len() as u64; diff --git a/crates/core/src/archiver/parent.rs b/crates/core/src/archiver/parent.rs index 6db8917f2..87f8cf549 100644 --- a/crates/core/src/archiver/parent.rs +++ b/crates/core/src/archiver/parent.rs @@ -6,10 +6,9 @@ use std::{ use log::warn; use crate::{ - archiver::tree::TreeType, + archiver::{tree::TreeType, ArchiverErrorKind, ArchiverResult}, backend::{decrypt::DecryptReadBackend, node::Node}, blob::tree::{Tree, TreeId}, - error::{ArchiverErrorKind, RusticResult}, index::ReadGlobalIndex, }; @@ -221,7 +220,7 @@ impl Parent { /// * [`ArchiverErrorKind::TreeStackEmpty`] - If the tree stack is empty. 
/// /// [`ArchiverErrorKind::TreeStackEmpty`]: crate::error::ArchiverErrorKind::TreeStackEmpty - fn finish_dir(&mut self) -> RusticResult<()> { + fn finish_dir(&mut self) -> ArchiverResult<()> { let (tree, node_idx) = self .stack .pop() @@ -260,7 +259,7 @@ impl Parent { be: &impl DecryptReadBackend, index: &impl ReadGlobalIndex, item: TreeType, - ) -> RusticResult> { + ) -> ArchiverResult> { let result = match item { TreeType::NewTree((path, node, tree)) => { let parent_result = self diff --git a/crates/core/src/archiver/tree_archiver.rs b/crates/core/src/archiver/tree_archiver.rs index 3e2cc3192..58e703f04 100644 --- a/crates/core/src/archiver/tree_archiver.rs +++ b/crates/core/src/archiver/tree_archiver.rs @@ -4,16 +4,16 @@ use bytesize::ByteSize; use log::{debug, trace}; use crate::{ - archiver::{parent::ParentResult, tree::TreeType}, + archiver::{parent::ParentResult, tree::TreeType, ArchiverErrorKind}, backend::{decrypt::DecryptWriteBackend, node::Node}, blob::{ packer::Packer, tree::{Tree, TreeId}, BlobType, }, - error::{ArchiverErrorKind, RusticResult}, index::{indexer::SharedIndexer, ReadGlobalIndex}, repofile::{configfile::ConfigFile, snapshotfile::SnapshotSummary}, + RusticResult, }; pub(crate) type TreeItem = TreeType<(ParentResult<()>, u64), ParentResult>; @@ -76,6 +76,7 @@ impl<'a, BE: DecryptWriteBackend, I: ReadGlobalIndex> TreeArchiver<'a, BE, I> { config, index.total_size(BlobType::Tree), )?; + Ok(Self { tree: Tree::new(), stack: Vec::new(), @@ -109,7 +110,8 @@ impl<'a, BE: DecryptWriteBackend, I: ReadGlobalIndex> TreeArchiver<'a, BE, I> { let (path, mut node, parent, tree) = self .stack .pop() - .ok_or_else(|| ArchiverErrorKind::TreeStackEmpty)?; + .ok_or_else(|| ArchiverErrorKind::TreeStackEmpty) + .map_err(|_err| todo!("Error transition"))?; // save tree trace!("finishing {path:?}"); @@ -172,7 +174,10 @@ impl<'a, BE: DecryptWriteBackend, I: ReadGlobalIndex> TreeArchiver<'a, BE, I> { /// /// [`PackerErrorKind::SendingCrossbeamMessageFailed`]: 
crate::error::PackerErrorKind::SendingCrossbeamMessageFailed fn backup_tree(&mut self, path: &Path, parent: &ParentResult) -> RusticResult { - let (chunk, id) = self.tree.serialize()?; + let (chunk, id) = self + .tree + .serialize() + .map_err(|_err| todo!("Error transition"))?; let dirsize = chunk.len() as u64; let dirsize_bytes = ByteSize(dirsize).to_string_as(true); diff --git a/crates/core/src/backend.rs b/crates/core/src/backend.rs index b232ceb16..1b2ddaff5 100644 --- a/crates/core/src/backend.rs +++ b/crates/core/src/backend.rs @@ -10,9 +10,8 @@ pub(crate) mod node; pub(crate) mod stdin; pub(crate) mod warm_up; -use std::{io::Read, ops::Deref, path::PathBuf, sync::Arc}; +use std::{io::Read, num::TryFromIntError, ops::Deref, path::PathBuf, sync::Arc}; -use anyhow::Result; use bytes::Bytes; use enum_map::Enum; use log::trace; @@ -24,11 +23,77 @@ use serde_derive::{Deserialize, Serialize}; use crate::{ backend::node::{Metadata, Node, NodeType}, - error::{BackendAccessErrorKind, RusticErrorKind}, + error::RusticResult, id::Id, - RusticResult, }; +// #[derive(thiserror::Error, Debug, displaydoc::Display)] +// /// Experienced an error in the backend: `{0}` +// pub struct BackendDynError(pub Box); + +/// [`BackendErrorKind`] describes the errors that can be returned by the various Backends +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum BackendErrorKind { + /// backend `{0:?}` is not supported! 
+ BackendNotSupported(String), + /// no suitable id found for `{0}` + NoSuitableIdFound(String), + /// id `{0}` is not unique + IdNotUnique(String), + /// creating data in backend failed + CreatingDataOnBackendFailed, + /// writing bytes to backend failed + WritingBytesToBackendFailed, + /// removing data from backend failed + RemovingDataFromBackendFailed, + /// failed to list files on Backend + ListingFilesOnBackendFailed, + /// Path is not allowed: `{0:?}` + PathNotAllowed(PathBuf), + /// Backend location not convertible: `{location}` + BackendLocationNotConvertible { location: String }, +} + +/// [`CryptBackendErrorKind`] describes the errors that can be returned by a Decryption action in Backends +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum CryptBackendErrorKind { + /// decryption not supported for backend + DecryptionNotSupportedForBackend, + /// length of uncompressed data does not match! + LengthOfUncompressedDataDoesNotMatch, + /// failed to read encrypted data during full read + DecryptionInFullReadFailed, + /// failed to read encrypted data during partial read + DecryptionInPartialReadFailed, + /// decrypting from backend failed + DecryptingFromBackendFailed, + /// deserializing from bytes of JSON Text failed: `{0:?}` + DeserializingFromBytesOfJsonTextFailed(serde_json::Error), + /// failed to write data in crypt backend + WritingDataInCryptBackendFailed, + /// failed to list Ids + ListingIdsInDecryptionBackendFailed, + /// writing full hash failed in CryptBackend + WritingFullHashFailed, + /// decoding Zstd compressed data failed: `{0:?}` + DecodingZstdCompressedDataFailed(std::io::Error), + /// Serializing to JSON byte vector failed: `{0:?}` + SerializingToJsonByteVectorFailed(serde_json::Error), + /// encrypting data failed + EncryptingDataFailed, + /// Compressing and appending data failed: `{0:?}` + CopyEncodingDataFailed(std::io::Error), + /// conversion for integer failed: `{0:?}` + 
IntConversionFailed(TryFromIntError), + /// Extra verification failed: After decrypting and decompressing the data changed! + ExtraVerificationFailed, +} + +pub(crate) type BackendResult = Result; +pub(crate) type CryptBackendResult = Result; + /// All [`FileType`]s which are located in separated directories pub const ALL_FILE_TYPES: [FileType; 4] = [ FileType::Key, @@ -95,7 +160,7 @@ pub trait ReadBackend: Send + Sync + 'static { /// # Errors /// /// If the files could not be listed. - fn list_with_size(&self, tpe: FileType) -> Result>; + fn list_with_size(&self, tpe: FileType) -> RusticResult>; /// Lists all files of the given type. /// @@ -106,7 +171,7 @@ pub trait ReadBackend: Send + Sync + 'static { /// # Errors /// /// If the files could not be listed. - fn list(&self, tpe: FileType) -> Result> { + fn list(&self, tpe: FileType) -> RusticResult> { Ok(self .list_with_size(tpe)? .into_iter() @@ -124,7 +189,7 @@ pub trait ReadBackend: Send + Sync + 'static { /// # Errors /// /// If the file could not be read. - fn read_full(&self, tpe: FileType, id: &Id) -> Result; + fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult; /// Reads partial data of the given file. /// @@ -146,7 +211,7 @@ pub trait ReadBackend: Send + Sync + 'static { cacheable: bool, offset: u32, length: u32, - ) -> Result; + ) -> RusticResult; /// Specify if the backend needs a warming-up of files before accessing them. fn needs_warm_up(&self) -> bool { @@ -163,7 +228,7 @@ pub trait ReadBackend: Send + Sync + 'static { /// # Errors /// /// If the file could not be read. - fn warm_up(&self, _tpe: FileType, _id: &Id) -> Result<()> { + fn warm_up(&self, _tpe: FileType, _id: &Id) -> RusticResult<()> { Ok(()) } } @@ -206,7 +271,7 @@ pub trait FindInBackend: ReadBackend { NonUnique, } let mut results = vec![MapResult::None; vec.len()]; - for id in self.list(tpe).map_err(RusticErrorKind::Backend)? { + for id in self.list(tpe)? 
{ let id_hex = id.to_hex(); for (i, v) in vec.iter().enumerate() { if id_hex.starts_with(v.as_ref()) { @@ -224,12 +289,13 @@ pub trait FindInBackend: ReadBackend { .enumerate() .map(|(i, id)| match id { MapResult::Some(id) => Ok(id), - MapResult::None => Err(BackendAccessErrorKind::NoSuitableIdFound( + MapResult::None => Err(BackendErrorKind::NoSuitableIdFound( (vec[i]).as_ref().to_string(), - ) - .into()), + )) + .map_err(|_err| todo!("Error transition")), MapResult::NonUnique => { - Err(BackendAccessErrorKind::IdNotUnique((vec[i]).as_ref().to_string()).into()) + Err(BackendErrorKind::IdNotUnique((vec[i]).as_ref().to_string())) + .map_err(|_err| todo!("Error transition")) } }) .collect() @@ -299,7 +365,7 @@ pub trait WriteBackend: ReadBackend { /// # Returns /// /// The result of the creation. - fn create(&self) -> Result<()> { + fn create(&self) -> RusticResult<()> { Ok(()) } @@ -319,7 +385,7 @@ pub trait WriteBackend: ReadBackend { /// # Returns /// /// The result of the write. - fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> Result<()>; + fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()>; /// Removes the given file. /// @@ -336,7 +402,7 @@ pub trait WriteBackend: ReadBackend { /// # Returns /// /// The result of the removal. - fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> Result<()>; + fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()>; } #[cfg(test)] @@ -345,8 +411,8 @@ mock! { impl ReadBackend for Backend{ fn location(&self) -> String; - fn list_with_size(&self, tpe: FileType) -> Result>; - fn read_full(&self, tpe: FileType, id: &Id) -> Result; + fn list_with_size(&self, tpe: FileType) -> RusticResult>; + fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult; fn read_partial( &self, tpe: FileType, @@ -354,24 +420,24 @@ mock! 
{ cacheable: bool, offset: u32, length: u32, - ) -> Result; + ) -> RusticResult; } impl WriteBackend for Backend { - fn create(&self) -> Result<()>; - fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> Result<()>; - fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> Result<()>; + fn create(&self) -> RusticResult<()>; + fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()>; + fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()>; } } impl WriteBackend for Arc { - fn create(&self) -> Result<()> { + fn create(&self) -> RusticResult<()> { self.deref().create() } - fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> Result<()> { + fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()> { self.deref().write_bytes(tpe, id, cacheable, buf) } - fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> Result<()> { + fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()> { self.deref().remove(tpe, id, cacheable) } } @@ -380,13 +446,13 @@ impl ReadBackend for Arc { fn location(&self) -> String { self.deref().location() } - fn list_with_size(&self, tpe: FileType) -> Result> { + fn list_with_size(&self, tpe: FileType) -> RusticResult> { self.deref().list_with_size(tpe) } - fn list(&self, tpe: FileType) -> Result> { + fn list(&self, tpe: FileType) -> RusticResult> { self.deref().list(tpe) } - fn read_full(&self, tpe: FileType, id: &Id) -> Result { + fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { self.deref().read_full(tpe, id) } fn read_partial( @@ -396,7 +462,7 @@ impl ReadBackend for Arc { cacheable: bool, offset: u32, length: u32, - ) -> Result { + ) -> RusticResult { self.deref() .read_partial(tpe, id, cacheable, offset, length) } @@ -429,7 +495,8 @@ impl ReadSourceEntry { fn from_path(path: PathBuf, open: Option) -> RusticResult { let node = Node::new_node( 
path.file_name() - .ok_or_else(|| BackendAccessErrorKind::PathNotAllowed(path.clone()))?, + .ok_or_else(|| BackendErrorKind::PathNotAllowed(path.clone())) + .map_err(|_err| todo!("Error transition"))?, NodeType::File, Metadata::default(), ); diff --git a/crates/core/src/backend/cache.rs b/crates/core/src/backend/cache.rs index d00d9699e..60dd8e484 100644 --- a/crates/core/src/backend/cache.rs +++ b/crates/core/src/backend/cache.rs @@ -6,7 +6,6 @@ use std::{ sync::Arc, }; -use anyhow::Result; use bytes::Bytes; use dirs::cache_dir; use log::{trace, warn}; @@ -14,11 +13,32 @@ use walkdir::WalkDir; use crate::{ backend::{FileType, ReadBackend, WriteBackend}, - error::{CacheBackendErrorKind, RusticResult}, + error::RusticResult, id::Id, repofile::configfile::RepositoryId, }; +/// [`CacheBackendErrorKind`] describes the errors that can be returned by a Caching action in Backends +#[derive(thiserror::Error, Debug, displaydoc::Display)] +pub enum CacheBackendErrorKind { + /// Cache directory could not be determined, please set the environment variable XDG_CACHE_HOME or HOME! + NoCacheDirectory, + /// Error in cache backend {context} for {tpe:?} with {id}: {source} + Io { + context: String, + source: std::io::Error, + tpe: Option, + id: Id, + }, + /// Ensuring tag failed for cache directory {path}: {source} + EnsureTagFailed { + source: std::io::Error, + path: PathBuf, + }, +} + +pub(crate) type CacheBackendResult = Result; + /// Backend that caches data. /// /// This backend caches data in a directory. @@ -65,7 +85,7 @@ impl ReadBackend for CachedBackend { /// # Returns /// /// A vector of tuples containing the id and size of the files. - fn list_with_size(&self, tpe: FileType) -> Result> { + fn list_with_size(&self, tpe: FileType) -> RusticResult> { let list = self.be.list_with_size(tpe)?; if tpe.is_cacheable() { @@ -93,7 +113,7 @@ impl ReadBackend for CachedBackend { /// The data read. 
/// /// [`CacheBackendErrorKind::FromIoError`]: crate::error::CacheBackendErrorKind::FromIoError - fn read_full(&self, tpe: FileType, id: &Id) -> Result { + fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { if tpe.is_cacheable() { match self.cache.read_full(tpe, id) { Ok(Some(data)) => return Ok(data), @@ -138,7 +158,7 @@ impl ReadBackend for CachedBackend { cacheable: bool, offset: u32, length: u32, - ) -> Result { + ) -> RusticResult { if cacheable || tpe.is_cacheable() { match self.cache.read_partial(tpe, id, offset, length) { Ok(Some(data)) => return Ok(data), @@ -164,14 +184,14 @@ impl ReadBackend for CachedBackend { self.be.needs_warm_up() } - fn warm_up(&self, tpe: FileType, id: &Id) -> Result<()> { + fn warm_up(&self, tpe: FileType, id: &Id) -> RusticResult<()> { self.be.warm_up(tpe, id) } } impl WriteBackend for CachedBackend { /// Creates the backend. - fn create(&self) -> Result<()> { + fn create(&self) -> RusticResult<()> { self.be.create() } @@ -185,7 +205,7 @@ impl WriteBackend for CachedBackend { /// * `id` - The id of the file. /// * `cacheable` - Whether the file is cacheable. /// * `buf` - The data to write. - fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> Result<()> { + fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()> { if cacheable || tpe.is_cacheable() { if let Err(err) = self.cache.write_bytes(tpe, id, &buf) { warn!("Error in cache backend writing {tpe:?},{id}: {err}"); @@ -202,7 +222,7 @@ impl WriteBackend for CachedBackend { /// /// * `tpe` - The type of the file. /// * `id` - The id of the file. 
- fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> Result<()> { + fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()> { if cacheable || tpe.is_cacheable() { if let Err(err) = self.cache.remove(tpe, id) { warn!("Error in cache backend removing {tpe:?},{id}: {err}"); @@ -240,14 +260,40 @@ impl Cache { let mut path = if let Some(p) = path { p } else { - let mut dir = cache_dir().ok_or_else(|| CacheBackendErrorKind::NoCacheDirectory)?; + let mut dir = cache_dir() + .ok_or_else(|| CacheBackendErrorKind::NoCacheDirectory) + .map_err(|_err| todo!("Error transition"))?; dir.push("rustic"); dir }; - fs::create_dir_all(&path).map_err(CacheBackendErrorKind::FromIoError)?; - cachedir::ensure_tag(&path).map_err(CacheBackendErrorKind::FromIoError)?; + + fs::create_dir_all(&path) + .map_err(|err| CacheBackendErrorKind::Io { + context: "while creating cache directory".into(), + source: err, + tpe: None, + id: id.clone().into_inner(), + }) + .map_err(|_err| todo!("Error transition"))?; + + cachedir::ensure_tag(&path) + .map_err(|err| CacheBackendErrorKind::EnsureTagFailed { + source: err, + path: path.clone(), + }) + .map_err(|_err| todo!("Error transition"))?; + path.push(id.to_hex()); - fs::create_dir_all(&path).map_err(CacheBackendErrorKind::FromIoError)?; + + fs::create_dir_all(&path) + .map_err(|err| CacheBackendErrorKind::Io { + context: "while creating cache directory with id".into(), + source: err, + tpe: None, + id: id.clone().into_inner(), + }) + .map_err(|_err| todo!("Error transition"))?; + Ok(Self { path }) } @@ -380,7 +426,13 @@ impl Cache { Ok(Some(data.into())) } Err(err) if err.kind() == ErrorKind::NotFound => Ok(None), - Err(err) => Err(CacheBackendErrorKind::FromIoError(err).into()), + Err(err) => Err(CacheBackendErrorKind::Io { + context: "while reading full data of file".into(), + source: err, + tpe: Some(tpe.clone()), + id: id.clone(), + }) + .map_err(|_err| todo!("Error transition")), } } @@ -414,14 +466,22 @@ impl Cache 
{ let mut file = match File::open(self.path(tpe, id)) { Ok(file) => file, Err(err) if err.kind() == ErrorKind::NotFound => return Ok(None), - Err(err) => return Err(CacheBackendErrorKind::FromIoError(err).into()), + Err(err) => { + return Err(CacheBackendErrorKind::Io { + context: "while opening file".into(), + source: err, + tpe: Some(tpe.clone()), + id: id.clone(), + }) + .map_err(|_err| todo!("Error transition")) + } }; _ = file .seek(SeekFrom::Start(u64::from(offset))) - .map_err(CacheBackendErrorKind::FromIoError)?; + .map_err(|_err| todo!("Error transition"))?; let mut vec = vec![0; length as usize]; file.read_exact(&mut vec) - .map_err(CacheBackendErrorKind::FromIoError)?; + .map_err(|_err| todo!("Error transition"))?; trace!("cache hit!"); Ok(Some(vec.into())) } @@ -441,16 +501,40 @@ impl Cache { /// [`CacheBackendErrorKind::FromIoError`]: crate::error::CacheBackendErrorKind::FromIoError pub fn write_bytes(&self, tpe: FileType, id: &Id, buf: &Bytes) -> RusticResult<()> { trace!("cache writing tpe: {:?}, id: {}", &tpe, &id); - fs::create_dir_all(self.dir(tpe, id)).map_err(CacheBackendErrorKind::FromIoError)?; + + fs::create_dir_all(self.dir(tpe, id)) + .map_err(|err| CacheBackendErrorKind::Io { + context: "while creating directories".into(), + source: err, + tpe: Some(tpe.clone()), + id: id.clone(), + }) + .map_err(|_err| todo!("Error transition"))?; + let filename = self.path(tpe, id); + let mut file = fs::OpenOptions::new() .create(true) .truncate(true) .write(true) - .open(filename) - .map_err(CacheBackendErrorKind::FromIoError)?; + .open(&filename) + .map_err(|err| CacheBackendErrorKind::Io { + context: "while opening file paths".into(), + source: err, + tpe: Some(tpe.clone()), + id: id.clone(), + }) + .map_err(|_err| todo!("Error transition"))?; + file.write_all(buf) - .map_err(CacheBackendErrorKind::FromIoError)?; + .map_err(|err| CacheBackendErrorKind::Io { + context: "while writing to buffer".into(), + source: err, + tpe: Some(tpe.clone()), + id: 
id.clone(), + }) + .map_err(|_err| todo!("Error transition"))?; + Ok(()) } @@ -469,7 +553,15 @@ impl Cache { pub fn remove(&self, tpe: FileType, id: &Id) -> RusticResult<()> { trace!("cache writing tpe: {:?}, id: {}", &tpe, &id); let filename = self.path(tpe, id); - fs::remove_file(filename).map_err(CacheBackendErrorKind::FromIoError)?; + fs::remove_file(&filename) + .map_err(|err| CacheBackendErrorKind::Io { + context: format!("while removing file: {filename:?}"), + source: err, + tpe: Some(tpe.clone()), + id: id.clone(), + }) + .map_err(|_err| todo!("Error transition"))?; + Ok(()) } } diff --git a/crates/core/src/backend/childstdout.rs b/crates/core/src/backend/childstdout.rs index 954392136..4debae0bd 100644 --- a/crates/core/src/backend/childstdout.rs +++ b/crates/core/src/backend/childstdout.rs @@ -7,8 +7,8 @@ use std::{ use crate::{ backend::{ReadSource, ReadSourceEntry}, - error::{RepositoryErrorKind, RusticResult}, - CommandInput, + error::RusticResult, + repository::command_input::{CommandInput, CommandInputErrorKind}, }; /// The `ChildStdoutSource` is a `ReadSource` when spawning a child process and reading its stdout @@ -35,13 +35,10 @@ impl ChildStdoutSource { .args(cmd.args()) .stdout(Stdio::piped()) .spawn() - .map_err(|err| { - RepositoryErrorKind::CommandExecutionFailed( - "stdin-command".into(), - "call".into(), - err, - ) - .into() + .map_err(|err| CommandInputErrorKind::ProcessExecutionFailed { + command: cmd.clone(), + path: path.clone(), + source: err, }); let process = cmd.on_failure().display_result(process)?; diff --git a/crates/core/src/backend/decrypt.rs b/crates/core/src/backend/decrypt.rs index e90d6f86d..00f00f86c 100644 --- a/crates/core/src/backend/decrypt.rs +++ b/crates/core/src/backend/decrypt.rs @@ -1,6 +1,5 @@ use std::{num::NonZeroU32, sync::Arc}; -use anyhow::Result; use bytes::Bytes; use crossbeam_channel::{unbounded, Receiver}; use rayon::prelude::*; @@ -8,21 +7,21 @@ use zstd::stream::{copy_encode, decode_all, encode_all}; 
pub use zstd::compression_level_range; -/// The maximum compression level allowed by zstd -#[must_use] -pub fn max_compression_level() -> i32 { - *compression_level_range().end() -} - use crate::{ - backend::{FileType, ReadBackend, WriteBackend}, + backend::{CryptBackendErrorKind, CryptBackendResult, FileType, ReadBackend, WriteBackend}, crypto::{hasher::hash, CryptoKey}, - error::{CryptBackendErrorKind, RusticErrorKind}, + error::RusticResult, id::Id, repofile::{RepoFile, RepoId}, - Progress, RusticResult, + Progress, }; +/// The maximum compression level allowed by zstd +#[must_use] +pub fn max_compression_level() -> i32 { + *compression_level_range().end() +} + /// A backend that can decrypt data. /// This is a trait that is implemented by all backends that can decrypt data. /// It is implemented for all backends that implement `DecryptWriteBackend` and `DecryptReadBackend`. @@ -79,9 +78,11 @@ pub trait DecryptReadBackend: ReadBackend + Clone + 'static { let mut data = self.decrypt(data)?; if let Some(length) = uncompressed_length { data = decode_all(&*data) - .map_err(CryptBackendErrorKind::DecodingZstdCompressedDataFailed)?; + .map_err(CryptBackendErrorKind::DecodingZstdCompressedDataFailed) + .map_err(|_err| todo!("Error transition"))?; if data.len() != length.get() as usize { - return Err(CryptBackendErrorKind::LengthOfUncompressedDataDoesNotMatch.into()); + return Err(CryptBackendErrorKind::LengthOfUncompressedDataDoesNotMatch) + .map_err(|_err| todo!("Error transition")); } } Ok(data.into()) @@ -111,9 +112,7 @@ pub trait DecryptReadBackend: ReadBackend + Clone + 'static { uncompressed_length: Option, ) -> RusticResult { self.read_encrypted_from_partial( - &self - .read_partial(tpe, id, cacheable, offset, length) - .map_err(RusticErrorKind::Backend)?, + &self.read_partial(tpe, id, cacheable, offset, length)?, uncompressed_length, ) } @@ -130,7 +129,8 @@ pub trait DecryptReadBackend: ReadBackend + Clone + 'static { fn get_file(&self, id: &Id) -> RusticResult 
{ let data = self.read_encrypted_full(F::TYPE, id)?; Ok(serde_json::from_slice(&data) - .map_err(CryptBackendErrorKind::DeserializingFromBytesOfJsonTextFailed)?) + .map_err(CryptBackendErrorKind::DeserializingFromBytesOfJsonTextFailed) + .map_err(|_err| todo!("Error transition"))?) } /// Streams all files. @@ -145,7 +145,7 @@ pub trait DecryptReadBackend: ReadBackend + Clone + 'static { /// /// If the files could not be read. fn stream_all(&self, p: &impl Progress) -> StreamResult { - let list = self.list(F::TYPE).map_err(RusticErrorKind::Backend)?; + let list = self.list(F::TYPE)?; self.stream_list(&list, p) } @@ -221,10 +221,12 @@ pub trait DecryptWriteBackend: WriteBackend + Clone + 'static { /// /// The hash of the written data. fn hash_write_full_uncompressed(&self, tpe: FileType, data: &[u8]) -> RusticResult { - let data = self.key().encrypt_data(data)?; + let data = self + .key() + .encrypt_data(data) + .map_err(|_err| todo!("Error transition"))?; let id = hash(&data); - self.write_bytes(tpe, &id, false, data.into()) - .map_err(RusticErrorKind::Backend)?; + self.write_bytes(tpe, &id, false, data.into())?; Ok(id) } /// Saves the given file. 
@@ -244,7 +246,8 @@ pub trait DecryptWriteBackend: WriteBackend + Clone + 'static { /// [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`]: crate::error::CryptBackendErrorKind::SerializingToJsonByteVectorFailed fn save_file(&self, file: &F) -> RusticResult { let data = serde_json::to_vec(file) - .map_err(CryptBackendErrorKind::SerializingToJsonByteVectorFailed)?; + .map_err(CryptBackendErrorKind::SerializingToJsonByteVectorFailed) + .map_err(|_err| todo!("Error transition"))?; self.hash_write_full(F::TYPE, &data) } @@ -265,7 +268,8 @@ pub trait DecryptWriteBackend: WriteBackend + Clone + 'static { /// [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`]: crate::error::CryptBackendErrorKind::SerializingToJsonByteVectorFailed fn save_file_uncompressed(&self, file: &F) -> RusticResult { let data = serde_json::to_vec(file) - .map_err(CryptBackendErrorKind::SerializingToJsonByteVectorFailed)?; + .map_err(CryptBackendErrorKind::SerializingToJsonByteVectorFailed) + .map_err(|_err| todo!("Error transition"))?; self.hash_write_full_uncompressed(F::TYPE, &data) } @@ -381,21 +385,30 @@ impl DecryptBackend { Ok(match decrypted.first() { Some(b'{' | b'[') => decrypted, // not compressed Some(2) => decode_all(&decrypted[1..]) - .map_err(CryptBackendErrorKind::DecodingZstdCompressedDataFailed)?, // 2 indicates compressed data following - _ => return Err(CryptBackendErrorKind::DecryptionNotSupportedForBackend)?, + .map_err(CryptBackendErrorKind::DecodingZstdCompressedDataFailed) + .map_err(|_err| todo!("Error transition"))?, // 2 indicates compressed data following + _ => { + return Err(CryptBackendErrorKind::DecryptionNotSupportedForBackend) + .map_err(|_err| todo!("Error transition"))? 
+ } }) } /// encrypt and potentially compress a repository file - fn encrypt_file(&self, data: &[u8]) -> RusticResult> { + fn encrypt_file(&self, data: &[u8]) -> CryptBackendResult> { let data_encrypted = match self.zstd { Some(level) => { let mut out = vec![2_u8]; copy_encode(data, &mut out, level) .map_err(CryptBackendErrorKind::CopyEncodingDataFailed)?; - self.key().encrypt_data(&out)? + self.key() + .encrypt_data(&out) + .map_err(|_err| todo!("Error transition"))? } - None => self.key().encrypt_data(data)?, + None => self + .key() + .encrypt_data(data) + .map_err(|_err| todo!("Error transition"))?, }; Ok(data_encrypted) } @@ -404,23 +417,33 @@ impl DecryptBackend { if self.extra_verify { let check_data = self.decrypt_file(data_encrypted)?; if data != check_data { - return Err(CryptBackendErrorKind::ExtraVerificationFailed.into()); + return Err(CryptBackendErrorKind::ExtraVerificationFailed) + .map_err(|_err| todo!("Error transition")); } } Ok(()) } /// encrypt and potentially compress some data - fn encrypt_data(&self, data: &[u8]) -> RusticResult<(Vec, u32, Option)> { + fn encrypt_data(&self, data: &[u8]) -> CryptBackendResult<(Vec, u32, Option)> { let data_len: u32 = data .len() .try_into() .map_err(CryptBackendErrorKind::IntConversionFailed)?; let (data_encrypted, uncompressed_length) = match self.zstd { - None => (self.key.encrypt_data(data)?, None), + None => ( + self.key + .encrypt_data(data) + .map_err(|_err| todo!("Error transition"))?, + None, + ), // compress if requested Some(level) => ( - self.key.encrypt_data(&encode_all(data, level)?)?, + self.key + .encrypt_data( + &encode_all(data, level).map_err(|_err| todo!("Error transition"))?, + ) + .map_err(|_err| todo!("Error transition"))?, NonZeroU32::new(data_len), ), }; @@ -437,7 +460,8 @@ impl DecryptBackend { let data_check = self.read_encrypted_from_partial(data_encrypted, uncompressed_length)?; if data != data_check { - return Err(CryptBackendErrorKind::ExtraVerificationFailed.into()); + return 
Err(CryptBackendErrorKind::ExtraVerificationFailed) + .map_err(|_err| todo!("Error transition")); } } Ok(()) @@ -470,16 +494,19 @@ impl DecryptWriteBackend for DecryptBackend { /// /// [`CryptBackendErrorKind::CopyEncodingDataFailed`]: crate::error::CryptBackendErrorKind::CopyEncodingDataFailed fn hash_write_full(&self, tpe: FileType, data: &[u8]) -> RusticResult { - let data_encrypted = self.encrypt_file(data)?; + let data_encrypted = self + .encrypt_file(data) + .map_err(|_err| todo!("Error transition"))?; self.very_file(&data_encrypted, data)?; let id = hash(&data_encrypted); - self.write_bytes(tpe, &id, false, data_encrypted.into()) - .map_err(RusticErrorKind::Backend)?; + self.write_bytes(tpe, &id, false, data_encrypted.into())?; Ok(id) } fn process_data(&self, data: &[u8]) -> RusticResult<(Vec, u32, Option)> { - let (data_encrypted, data_len, uncompressed_length) = self.encrypt_data(data)?; + let (data_encrypted, data_len, uncompressed_length) = self + .encrypt_data(data) + .map_err(|_err| todo!("Error transition"))?; self.very_data(&data_encrypted, uncompressed_length, data)?; Ok((data_encrypted, data_len, uncompressed_length)) } @@ -514,7 +541,9 @@ impl DecryptReadBackend for DecryptBackend { /// /// A vector containing the decrypted data. fn decrypt(&self, data: &[u8]) -> RusticResult> { - self.key.decrypt_data(data) + self.key + .decrypt_data(data) + .map_err(|_err| todo!("Error transition")) } /// Reads encrypted data from the backend. @@ -532,8 +561,7 @@ impl DecryptReadBackend for DecryptBackend { /// [`CryptBackendErrorKind::DecryptionNotSupportedForBackend`]: crate::error::CryptBackendErrorKind::DecryptionNotSupportedForBackend /// [`CryptBackendErrorKind::DecodingZstdCompressedDataFailed`]: crate::error::CryptBackendErrorKind::DecodingZstdCompressedDataFailed fn read_encrypted_full(&self, tpe: FileType, id: &Id) -> RusticResult { - self.decrypt_file(&self.read_full(tpe, id).map_err(RusticErrorKind::Backend)?) 
- .map(Into::into) + self.decrypt_file(&self.read_full(tpe, id)?).map(Into::into) } } @@ -542,15 +570,15 @@ impl ReadBackend for DecryptBackend { self.be.location() } - fn list(&self, tpe: FileType) -> Result> { + fn list(&self, tpe: FileType) -> RusticResult> { self.be.list(tpe) } - fn list_with_size(&self, tpe: FileType) -> Result> { + fn list_with_size(&self, tpe: FileType) -> RusticResult> { self.be.list_with_size(tpe) } - fn read_full(&self, tpe: FileType, id: &Id) -> Result { + fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { self.be.read_full(tpe, id) } @@ -561,21 +589,21 @@ impl ReadBackend for DecryptBackend { cacheable: bool, offset: u32, length: u32, - ) -> Result { + ) -> RusticResult { self.be.read_partial(tpe, id, cacheable, offset, length) } } impl WriteBackend for DecryptBackend { - fn create(&self) -> Result<()> { + fn create(&self) -> RusticResult<()> { self.be.create() } - fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> Result<()> { + fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()> { self.be.write_bytes(tpe, id, cacheable, buf) } - fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> Result<()> { + fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()> { self.be.remove(tpe, id, cacheable) } } diff --git a/crates/core/src/backend/dry_run.rs b/crates/core/src/backend/dry_run.rs index e93c90237..6dca6fd1c 100644 --- a/crates/core/src/backend/dry_run.rs +++ b/crates/core/src/backend/dry_run.rs @@ -1,13 +1,12 @@ -use anyhow::Result; use bytes::Bytes; use zstd::decode_all; use crate::{ backend::{ decrypt::{DecryptFullBackend, DecryptReadBackend, DecryptWriteBackend}, - FileType, ReadBackend, WriteBackend, + CryptBackendErrorKind, FileType, ReadBackend, WriteBackend, }, - error::{CryptBackendErrorKind, RusticErrorKind, RusticResult}, + error::RusticResult, id::Id, }; @@ -60,13 +59,16 @@ impl DecryptReadBackend for DryRunBackend { /// 
[`CryptBackendErrorKind::DecryptionNotSupportedForBackend`]: crate::error::CryptBackendErrorKind::DecryptionNotSupportedForBackend /// [`CryptBackendErrorKind::DecodingZstdCompressedDataFailed`]: crate::error::CryptBackendErrorKind::DecodingZstdCompressedDataFailed fn read_encrypted_full(&self, tpe: FileType, id: &Id) -> RusticResult { - let decrypted = - self.decrypt(&self.read_full(tpe, id).map_err(RusticErrorKind::Backend)?)?; + let decrypted = self.decrypt(&self.read_full(tpe, id)?)?; Ok(match decrypted.first() { Some(b'{' | b'[') => decrypted, // not compressed Some(2) => decode_all(&decrypted[1..]) - .map_err(CryptBackendErrorKind::DecodingZstdCompressedDataFailed)?, // 2 indicates compressed data following - _ => return Err(CryptBackendErrorKind::DecryptionNotSupportedForBackend.into()), + .map_err(CryptBackendErrorKind::DecodingZstdCompressedDataFailed) + .map_err(|_err| todo!("Error transition"))?, // 2 indicates compressed data following + _ => { + return Err(CryptBackendErrorKind::DecryptionNotSupportedForBackend) + .map_err(|_err| todo!("Error transition")) + } } .into()) } @@ -77,11 +79,11 @@ impl ReadBackend for DryRunBackend { self.be.location() } - fn list_with_size(&self, tpe: FileType) -> Result> { + fn list_with_size(&self, tpe: FileType) -> RusticResult> { self.be.list_with_size(tpe) } - fn read_full(&self, tpe: FileType, id: &Id) -> Result { + fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { self.be.read_full(tpe, id) } @@ -92,7 +94,7 @@ impl ReadBackend for DryRunBackend { cacheable: bool, offset: u32, length: u32, - ) -> Result { + ) -> RusticResult { self.be.read_partial(tpe, id, cacheable, offset, length) } } @@ -133,7 +135,7 @@ impl DecryptWriteBackend for DryRunBackend { } impl WriteBackend for DryRunBackend { - fn create(&self) -> Result<()> { + fn create(&self) -> RusticResult<()> { if self.dry_run { Ok(()) } else { @@ -141,7 +143,7 @@ impl WriteBackend for DryRunBackend { } } - fn write_bytes(&self, tpe: FileType, id: &Id, 
cacheable: bool, buf: Bytes) -> Result<()> { + fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()> { if self.dry_run { Ok(()) } else { @@ -149,7 +151,7 @@ impl WriteBackend for DryRunBackend { } } - fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> Result<()> { + fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()> { if self.dry_run { Ok(()) } else { diff --git a/crates/core/src/backend/hotcold.rs b/crates/core/src/backend/hotcold.rs index 75a1f750f..e5306e2f4 100644 --- a/crates/core/src/backend/hotcold.rs +++ b/crates/core/src/backend/hotcold.rs @@ -1,10 +1,10 @@ use std::sync::Arc; -use anyhow::Result; use bytes::Bytes; use crate::{ backend::{FileType, ReadBackend, WriteBackend}, + error::RusticResult, id::Id, }; @@ -45,11 +45,11 @@ impl ReadBackend for HotColdBackend { self.be.location() } - fn list_with_size(&self, tpe: FileType) -> Result> { + fn list_with_size(&self, tpe: FileType) -> RusticResult> { self.be.list_with_size(tpe) } - fn read_full(&self, tpe: FileType, id: &Id) -> Result { + fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { self.be_hot.read_full(tpe, id) } @@ -60,7 +60,7 @@ impl ReadBackend for HotColdBackend { cacheable: bool, offset: u32, length: u32, - ) -> Result { + ) -> RusticResult { if cacheable || tpe != FileType::Pack { self.be_hot.read_partial(tpe, id, cacheable, offset, length) } else { @@ -72,25 +72,25 @@ impl ReadBackend for HotColdBackend { self.be.needs_warm_up() } - fn warm_up(&self, tpe: FileType, id: &Id) -> Result<()> { + fn warm_up(&self, tpe: FileType, id: &Id) -> RusticResult<()> { self.be.warm_up(tpe, id) } } impl WriteBackend for HotColdBackend { - fn create(&self) -> Result<()> { + fn create(&self) -> RusticResult<()> { self.be.create()?; self.be_hot.create() } - fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> Result<()> { + fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) 
-> RusticResult<()> { if tpe != FileType::Config && (cacheable || tpe != FileType::Pack) { self.be_hot.write_bytes(tpe, id, cacheable, buf.clone())?; } self.be.write_bytes(tpe, id, cacheable, buf) } - fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> Result<()> { + fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()> { // First remove cold file self.be.remove(tpe, id, cacheable)?; if cacheable || tpe != FileType::Pack { diff --git a/crates/core/src/backend/ignore.rs b/crates/core/src/backend/ignore.rs index 4270851fa..8ebc3a381 100644 --- a/crates/core/src/backend/ignore.rs +++ b/crates/core/src/backend/ignore.rs @@ -6,8 +6,6 @@ use std::{ path::{Path, PathBuf}, }; -use serde_with::{serde_as, DisplayFromStr}; - use bytesize::ByteSize; #[cfg(not(windows))] use cached::proc_macro::cached; @@ -19,6 +17,7 @@ use ignore::{overrides::OverrideBuilder, DirEntry, Walk, WalkBuilder}; use log::warn; #[cfg(not(windows))] use nix::unistd::{Gid, Group, Uid, User}; +use serde_with::{serde_as, DisplayFromStr}; #[cfg(not(windows))] use crate::backend::node::ExtendedAttribute; @@ -28,9 +27,47 @@ use crate::{ node::{Metadata, Node, NodeType}, ReadSource, ReadSourceEntry, ReadSourceOpen, }, - error::{IgnoreErrorKind, RusticResult}, + error::RusticResult, }; +/// [`IgnoreErrorKind`] describes the errors that can be returned by a Ignore action in Backends +#[derive(thiserror::Error, Debug, displaydoc::Display)] +pub enum IgnoreErrorKind { + /// generic Ignore error: `{0:?}` + GenericError(ignore::Error), + /// Error reading glob file `{file:?}`: `{source:?}` + ErrorGlob { + file: PathBuf, + source: std::io::Error, + }, + /// Unable to open file `{file:?}`: `{source:?}` + UnableToOpenFile { + file: PathBuf, + source: std::io::Error, + }, + /// Error getting xattrs for `{path:?}`: `{source:?}` + ErrorXattr { + path: PathBuf, + source: std::io::Error, + }, + /// Error reading link target for `{path:?}`: `{source:?}` + ErrorLink { + path: PathBuf, + 
source: std::io::Error, + }, + #[cfg(not(windows))] + /// Error converting ctime `{ctime}` and ctime_nsec `{ctime_nsec}` to Utc Timestamp: `{source:?}` + CtimeConversionToTimestampFailed { + ctime: i64, + ctime_nsec: i64, + source: ignore::Error, + }, + /// Error acquiring metadata for `{name}`: `{source:?}` + AcquiringMetadataFailed { name: String, source: ignore::Error }, +} + +pub(crate) type IgnoreResult = Result; + /// A [`LocalSource`] is a source from local paths which is used to be read from (i.e. to backup it). #[derive(Debug)] pub struct LocalSource { @@ -160,7 +197,7 @@ impl LocalSource { for g in &filter_opts.globs { _ = override_builder .add(g) - .map_err(IgnoreErrorKind::GenericError)?; + .map_err(|_err| todo!("Error transition"))?; } for file in &filter_opts.glob_files { @@ -168,22 +205,23 @@ impl LocalSource { .map_err(|err| IgnoreErrorKind::ErrorGlob { file: file.into(), source: err, - })? + }) + .map_err(|_err| todo!("Error transition"))? .lines() { _ = override_builder .add(line) - .map_err(IgnoreErrorKind::GenericError)?; + .map_err(|_err| todo!("Error transition"))?; } } _ = override_builder .case_insensitive(true) - .map_err(IgnoreErrorKind::GenericError)?; + .map_err(|_err| todo!("Error transition"))?; for g in &filter_opts.iglobs { _ = override_builder .add(g) - .map_err(IgnoreErrorKind::GenericError)?; + .map_err(|_err| todo!("Error transition"))?; } for file in &filter_opts.iglob_files { @@ -191,12 +229,13 @@ impl LocalSource { .map_err(|err| IgnoreErrorKind::ErrorGlob { file: file.into(), source: err, - })? + }) + .map_err(|_err| todo!("Error transition"))? 
.lines() { _ = override_builder .add(line) - .map_err(IgnoreErrorKind::GenericError)?; + .map_err(|_err| todo!("Error transition"))?; } } @@ -216,7 +255,7 @@ impl LocalSource { .overrides( override_builder .build() - .map_err(IgnoreErrorKind::GenericError)?, + .map_err(|_err| todo!("Error transition"))?, ); let exclude_if_present = filter_opts.exclude_if_present.clone(); @@ -260,13 +299,12 @@ impl ReadSourceOpen for OpenFile { /// [`IgnoreErrorKind::UnableToOpenFile`]: crate::error::IgnoreErrorKind::UnableToOpenFile fn open(self) -> RusticResult { let path = self.0; - File::open(&path).map_err(|err| { - IgnoreErrorKind::UnableToOpenFile { + File::open(&path) + .map_err(|err| IgnoreErrorKind::UnableToOpenFile { file: path, source: err, - } - .into() - }) + }) + .map_err(|_err| todo!("Error transition")) } } @@ -330,11 +368,11 @@ impl Iterator for LocalSourceWalker { } .map(|e| { map_entry( - e.map_err(IgnoreErrorKind::GenericError)?, + e.map_err(|_err| todo!("Error transition"))?, self.save_opts.with_atime, self.save_opts.ignore_devid, ) - .map_err(Into::into) + .map_err(|_err| todo!("Error transition")) }) } } @@ -360,9 +398,9 @@ fn map_entry( entry: DirEntry, with_atime: bool, _ignore_devid: bool, -) -> RusticResult> { +) -> IgnoreResult> { let name = entry.file_name(); - let m = entry.metadata().map_err(IgnoreErrorKind::GenericError)?; + let m = entry.metadata().map_err(|_err| todo!("Error transition"))?; // TODO: Set them to suitable values let uid = None; @@ -473,7 +511,7 @@ fn get_group_by_gid(gid: u32) -> Option { } #[cfg(all(not(windows), target_os = "openbsd"))] -fn list_extended_attributes(path: &Path) -> RusticResult> { +fn list_extended_attributes(path: &Path) -> IgnoreResult> { Ok(vec![]) } @@ -487,7 +525,7 @@ fn list_extended_attributes(path: &Path) -> RusticResult> /// /// * [`IgnoreErrorKind::ErrorXattr`] - if Xattr couldn't be listed or couldn't be read #[cfg(all(not(windows), not(target_os = "openbsd")))] -fn list_extended_attributes(path: &Path) 
-> RusticResult> { +fn list_extended_attributes(path: &Path) -> IgnoreResult> { xattr::list(path) .map_err(|err| IgnoreErrorKind::ErrorXattr { path: path.to_path_buf(), @@ -502,7 +540,7 @@ fn list_extended_attributes(path: &Path) -> RusticResult> })?, }) }) - .collect::>>() + .collect::>>() } /// Maps a [`DirEntry`] to a [`ReadSourceEntry`]. @@ -527,9 +565,14 @@ fn map_entry( entry: DirEntry, with_atime: bool, ignore_devid: bool, -) -> RusticResult> { +) -> IgnoreResult> { let name = entry.file_name(); - let m = entry.metadata().map_err(IgnoreErrorKind::GenericError)?; + let m = entry + .metadata() + .map_err(|err| IgnoreErrorKind::AcquiringMetadataFailed { + name: name.to_string_lossy().to_string(), + source: err, + })?; let uid = m.uid(); let gid = m.gid(); @@ -553,7 +596,11 @@ fn map_entry( m.ctime(), m.ctime_nsec() .try_into() - .map_err(IgnoreErrorKind::FromTryFromIntError)?, + .map_err(|err| IgnoreErrorKind::CtimeConversionFailed { + ctime: m.ctime(), + ctime_nsec: m.ctime_nsec(), + source: err, + })?, ) .single() .map(|dt| dt.with_timezone(&Local)); diff --git a/crates/core/src/backend/local_destination.rs b/crates/core/src/backend/local_destination.rs index 8c2ea2d46..fa1ea636f 100644 --- a/crates/core/src/backend/local_destination.rs +++ b/crates/core/src/backend/local_destination.rs @@ -4,6 +4,7 @@ use std::os::unix::fs::{symlink, PermissionsExt}; use std::{ fs::{self, File, OpenOptions}, io::{Read, Seek, SeekFrom, Write}, + num::TryFromIntError, path::{Path, PathBuf}, }; @@ -14,6 +15,8 @@ use filetime::{set_symlink_file_times, FileTime}; #[cfg(not(windows))] use log::warn; #[cfg(not(windows))] +use nix::errno::Errno; +#[cfg(not(windows))] use nix::sys::stat::{mknod, Mode, SFlag}; #[cfg(not(windows))] use nix::{ @@ -25,11 +28,84 @@ use nix::{ use crate::backend::ignore::mapper::map_mode_from_go; #[cfg(not(windows))] use crate::backend::node::NodeType; -use crate::{ - backend::node::{ExtendedAttribute, Metadata, Node}, - error::LocalDestinationErrorKind, - 
RusticResult, -}; +use crate::backend::node::{ExtendedAttribute, Metadata, Node}; +use crate::error::RusticResult; + +/// [`LocalDestinationErrorKind`] describes the errors that can be returned by an action on the filesystem in Backends +#[derive(thiserror::Error, Debug, displaydoc::Display)] +pub enum LocalDestinationErrorKind { + /// directory creation failed: `{0:?}` + DirectoryCreationFailed(std::io::Error), + /// file `{0:?}` should have a parent + FileDoesNotHaveParent(PathBuf), + /// DeviceID could not be converted to other type `{target}` of device `{device}`: `{source}` + DeviceIdConversionFailed { + target: String, + device: u64, + source: TryFromIntError, + }, + /// Length conversion failed for `{target}` of length `{length}`: `{source}` + LengthConversionFailed { + target: String, + length: u64, + source: TryFromIntError, + }, + /// [`walkdir::Error`] + #[error(transparent)] + FromWalkdirError(walkdir::Error), + /// [`Errno`] + #[error(transparent)] + #[cfg(not(windows))] + FromErrnoError(Errno), + /// listing xattrs on `{path:?}`: `{source:?}` + #[cfg(not(any(windows, target_os = "openbsd")))] + ListingXattrsFailed { + path: PathBuf, + source: std::io::Error, + }, + /// setting xattr `{name}` on `{filename:?}` with `{source:?}` + #[cfg(not(any(windows, target_os = "openbsd")))] + SettingXattrFailed { + name: String, + filename: PathBuf, + source: std::io::Error, + }, + /// getting xattr `{name}` on `{filename:?}` with `{source:?}` + #[cfg(not(any(windows, target_os = "openbsd")))] + GettingXattrFailed { + name: String, + filename: PathBuf, + source: std::io::Error, + }, + /// removing directories failed: `{0:?}` + DirectoryRemovalFailed(std::io::Error), + /// removing file failed: `{0:?}` + FileRemovalFailed(std::io::Error), + /// setting time metadata failed: `{0:?}` + SettingTimeMetadataFailed(std::io::Error), + /// opening file failed: `{0:?}` + OpeningFileFailed(std::io::Error), + /// setting file length failed: `{0:?}` + 
SettingFileLengthFailed(std::io::Error), + /// can't jump to position in file: `{0:?}` + CouldNotSeekToPositionInFile(std::io::Error), + /// couldn't write to buffer: `{0:?}` + CouldNotWriteToBuffer(std::io::Error), + /// reading exact length of file contents failed: `{0:?}` + ReadingExactLengthOfFileFailed(std::io::Error), + /// setting file permissions failed: `{0:?}` + #[cfg(not(windows))] + SettingFilePermissionsFailed(std::io::Error), + /// failed to symlink target `{linktarget:?}` from `{filename:?}` with `{source:?}` + #[cfg(not(windows))] + SymlinkingFailed { + linktarget: PathBuf, + filename: PathBuf, + source: std::io::Error, + }, +} + +pub(crate) type LocalDestinationResult = Result; #[derive(Clone, Debug)] /// Local destination, used when restoring. @@ -78,11 +154,13 @@ impl LocalDestination { if is_file { if let Some(path) = path.parent() { fs::create_dir_all(path) - .map_err(LocalDestinationErrorKind::DirectoryCreationFailed)?; + .map_err(LocalDestinationErrorKind::DirectoryCreationFailed) + .map_err(|_err| todo!("Error transition"))?; } } else { fs::create_dir_all(&path) - .map_err(LocalDestinationErrorKind::DirectoryCreationFailed)?; + .map_err(LocalDestinationErrorKind::DirectoryCreationFailed) + .map_err(|_err| todo!("Error transition"))?; } } @@ -126,7 +204,7 @@ impl LocalDestination { /// This will remove the directory recursively. /// /// [`LocalDestinationErrorKind::DirectoryRemovalFailed`]: crate::error::LocalDestinationErrorKind::DirectoryRemovalFailed - pub fn remove_dir(&self, dirname: impl AsRef) -> RusticResult<()> { + pub(crate) fn remove_dir(&self, dirname: impl AsRef) -> LocalDestinationResult<()> { Ok(fs::remove_dir_all(dirname) .map_err(LocalDestinationErrorKind::DirectoryRemovalFailed)?) } @@ -149,7 +227,7 @@ impl LocalDestination { /// * If the file is a directory or device, this will fail. 
/// /// [`LocalDestinationErrorKind::FileRemovalFailed`]: crate::error::LocalDestinationErrorKind::FileRemovalFailed - pub fn remove_file(&self, filename: impl AsRef) -> RusticResult<()> { + pub(crate) fn remove_file(&self, filename: impl AsRef) -> LocalDestinationResult<()> { Ok(fs::remove_file(filename).map_err(LocalDestinationErrorKind::FileRemovalFailed)?) } @@ -168,7 +246,7 @@ impl LocalDestination { /// This will create the directory structure recursively. /// /// [`LocalDestinationErrorKind::DirectoryCreationFailed`]: crate::error::LocalDestinationErrorKind::DirectoryCreationFailed - pub fn create_dir(&self, item: impl AsRef) -> RusticResult<()> { + pub(crate) fn create_dir(&self, item: impl AsRef) -> LocalDestinationResult<()> { let dirname = self.path.join(item); fs::create_dir_all(dirname).map_err(LocalDestinationErrorKind::DirectoryCreationFailed)?; Ok(()) @@ -186,7 +264,11 @@ impl LocalDestination { /// * [`LocalDestinationErrorKind::SettingTimeMetadataFailed`] - If the times could not be set /// /// [`LocalDestinationErrorKind::SettingTimeMetadataFailed`]: crate::error::LocalDestinationErrorKind::SettingTimeMetadataFailed - pub fn set_times(&self, item: impl AsRef, meta: &Metadata) -> RusticResult<()> { + pub(crate) fn set_times( + &self, + item: impl AsRef, + meta: &Metadata, + ) -> LocalDestinationResult<()> { let filename = self.path(item); if let Some(mtime) = meta.mtime { let atime = meta.atime.unwrap_or(mtime); @@ -213,7 +295,11 @@ impl LocalDestination { /// # Errors /// /// If the user/group could not be set. 
- pub fn set_user_group(&self, _item: impl AsRef, _meta: &Metadata) -> RusticResult<()> { + pub(crate) fn set_user_group( + &self, + _item: impl AsRef, + _meta: &Metadata, + ) -> LocalDestinationResult<()> { // https://learn.microsoft.com/en-us/windows/win32/fileio/file-security-and-access-rights // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Security/struct.SECURITY_ATTRIBUTES.html // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Storage/FileSystem/struct.CREATEFILE2_EXTENDED_PARAMETERS.html#structfield.lpSecurityAttributes @@ -234,7 +320,11 @@ impl LocalDestination { /// /// [`LocalDestinationErrorKind::FromErrnoError`]: crate::error::LocalDestinationErrorKind::FromErrnoError #[allow(clippy::similar_names)] - pub fn set_user_group(&self, item: impl AsRef, meta: &Metadata) -> RusticResult<()> { + pub(crate) fn set_user_group( + &self, + item: impl AsRef, + meta: &Metadata, + ) -> LocalDestinationResult<()> { let filename = self.path(item); let user = meta.user.clone().and_then(uid_from_name); @@ -262,7 +352,11 @@ impl LocalDestination { /// # Errors /// /// If the uid/gid could not be set. - pub fn set_uid_gid(&self, _item: impl AsRef, _meta: &Metadata) -> RusticResult<()> { + pub(crate) fn set_uid_gid( + &self, + _item: impl AsRef, + _meta: &Metadata, + ) -> LocalDestinationResult<()> { Ok(()) } @@ -280,7 +374,11 @@ impl LocalDestination { /// /// [`LocalDestinationErrorKind::FromErrnoError`]: crate::error::LocalDestinationErrorKind::FromErrnoError #[allow(clippy::similar_names)] - pub fn set_uid_gid(&self, item: impl AsRef, meta: &Metadata) -> RusticResult<()> { + pub(crate) fn set_uid_gid( + &self, + item: impl AsRef, + meta: &Metadata, + ) -> LocalDestinationResult<()> { let filename = self.path(item); let uid = meta.uid.map(Uid::from_raw); @@ -303,7 +401,11 @@ impl LocalDestination { /// # Errors /// /// If the permissions could not be set. 
- pub fn set_permission(&self, _item: impl AsRef, _node: &Node) -> RusticResult<()> { + pub(crate) fn set_permission( + &self, + _item: impl AsRef, + _node: &Node, + ) -> LocalDestinationResult<()> { Ok(()) } @@ -321,7 +423,11 @@ impl LocalDestination { /// /// [`LocalDestinationErrorKind::SettingFilePermissionsFailed`]: crate::error::LocalDestinationErrorKind::SettingFilePermissionsFailed #[allow(clippy::similar_names)] - pub fn set_permission(&self, item: impl AsRef, node: &Node) -> RusticResult<()> { + pub(crate) fn set_permission( + &self, + item: impl AsRef, + node: &Node, + ) -> LocalDestinationResult<()> { if node.is_symlink() { return Ok(()); } @@ -349,11 +455,11 @@ impl LocalDestination { /// # Errors /// /// If the extended attributes could not be set. - pub fn set_extended_attributes( + pub(crate) fn set_extended_attributes( &self, _item: impl AsRef, _extended_attributes: &[ExtendedAttribute], - ) -> RusticResult<()> { + ) -> LocalDestinationResult<()> { Ok(()) } @@ -382,11 +488,11 @@ impl LocalDestination { /// # Panics /// /// If the extended attributes could not be set. 
- pub fn set_extended_attributes( + pub(crate) fn set_extended_attributes( &self, item: impl AsRef, extended_attributes: &[ExtendedAttribute], - ) -> RusticResult<()> { + ) -> LocalDestinationResult<()> { let filename = self.path(item); let mut done = vec![false; extended_attributes.len()]; @@ -463,7 +569,11 @@ impl LocalDestination { /// [`LocalDestinationErrorKind::DirectoryCreationFailed`]: crate::error::LocalDestinationErrorKind::DirectoryCreationFailed /// [`LocalDestinationErrorKind::OpeningFileFailed`]: crate::error::LocalDestinationErrorKind::OpeningFileFailed /// [`LocalDestinationErrorKind::SettingFileLengthFailed`]: crate::error::LocalDestinationErrorKind::SettingFileLengthFailed - pub fn set_length(&self, item: impl AsRef, size: u64) -> RusticResult<()> { + pub(crate) fn set_length( + &self, + item: impl AsRef, + size: u64, + ) -> LocalDestinationResult<()> { let filename = self.path(item); let dir = filename .parent() @@ -497,7 +607,11 @@ impl LocalDestination { /// # Returns /// /// Ok if the special file was created. - pub fn create_special(&self, _item: impl AsRef, _node: &Node) -> RusticResult<()> { + pub(crate) fn create_special( + &self, + _item: impl AsRef, + _node: &Node, + ) -> LocalDestinationResult<()> { Ok(()) } @@ -512,13 +626,17 @@ impl LocalDestination { /// # Errors /// /// * [`LocalDestinationErrorKind::SymlinkingFailed`] - If the symlink could not be created. - /// * [`LocalDestinationErrorKind::FromTryIntError`] - If the device could not be converted to the correct type. + /// * [`LocalDestinationErrorKind::DeviceIdConversionFailed`] - If the device could not be converted to the correct type. /// * [`LocalDestinationErrorKind::FromErrnoError`] - If the device could not be created. 
/// - /// [`LocalDestinationErrorKind::SymlinkingFailed`]: crate::error::LocalDestinationErrorKind::SymlinkingFailed - /// [`LocalDestinationErrorKind::FromTryIntError`]: crate::error::LocalDestinationErrorKind::FromTryIntError - /// [`LocalDestinationErrorKind::FromErrnoError`]: crate::error::LocalDestinationErrorKind::FromErrnoError - pub fn create_special(&self, item: impl AsRef, node: &Node) -> RusticResult<()> { + /// [`LocalDestinationErrorKind::SymlinkingFailed`]: LocalDestinationErrorKind::SymlinkingFailed + /// [`LocalDestinationErrorKind::DeviceIdConversionFailed`]: LocalDestinationErrorKind::DeviceIdConversionFailed + /// [`LocalDestinationErrorKind::FromErrnoError`]: LocalDestinationErrorKind::FromErrnoError + pub(crate) fn create_special( + &self, + item: impl AsRef, + node: &Node, + ) -> LocalDestinationResult<()> { let filename = self.path(item); match &node.node_type { @@ -540,11 +658,21 @@ impl LocalDestination { )))] let device = *device; #[cfg(any(target_os = "macos", target_os = "openbsd"))] - let device = - i32::try_from(*device).map_err(LocalDestinationErrorKind::FromTryIntError)?; + let device = i32::try_from(*device).map_err(|err| { + LocalDestinationErrorKind::DeviceIdConversionFailed { + target: "i32".to_string(), + device: *device, + source: err, + } + })?; #[cfg(target_os = "freebsd")] - let device = - u32::try_from(*device).map_err(LocalDestinationErrorKind::FromTryIntError)?; + let device = u32::try_from(*device).map_err(|err| { + LocalDestinationErrorKind::DeviceIdConversionFailed { + target: "u32".to_string(), + device: *device, + source: err, + } + })?; mknod(&filename, SFlag::S_IFBLK, Mode::empty(), device) .map_err(LocalDestinationErrorKind::FromErrnoError)?; } @@ -556,11 +684,21 @@ impl LocalDestination { )))] let device = *device; #[cfg(any(target_os = "macos", target_os = "openbsd"))] - let device = - i32::try_from(*device).map_err(LocalDestinationErrorKind::FromTryIntError)?; + let device = i32::try_from(*device).map_err(|err| 
{ + LocalDestinationErrorKind::DeviceIdConversionFailed { + target: "i32".to_string(), + device: *device, + source: err, + } + })?; #[cfg(target_os = "freebsd")] - let device = - u32::try_from(*device).map_err(LocalDestinationErrorKind::FromTryIntError)?; + let device = u32::try_from(*device).map_err(|err| { + LocalDestinationErrorKind::DeviceIdConversionFailed { + target: "u32".to_string(), + device: *device, + source: err, + } + })?; mknod(&filename, SFlag::S_IFCHR, Mode::empty(), device) .map_err(LocalDestinationErrorKind::FromErrnoError)?; } @@ -588,15 +726,20 @@ impl LocalDestination { /// # Errors /// /// * [`LocalDestinationErrorKind::OpeningFileFailed`] - If the file could not be opened. - /// * [`LocalDestinationErrorKind::CouldNotSeekToPositionInFile`] - If the file could not be seeked to the given position. - /// * [`LocalDestinationErrorKind::FromTryIntError`] - If the length of the file could not be converted to u32. + /// * [`LocalDestinationErrorKind::CouldNotSeekToPositionInFile`] - If the file could not be sought to the given position. + /// * [`LocalDestinationErrorKind::LengthConversionFailed`] - If the length of the file could not be converted to u32. /// * [`LocalDestinationErrorKind::ReadingExactLengthOfFileFailed`] - If the length of the file could not be read. 
/// - /// [`LocalDestinationErrorKind::OpeningFileFailed`]: crate::error::LocalDestinationErrorKind::OpeningFileFailed - /// [`LocalDestinationErrorKind::CouldNotSeekToPositionInFile`]: crate::error::LocalDestinationErrorKind::CouldNotSeekToPositionInFile - /// [`LocalDestinationErrorKind::FromTryIntError`]: crate::error::LocalDestinationErrorKind::FromTryIntError - /// [`LocalDestinationErrorKind::ReadingExactLengthOfFileFailed`]: crate::error::LocalDestinationErrorKind::ReadingExactLengthOfFileFailed - pub fn read_at(&self, item: impl AsRef, offset: u64, length: u64) -> RusticResult { + /// [`LocalDestinationErrorKind::OpeningFileFailed`]: LocalDestinationErrorKind::OpeningFileFailed + /// [`LocalDestinationErrorKind::CouldNotSeekToPositionInFile`]: LocalDestinationErrorKind::CouldNotSeekToPositionInFile + /// [`LocalDestinationErrorKind::LengthConversionFailed`]: LocalDestinationErrorKind::LengthConversionFailed + /// [`LocalDestinationErrorKind::ReadingExactLengthOfFileFailed`]: LocalDestinationErrorKind::ReadingExactLengthOfFileFailed + pub(crate) fn read_at( + &self, + item: impl AsRef, + offset: u64, + length: u64, + ) -> LocalDestinationResult { let filename = self.path(item); let mut file = File::open(filename).map_err(LocalDestinationErrorKind::OpeningFileFailed)?; @@ -605,9 +748,13 @@ impl LocalDestination { .map_err(LocalDestinationErrorKind::CouldNotSeekToPositionInFile)?; let mut vec = vec![ 0; - length - .try_into() - .map_err(LocalDestinationErrorKind::FromTryIntError)? + length.try_into().map_err(|err| { + LocalDestinationErrorKind::LengthConversionFailed { + target: "u8".to_string(), + length, + source: err, + } + })? ]; file.read_exact(&mut vec) .map_err(LocalDestinationErrorKind::ReadingExactLengthOfFileFailed)?; @@ -625,7 +772,7 @@ impl LocalDestination { /// /// If a file exists and size matches, this returns a `File` open for reading. 
/// In all other cases, returns `None` - pub fn get_matching_file(&self, item: impl AsRef, size: u64) -> Option { + pub(crate) fn get_matching_file(&self, item: impl AsRef, size: u64) -> Option { let filename = self.path(item); fs::symlink_metadata(&filename).map_or_else( |_| None, @@ -650,7 +797,7 @@ impl LocalDestination { /// # Errors /// /// * [`LocalDestinationErrorKind::OpeningFileFailed`] - If the file could not be opened. - /// * [`LocalDestinationErrorKind::CouldNotSeekToPositionInFile`] - If the file could not be seeked to the given position. + /// * [`LocalDestinationErrorKind::CouldNotSeekToPositionInFile`] - If the file could not be sought to the given position. /// * [`LocalDestinationErrorKind::CouldNotWriteToBuffer`] - If the bytes could not be written to the file. /// /// # Notes @@ -660,7 +807,12 @@ impl LocalDestination { /// [`LocalDestinationErrorKind::OpeningFileFailed`]: crate::error::LocalDestinationErrorKind::OpeningFileFailed /// [`LocalDestinationErrorKind::CouldNotSeekToPositionInFile`]: crate::error::LocalDestinationErrorKind::CouldNotSeekToPositionInFile /// [`LocalDestinationErrorKind::CouldNotWriteToBuffer`]: crate::error::LocalDestinationErrorKind::CouldNotWriteToBuffer - pub fn write_at(&self, item: impl AsRef, offset: u64, data: &[u8]) -> RusticResult<()> { + pub(crate) fn write_at( + &self, + item: impl AsRef, + offset: u64, + data: &[u8], + ) -> LocalDestinationResult<()> { let filename = self.path(item); let mut file = OpenOptions::new() .create(true) diff --git a/crates/core/src/backend/node.rs b/crates/core/src/backend/node.rs index 94fb79954..3ff4522e5 100644 --- a/crates/core/src/backend/node.rs +++ b/crates/core/src/backend/node.rs @@ -11,11 +11,10 @@ use std::fmt::Write; #[cfg(not(windows))] use std::os::unix::ffi::OsStrExt; -#[cfg(not(windows))] -use crate::RusticResult; - use chrono::{DateTime, Local}; use derive_more::Constructor; +#[cfg(not(windows))] +use displaydoc::Display; use serde_aux::prelude::*; use 
serde_derive::{Deserialize, Serialize}; use serde_with::{ @@ -23,12 +22,71 @@ use serde_with::{ formats::Padded, serde_as, skip_serializing_none, DefaultOnNull, }; - #[cfg(not(windows))] -use crate::error::NodeErrorKind; +use thiserror::Error; use crate::blob::{tree::TreeId, DataId}; +#[cfg(not(windows))] +/// [`NodeErrorKind`] describes the errors that can be returned by an action utilizing a node in Backends +#[derive(thiserror::Error, Debug, Display)] +#[non_exhaustive] +pub enum NodeErrorKind { + /// Unexpected EOF while parsing filename: `{file_name}` + #[cfg(not(windows))] + UnexpectedEOF { + /// The filename + file_name: String, + /// The remaining chars + chars: std::str::Chars, + }, + /// Invalid unicode + #[cfg(not(windows))] + InvalidUnicode { + /// The filename + file_name: String, + /// The unicode codepoint + unicode: u32, + /// The remaining chars + chars: std::str::Chars, + }, + /// Unrecognized Escape while parsing filename: `{file_name}` + #[cfg(not(windows))] + UnrecognizedEscape { + /// The filename + file_name: String, + /// The remaining chars + chars: std::str::Chars, + }, + /// Parsing hex chars {chars:?} failed for `{hex}` in filename: `{file_name}` : `{source}` + #[cfg(not(windows))] + ParsingHexFailed { + /// The filename + file_name: String, + /// The hex string + hex: String, + /// The remaining chars + chars: std::str::Chars, + /// The error that occurred + source: ParseIntError, + }, + /// Parsing unicode chars {chars:?} failed for `{target}` in filename: `{file_name}` : `{source}` + #[cfg(not(windows))] + ParsingUnicodeFailed { + /// The filename + file_name: String, + /// The target type + target: String, + /// The remaining chars + chars: std::str::Chars, + /// The error that occurred + source: ParseIntError, + }, +} + +#[cfg(not(windows))] +pub(crate) type NodeResult = Result; + #[derive( Default, Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Constructor, PartialOrd, Ord, )] @@ -399,7 +457,7 @@ fn escape_filename(name: 
&OsStr) -> String { /// /// * `s` - The escaped filename // inspired by the enquote crate -fn unescape_filename(s: &str) -> RusticResult { +fn unescape_filename(s: &str) -> NodeResult { let mut chars = s.chars(); let mut u = Vec::new(); loop { @@ -408,7 +466,12 @@ fn unescape_filename(s: &str) -> RusticResult { Some(c) => { if c == '\\' { match chars.next() { - None => return Err(NodeErrorKind::UnexpectedEOF.into()), + None => { + return Err(NodeErrorKind::UnexpectedEOF { + file_name: s.to_string(), + chars, + }) + } Some(c) => match c { '\\' => u.push(b'\\'), '"' => u.push(b'"'), @@ -424,31 +487,62 @@ fn unescape_filename(s: &str) -> RusticResult { // hex 'x' => { let hex = take(&mut chars, 2); - u.push( - u8::from_str_radix(&hex, 16) - .map_err(NodeErrorKind::FromParseIntError)?, - ); + u.push(u8::from_str_radix(&hex, 16).map_err(|err| { + NodeErrorKind::ParsingHexFailed { + file_name: s.to_string(), + hex: hex.to_string(), + chars, + source: err, + } + })?); } // unicode 'u' => { - let n = u32::from_str_radix(&take(&mut chars, 4), 16) - .map_err(NodeErrorKind::FromParseIntError)?; - let c = - std::char::from_u32(n).ok_or(NodeErrorKind::InvalidUnicode)?; + let n = u32::from_str_radix(&take(&mut chars, 4), 16).map_err( + |err| NodeErrorKind::ParsingUnicodeFailed { + file_name: s.to_string(), + target: "u32".to_string(), + chars, + source: err, + }, + )?; + let c = std::char::from_u32(n).ok_or( + NodeErrorKind::InvalidUnicode { + file_name: s.to_string(), + unicode: n, + chars, + }, + )?; let mut bytes = vec![0u8; c.len_utf8()]; _ = c.encode_utf8(&mut bytes); u.extend_from_slice(&bytes); } 'U' => { - let n = u32::from_str_radix(&take(&mut chars, 8), 16) - .map_err(NodeErrorKind::FromParseIntError)?; - let c = - std::char::from_u32(n).ok_or(NodeErrorKind::InvalidUnicode)?; + let n = u32::from_str_radix(&take(&mut chars, 8), 16).map_err( + |err| NodeErrorKind::ParsingUnicodeFailed { + file_name: s.to_string(), + target: "u32".to_string(), + chars, + source: err, + }, 
+ )?; + let c = std::char::from_u32(n).ok_or( + NodeErrorKind::InvalidUnicode { + file_name: s.to_string(), + unicode: n, + chars, + }, + )?; let mut bytes = vec![0u8; c.len_utf8()]; _ = c.encode_utf8(&mut bytes); u.extend_from_slice(&bytes); } - _ => return Err(NodeErrorKind::UnrecognizedEscape.into()), + _ => { + return Err(NodeErrorKind::UnrecognizedEscape { + file_name: s.to_string(), + chars, + }) + } }, } } else { diff --git a/crates/core/src/backend/warm_up.rs b/crates/core/src/backend/warm_up.rs index 87e9e0093..3e25d49ae 100644 --- a/crates/core/src/backend/warm_up.rs +++ b/crates/core/src/backend/warm_up.rs @@ -1,10 +1,10 @@ use std::sync::Arc; -use anyhow::Result; use bytes::Bytes; use crate::{ backend::{FileType, ReadBackend, WriteBackend}, + error::RusticResult, id::Id, }; @@ -31,11 +31,11 @@ impl ReadBackend for WarmUpAccessBackend { self.be.location() } - fn list_with_size(&self, tpe: FileType) -> Result> { + fn list_with_size(&self, tpe: FileType) -> RusticResult> { self.be.list_with_size(tpe) } - fn read_full(&self, tpe: FileType, id: &Id) -> Result { + fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { self.be.read_full(tpe, id) } @@ -46,7 +46,7 @@ impl ReadBackend for WarmUpAccessBackend { cacheable: bool, offset: u32, length: u32, - ) -> Result { + ) -> RusticResult { self.be.read_partial(tpe, id, cacheable, offset, length) } @@ -54,7 +54,7 @@ impl ReadBackend for WarmUpAccessBackend { true } - fn warm_up(&self, tpe: FileType, id: &Id) -> Result<()> { + fn warm_up(&self, tpe: FileType, id: &Id) -> RusticResult<()> { // warm up files by accessing them - error is ignored as we expect this to error out! 
_ = self.be.read_partial(tpe, id, false, 0, 1); Ok(()) @@ -62,15 +62,15 @@ impl ReadBackend for WarmUpAccessBackend { } impl WriteBackend for WarmUpAccessBackend { - fn create(&self) -> Result<()> { + fn create(&self) -> RusticResult<()> { self.be.create() } - fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> Result<()> { + fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()> { self.be.write_bytes(tpe, id, cacheable, buf) } - fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> Result<()> { + fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()> { // First remove cold file self.be.remove(tpe, id, cacheable) } diff --git a/crates/core/src/blob/packer.rs b/crates/core/src/blob/packer.rs index 62c82759b..bf31b5eda 100644 --- a/crates/core/src/blob/packer.rs +++ b/crates/core/src/blob/packer.rs @@ -1,6 +1,3 @@ -use integer_sqrt::IntegerSquareRoot; -use log::warn; - use std::{ num::NonZeroU32, sync::{Arc, RwLock}, @@ -10,6 +7,8 @@ use std::{ use bytes::{Bytes, BytesMut}; use chrono::Local; use crossbeam_channel::{bounded, Receiver, Sender}; +use integer_sqrt::IntegerSquareRoot; +use log::warn; use pariter::{scope, IteratorExt}; use crate::{ @@ -19,7 +18,7 @@ use crate::{ }, blob::{BlobId, BlobType}, crypto::{hasher::hash, CryptoKey}, - error::{PackerErrorKind, RusticErrorKind, RusticResult}, + error::RusticResult, index::indexer::SharedIndexer, repofile::{ configfile::ConfigFile, @@ -29,6 +28,22 @@ use crate::{ }, }; +/// [`PackerErrorKind`] describes the errors that can be returned for a Packer +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum PackerErrorKind { + /// getting total size failed + GettingTotalSizeFailed, + /// Conversion from `{from}` to `{to}` failed: {source} + ConversionFailed { + to: &'static str, + from: &'static str, + source: std::num::TryFromIntError, + }, +} + +pub(crate) type PackerResult = Result; + 
pub(super) mod constants { use std::time::Duration; @@ -249,12 +264,13 @@ impl Packer { |(_, id, _, _, _)| !indexer.read().unwrap().has(id), ) }) - .try_for_each(|item: RusticResult<_>| { + .try_for_each(|item: RusticResult<_>| -> RusticResult<()> { let (data, id, data_len, ul, size_limit) = item?; raw_packer .write() .unwrap() .add_raw(&data, &id, data_len, ul, size_limit) + .map_err(|_err| todo!("Error transition")) }) .and_then(|()| raw_packer.write().unwrap().finalize()); _ = finish_tx.send(status); @@ -280,6 +296,7 @@ impl Packer { pub fn add(&self, data: Bytes, id: BlobId) -> RusticResult<()> { // compute size limit based on total size and size bounds self.add_with_sizelimit(data, id, None) + .map_err(|_err| todo!("Error transition")) } /// Adds the blob to the packfile, allows specifying a size limit for the pack file @@ -300,10 +317,10 @@ impl Packer { data: Bytes, id: BlobId, size_limit: Option, - ) -> RusticResult<()> { + ) -> PackerResult<()> { self.sender .send((data, id, size_limit)) - .map_err(PackerErrorKind::SendingCrossbeamMessageFailed)?; + .map_err(|_err| todo!("Error transition"))?; Ok(()) } @@ -328,7 +345,7 @@ impl Packer { data_len: u64, uncompressed_length: Option, size_limit: Option, - ) -> RusticResult<()> { + ) -> PackerResult<()> { // only add if this blob is not present if self.indexer.read().unwrap().has(id) { Ok(()) @@ -352,7 +369,9 @@ impl Packer { // cancel channel drop(self.sender); // wait for items in channel to be processed - self.finish.recv().unwrap() + self.finish + .recv() + .expect("Should be able to receive from channel to finalize packer.") } } @@ -478,7 +497,7 @@ impl RawPacker { /// /// If the packfile could not be saved fn finalize(&mut self) -> RusticResult { - self.save()?; + self.save().map_err(|_err| todo!("Error transition"))?; self.file_writer.take().unwrap().finalize()?; Ok(std::mem::take(&mut self.stats)) } @@ -492,11 +511,15 @@ impl RawPacker { /// # Returns /// /// The number of bytes written. 
- fn write_data(&mut self, data: &[u8]) -> RusticResult { + fn write_data(&mut self, data: &[u8]) -> PackerResult { let len = data .len() .try_into() - .map_err(PackerErrorKind::IntConversionFailed)?; + .map_err(|err| PackerErrorKind::ConversionFailed { + to: "u32", + from: "usize", + source: err, + })?; self.file.extend_from_slice(data); self.size += len; Ok(len) @@ -524,16 +547,20 @@ impl RawPacker { data_len: u64, uncompressed_length: Option, size_limit: Option, - ) -> RusticResult<()> { + ) -> PackerResult<()> { if self.has(id) { return Ok(()); } self.stats.blobs += 1; self.stats.data += data_len; - let data_len_packed: u64 = data - .len() - .try_into() - .map_err(PackerErrorKind::IntConversionFailed)?; + let data_len_packed: u64 = + data.len() + .try_into() + .map_err(|err| PackerErrorKind::ConversionFailed { + to: "u64", + from: "usize", + source: err, + })?; self.stats.data_packed += data_len_packed; let size_limit = size_limit.unwrap_or_else(|| self.pack_sizer.pack_size()); @@ -570,21 +597,35 @@ impl RawPacker { /// /// [`PackerErrorKind::IntConversionFailed`]: crate::error::PackerErrorKind::IntConversionFailed /// [`PackFileErrorKind::WritingBinaryRepresentationFailed`]: crate::error::PackFileErrorKind::WritingBinaryRepresentationFailed - fn write_header(&mut self) -> RusticResult<()> { + fn write_header(&mut self) -> PackerResult<()> { // compute the pack header - let data = PackHeaderRef::from_index_pack(&self.index).to_binary()?; + let data = PackHeaderRef::from_index_pack(&self.index) + .to_binary() + .map_err(|_err| todo!("Error transition"))?; // encrypt and write to pack file - let data = self.be.key().encrypt_data(&data)?; + let data = self + .be + .key() + .encrypt_data(&data) + .map_err(|_err| todo!("Error transition"))?; let headerlen = data .len() .try_into() - .map_err(PackerErrorKind::IntConversionFailed)?; + .map_err(|err| PackerErrorKind::ConversionFailed { + to: "u32", + from: "usize", + source: err, + })?; _ = self.write_data(&data)?; // 
finally write length of header unencrypted to pack file - _ = self.write_data(&PackHeaderLength::from_u32(headerlen).to_binary()?)?; + _ = self.write_data( + &PackHeaderLength::from_u32(headerlen) + .to_binary() + .map_err(|_err| todo!("Error transition"))?, + )?; Ok(()) } @@ -602,7 +643,7 @@ impl RawPacker { /// /// [`PackerErrorKind::IntConversionFailed`]: crate::error::PackerErrorKind::IntConversionFailed /// [`PackFileErrorKind::WritingBinaryRepresentationFailed`]: crate::error::PackFileErrorKind::WritingBinaryRepresentationFailed - fn save(&mut self) -> RusticResult<()> { + fn save(&mut self) -> PackerResult<()> { if self.size == 0 { return Ok(()); } @@ -645,8 +686,7 @@ impl FileWriterHandle { let (file, id, mut index) = load; index.id = id; self.be - .write_bytes(FileType::Pack, &id, self.cacheable, file) - .map_err(RusticErrorKind::Backend)?; + .write_bytes(FileType::Pack, &id, self.cacheable, file)?; index.time = Some(Local::now()); Ok(index) } @@ -718,10 +758,10 @@ impl Actor { /// # Errors /// /// If sending the message to the actor fails. 
- fn send(&self, load: (Bytes, IndexPack)) -> RusticResult<()> { + fn send(&self, load: (Bytes, IndexPack)) -> PackerResult<()> { self.sender .send(load) - .map_err(PackerErrorKind::SendingCrossbeamMessageFailedForIndexPack)?; + .map_err(|_err| todo!("Error transition"))?; Ok(()) } @@ -802,23 +842,22 @@ impl Repacker { /// If the blob could not be added /// If reading the blob from the backend fails pub fn add_fast(&self, pack_id: &PackId, blob: &IndexBlob) -> RusticResult<()> { - let data = self - .be - .read_partial( - FileType::Pack, - pack_id, - blob.tpe.is_cacheable(), - blob.offset, - blob.length, - ) - .map_err(RusticErrorKind::Backend)?; - self.packer.add_raw( - &data, - &blob.id, - 0, - blob.uncompressed_length, - Some(self.size_limit), + let data = self.be.read_partial( + FileType::Pack, + pack_id, + blob.tpe.is_cacheable(), + blob.offset, + blob.length, )?; + self.packer + .add_raw( + &data, + &blob.id, + 0, + blob.uncompressed_length, + Some(self.size_limit), + ) + .map_err(|_err| todo!("Error transition"))?; Ok(()) } @@ -842,8 +881,11 @@ impl Repacker { blob.length, blob.uncompressed_length, )?; + self.packer - .add_with_sizelimit(data, blob.id, Some(self.size_limit))?; + .add_with_sizelimit(data, blob.id, Some(self.size_limit)) + .map_err(|_err| todo!("Error transition"))?; + Ok(()) } diff --git a/crates/core/src/blob/tree.rs b/crates/core/src/blob/tree.rs index ffd035779..7d1338b6a 100644 --- a/crates/core/src/blob/tree.rs +++ b/crates/core/src/blob/tree.rs @@ -4,7 +4,7 @@ use std::{ ffi::{OsStr, OsString}, mem, path::{Component, Path, PathBuf, Prefix}, - str, + str::{self, Utf8Error}, }; use crossbeam_channel::{bounded, unbounded, Receiver, Sender}; @@ -12,7 +12,6 @@ use derivative::Derivative; use derive_setters::Setters; use ignore::overrides::{Override, OverrideBuilder}; use ignore::Match; - use serde::{Deserialize, Deserializer}; use serde_derive::Serialize; @@ -23,13 +22,44 @@ use crate::{ }, blob::BlobType, crypto::hasher::hash, - 
error::{RusticResult, TreeErrorKind}, + error::RusticResult, impl_blobid, index::ReadGlobalIndex, progress::Progress, repofile::snapshotfile::SnapshotSummary, }; +/// [`TreeErrorKind`] describes the errors that can come up dealing with Trees +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum TreeErrorKind { + /// blob `{0}` not found in index + BlobIdNotFound(TreeId), + /// `{0:?}` is not a directory + NotADirectory(OsString), + /// Path `{0:?}` not found + PathNotFound(OsString), + /// path should not contain current or parent dir + ContainsCurrentOrParentDirectory, + /// serde_json couldn't serialize the tree: `{0:?}` + SerializingTreeFailed(serde_json::Error), + /// serde_json couldn't deserialize tree from bytes of JSON text: `{0:?}` + DeserializingTreeFailed(serde_json::Error), + /// slice is not UTF-8: `{0:?}` + PathIsNotUtf8Conform(Utf8Error), + /// error in building nodestreamer: `{0:?}` + BuildingNodeStreamerFailed(ignore::Error), + /// failed to read file string from glob file: `{0:?}` + ReadingFileStringFromGlobsFailed(std::io::Error), + /// Error `{kind}` in tree streamer: `{source}` + Channel { + kind: &'static str, + source: Box, + }, +} + +pub(crate) type TreeResult = Result; + pub(super) mod constants { /// The maximum number of trees that are loaded in parallel pub(super) const MAX_TREE_LOADER: usize = 4; @@ -81,10 +111,15 @@ impl Tree { /// # Returns /// /// A tuple of the serialized tree as `Vec` and the tree's ID - pub(crate) fn serialize(&self) -> RusticResult<(Vec, TreeId)> { + pub(crate) fn serialize(&self) -> TreeResult<(Vec, TreeId)> { let mut chunk = serde_json::to_vec(&self).map_err(TreeErrorKind::SerializingTreeFailed)?; - chunk.push(b'\n'); // for whatever reason, restic adds a newline, so to be compatible... 
+ // # COMPATIBILITY + // + // We add a newline to be compatible with `restic` here + chunk.push(b'\n'); + let id = hash(&chunk).into(); + Ok((chunk, id)) } @@ -113,10 +148,15 @@ impl Tree { ) -> RusticResult { let data = index .get_tree(&id) - .ok_or_else(|| TreeErrorKind::BlobIdNotFound(id))? + .ok_or_else(|| TreeErrorKind::BlobIdNotFound(id)) + .map_err(|_err| todo!("Error transition"))? .read_data(be)?; - Ok(serde_json::from_slice(&data).map_err(TreeErrorKind::DeserializingTreeFailed)?) + let tree = serde_json::from_slice(&data) + .map_err(TreeErrorKind::DeserializingTreeFailed) + .map_err(|_err| todo!("Error transition"))?; + + Ok(tree) } /// Creates a new node from a path. @@ -146,16 +186,18 @@ impl Tree { node.subtree = Some(id); for p in path.components() { - if let Some(p) = comp_to_osstr(p)? { + if let Some(p) = comp_to_osstr(p).map_err(|_err| todo!("Error transition"))? { let id = node .subtree - .ok_or_else(|| TreeErrorKind::NotADirectory(p.clone()))?; + .ok_or_else(|| TreeErrorKind::NotADirectory(p.clone())) + .map_err(|_err| todo!("Error transition"))?; let tree = Self::from_backend(be, index, id)?; node = tree .nodes .into_iter() .find(|node| node.name() == p) - .ok_or_else(|| TreeErrorKind::PathNotFound(p.clone()))?; + .ok_or_else(|| TreeErrorKind::PathNotFound(p.clone())) + .map_err(|_err| todo!("Error transition"))?; } } @@ -195,7 +237,8 @@ impl Tree { } else { let id = node .subtree - .ok_or_else(|| TreeErrorKind::NotADirectory(path_comp[idx].clone()))?; + .ok_or_else(|| TreeErrorKind::NotADirectory(path_comp[idx].clone())) + .map_err(|_err| todo!("Error transition"))?; find_node_from_component( be, @@ -217,7 +260,8 @@ impl Tree { let path_comp: Vec<_> = path .components() .filter_map(|p| comp_to_osstr(p).transpose()) - .collect::>()?; + .collect::>() + .map_err(|_err| todo!("Error transition"))?; // caching all results let mut results_cache = vec![BTreeMap::new(); path_comp.len()]; @@ -291,7 +335,8 @@ impl Tree { if node.is_dir() { let id = node 
.subtree - .ok_or_else(|| TreeErrorKind::NotADirectory(node.name()))?; + .ok_or_else(|| TreeErrorKind::NotADirectory(node.name())) + .map_err(|_err| todo!("Error transition"))?; result.append(&mut find_matching_nodes_recursive( be, index, id, &node_path, state, matches, )?); @@ -366,7 +411,7 @@ pub struct FindMatches { /// /// [`TreeErrorKind::ContainsCurrentOrParentDirectory`]: crate::error::TreeErrorKind::ContainsCurrentOrParentDirectory /// [`TreeErrorKind::PathIsNotUtf8Conform`]: crate::error::TreeErrorKind::PathIsNotUtf8Conform -pub(crate) fn comp_to_osstr(p: Component<'_>) -> RusticResult> { +pub(crate) fn comp_to_osstr(p: Component<'_>) -> TreeResult> { let s = match p { Component::RootDir => None, Component::Prefix(p) => match p.kind() { @@ -472,7 +517,6 @@ where /// /// [`TreeErrorKind::BlobIdNotFound`]: crate::error::TreeErrorKind::BlobIdNotFound /// [`TreeErrorKind::DeserializingTreeFailed`]: crate::error::TreeErrorKind::DeserializingTreeFailed - #[allow(unused)] pub fn new(be: BE, index: &'a I, node: &Node) -> RusticResult { Self::new_streamer(be, index, node, None, true) } @@ -544,42 +588,50 @@ where for g in &opts.glob { _ = override_builder .add(g) - .map_err(TreeErrorKind::BuildingNodeStreamerFailed)?; + .map_err(TreeErrorKind::BuildingNodeStreamerFailed) + .map_err(|_err| todo!("Error transition"))?; } for file in &opts.glob_file { for line in std::fs::read_to_string(file) - .map_err(TreeErrorKind::ReadingFileStringFromGlobsFailed)? + .map_err(TreeErrorKind::ReadingFileStringFromGlobsFailed) + .map_err(|_err| todo!("Error transition"))? 
.lines() { _ = override_builder .add(line) - .map_err(TreeErrorKind::BuildingNodeStreamerFailed)?; + .map_err(TreeErrorKind::BuildingNodeStreamerFailed) + .map_err(|_err| todo!("Error transition"))?; } } _ = override_builder .case_insensitive(true) - .map_err(TreeErrorKind::BuildingNodeStreamerFailed)?; + .map_err(TreeErrorKind::BuildingNodeStreamerFailed) + .map_err(|_err| todo!("Error transition"))?; for g in &opts.iglob { _ = override_builder .add(g) - .map_err(TreeErrorKind::BuildingNodeStreamerFailed)?; + .map_err(TreeErrorKind::BuildingNodeStreamerFailed) + .map_err(|_err| todo!("Error transition"))?; } for file in &opts.iglob_file { for line in std::fs::read_to_string(file) - .map_err(TreeErrorKind::ReadingFileStringFromGlobsFailed)? + .map_err(TreeErrorKind::ReadingFileStringFromGlobsFailed) + .map_err(|_err| todo!("Error transition"))? .lines() { _ = override_builder .add(line) - .map_err(TreeErrorKind::BuildingNodeStreamerFailed)?; + .map_err(TreeErrorKind::BuildingNodeStreamerFailed) + .map_err(|_err| todo!("Error transition"))?; } } let overrides = override_builder .build() - .map_err(TreeErrorKind::BuildingNodeStreamerFailed)?; + .map_err(TreeErrorKind::BuildingNodeStreamerFailed) + .map_err(|_err| todo!("Error transition"))?; Self::new_streamer(be, index, node, Some(overrides), opts.recursive) } @@ -707,7 +759,10 @@ impl TreeStreamerOnce

{ }; for (count, id) in ids.into_iter().enumerate() { - if !streamer.add_pending(PathBuf::new(), id, count)? { + if !streamer + .add_pending(PathBuf::new(), id, count) + .map_err(|_err| todo!("Error transition"))? + { streamer.p.inc(1); streamer.finished_ids += 1; } @@ -733,13 +788,17 @@ impl TreeStreamerOnce

{ /// * [`TreeErrorKind::SendingCrossbeamMessageFailed`] - If sending the message fails. /// /// [`TreeErrorKind::SendingCrossbeamMessageFailed`]: crate::error::TreeErrorKind::SendingCrossbeamMessageFailed - fn add_pending(&mut self, path: PathBuf, id: TreeId, count: usize) -> RusticResult { + fn add_pending(&mut self, path: PathBuf, id: TreeId, count: usize) -> TreeResult { if self.visited.insert(id) { self.queue_in .as_ref() .unwrap() .send((path, id, count)) - .map_err(TreeErrorKind::SendingCrossbeamMessageFailed)?; + .map_err(|err| TreeErrorKind::Channel { + kind: "sending crossbeam message", + source: err.into(), + })?; + self.counter[count] += 1; Ok(true) } else { @@ -760,9 +819,13 @@ impl Iterator for TreeStreamerOnce

{ let (path, tree, count) = match self.queue_out.recv() { Ok(Ok(res)) => res, Err(err) => { - return Some(Err( - TreeErrorKind::ReceivingCrossbreamMessageFailed(err).into() - )) + return Some( + Err(TreeErrorKind::Channel { + kind: "receiving crossbeam message", + source: err.into(), + }) + .map_err(|_err| todo!("Error transition")), + ) } Ok(Err(err)) => return Some(Err(err)), }; @@ -773,7 +836,7 @@ impl Iterator for TreeStreamerOnce

{ path.push(node.name()); match self.add_pending(path, id, count) { Ok(_) => {} - Err(err) => return Some(Err(err)), + Err(err) => return Some(Err(err).map_err(|_err| todo!("Error transition"))), } } } diff --git a/crates/core/src/chunker.rs b/crates/core/src/chunker.rs index fb6a300bd..93d649659 100644 --- a/crates/core/src/chunker.rs +++ b/crates/core/src/chunker.rs @@ -7,9 +7,17 @@ use crate::{ polynom::{Polynom, Polynom64}, rolling_hash::{Rabin64, RollingHash64}, }, - error::{PolynomialErrorKind, RusticResult}, + error::RusticResult, }; +/// [`PolynomialErrorKind`] describes the errors that can happen while dealing with Polynomials +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum PolynomialErrorKind { + /// no suitable polynomial found + NoSuitablePolynomialFound, +} + pub(super) mod constants { /// The Splitmask is used to determine if a chunk is a chunk boundary. pub(super) const SPLITMASK: u64 = (1u64 << 20) - 1; @@ -191,7 +199,8 @@ pub fn random_poly() -> RusticResult { return Ok(poly); } } - Err(PolynomialErrorKind::NoSuitablePolynomialFound.into()) + + todo!("create rustic error Err(PolynomialErrorKind::NoSuitablePolynomialFound)"); } /// A trait for extending polynomials. diff --git a/crates/core/src/commands.rs b/crates/core/src/commands.rs index e4d15a122..61c79cf03 100644 --- a/crates/core/src/commands.rs +++ b/crates/core/src/commands.rs @@ -1,5 +1,11 @@ //! The commands that can be run by the CLI. +use std::{num::TryFromIntError, path::PathBuf}; + +use chrono::OutOfRangeError; + +use crate::{backend::node::NodeType, blob::BlobId, repofile::packfile::PackId, RusticError}; + pub mod backup; /// The `cat` command. 
pub mod cat; @@ -20,3 +26,41 @@ pub mod repair; pub mod repoinfo; pub mod restore; pub mod snapshots; + +/// [`CommandErrorKind`] describes the errors that can happen while executing a high-level command +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum CommandErrorKind { + /// path is no dir: `{0}` + PathIsNoDir(String), + /// used blobs are missing: blob `{0}` doesn't existing + BlobsMissing(BlobId), + /// used pack `{0}`: size does not match! Expected size: `{1}`, real size: `{2}` + PackSizeNotMatching(PackId, u32, u32), + /// used pack `{0}` does not exist! + PackNotExisting(PackId), + /// pack `{0}` got no decision what to do + NoDecision(PackId), + /// Bytesize parser failed: `{0}` + FromByteSizeParser(String), + /// --repack-uncompressed makes no sense for v1 repo! + RepackUncompressedRepoV1, + /// datetime out of range: `{0}` + FromOutOfRangeError(OutOfRangeError), + /// node type `{0:?}` not supported by dump + DumpNotSupported(NodeType), + /// error creating `{0:?}`: `{1:?}` + ErrorCreating(PathBuf, Box), + /// error collecting information for `{0:?}`: `{1:?}` + ErrorCollecting(PathBuf, Box), + /// error setting length for `{0:?}`: `{1:?}` + ErrorSettingLength(PathBuf, Box), + /// Conversion from integer failed: `{0:?}` + ConversionFromIntFailed(TryFromIntError), + /// Specify one of the keep-* options for forget! Please use keep-none to keep no snapshot. + NoKeepOption, + /// Checking the repository failed! + CheckFailed, +} + +pub(crate) type CommandResult = Result; diff --git a/crates/core/src/commands/backup.rs b/crates/core/src/commands/backup.rs index 1d6b60d0e..bfac5ba9a 100644 --- a/crates/core/src/commands/backup.rs +++ b/crates/core/src/commands/backup.rs @@ -229,12 +229,22 @@ pub(crate) fn backup( let as_path = opts .as_path .as_ref() - .map(|p| -> RusticResult<_> { Ok(p.parse_dot()?.to_path_buf()) }) + .map(|p| -> RusticResult<_> { + Ok(p.parse_dot() + .map_err(|_err| todo!("Error transition"))? 
+ .to_path_buf()) + }) .transpose()?; match &as_path { - Some(p) => snap.paths.set_paths(&[p.clone()])?, - None => snap.paths.set_paths(&backup_path)?, + Some(p) => snap + .paths + .set_paths(&[p.clone()]) + .map_err(|_err| todo!("Error transition"))?, + None => snap + .paths + .set_paths(&backup_path) + .map_err(|_err| todo!("Error transition"))?, }; let (parent_id, parent) = opts.parent_opts.get_parent(repo, &snap, backup_stdin); diff --git a/crates/core/src/commands/cat.rs b/crates/core/src/commands/cat.rs index 9c45a0778..38fa566d0 100644 --- a/crates/core/src/commands/cat.rs +++ b/crates/core/src/commands/cat.rs @@ -5,7 +5,7 @@ use bytes::Bytes; use crate::{ backend::{decrypt::DecryptReadBackend, FileType, FindInBackend}, blob::{tree::Tree, BlobId, BlobType}, - error::{CommandErrorKind, RusticResult}, + error::RusticResult, index::ReadIndex, progress::ProgressBars, repofile::SnapshotFile, @@ -114,7 +114,8 @@ pub(crate) fn cat_tree( let node = Tree::node_from_path(repo.dbe(), repo.index(), snap.tree, Path::new(path))?; let id = node .subtree - .ok_or_else(|| CommandErrorKind::PathIsNoDir(path.to_string()))?; + .ok_or_else(|| CommandErrorKind::PathIsNoDir(path.to_string())) + .map_err(|_err| todo!("Error transition"))?; let data = repo .index() .blob_from_backend(repo.dbe(), BlobType::Tree, &BlobId::from(*id))?; diff --git a/crates/core/src/commands/check.rs b/crates/core/src/commands/check.rs index 443e828ae..743259a60 100644 --- a/crates/core/src/commands/check.rs +++ b/crates/core/src/commands/check.rs @@ -2,6 +2,7 @@ use std::{ collections::{BTreeSet, HashMap}, fmt::Debug, + path::PathBuf, str::FromStr, }; @@ -18,7 +19,7 @@ use crate::{ backend::{cache::Cache, decrypt::DecryptReadBackend, node::NodeType, FileType, ReadBackend}, blob::{tree::TreeStreamerOnce, BlobId, BlobType}, crypto::hasher::hash, - error::{CommandErrorKind, RusticErrorKind, RusticResult}, + error::{RusticError, RusticResult}, id::Id, index::{ binarysorted::{IndexCollector, IndexType}, @@ 
-32,6 +33,99 @@ use crate::{ TreeId, }; +#[non_exhaustive] +#[derive(thiserror::Error, Debug, displaydoc::Display)] +pub enum CheckCommandErrorKind { + /// error reading pack {id} : {source} + ErrorReadingPack { + id: PackId, + source: Box<RusticError>, + }, + /// cold file for hot file Type: {file_type:?}, Id: {id} does not exist + NoColdFile { id: Id, file_type: FileType }, + /// Type: {file_type:?}, Id: {id}: hot size: {size_hot}, actual size: {size} + HotFileSizeMismatch { + id: Id, + file_type: FileType, + size_hot: u32, + size: u32, + }, + /// hot file Type: {file_type:?}, Id: {id} is missing! + NoHotFile { id: Id, file_type: FileType }, + /// Error reading cached file Type: {file_type:?}, Id: {id} : {source} + ErrorReadingCache { + id: Id, + file_type: FileType, + source: Box<RusticError>, + }, + /// Error reading file Type: {file_type:?}, Id: {id} : {source} + ErrorReadingFile { + id: Id, + file_type: FileType, + source: Box<RusticError>, + }, + /// Cached file Type: {file_type:?}, Id: {id} is not identical to backend! + CacheMismatch { id: Id, file_type: FileType }, + /// pack {id}: No time is set! Run prune to correct this! + PackTimeNotSet { id: PackId }, + /// pack {id}: blob {blob_id} blob type does not match: type: {blob_type:?}, expected: {expected:?} + PackBlobTypesMismatch { + id: PackId, + blob_id: BlobId, + blob_type: BlobType, + expected: BlobType, + }, + /// pack {id}: blob {blob_id} offset in index: {offset}, expected: {expected} + PackBlobOffsetMismatch { + id: PackId, + blob_id: BlobId, + offset: u32, + expected: u32, + }, + /// pack {id} not referenced in index. Can be a parallel backup job. To repair: 'rustic repair index'. + PackNotReferenced { id: Id }, + /// pack {id}: size computed by index: {index_size}, actual size: {size}. To repair: 'rustic repair index'. + PackSizeMismatchIndex { id: Id, index_size: u32, size: u32 }, + /// pack {id} is referenced by the index but not present! To repair: 'rustic repair index'.
+ NoPack { id: PackId }, + /// file {file:?} doesn't have a content + FileHasNoContent { file: PathBuf }, + /// file {file:?} blob {blob_num} has null ID + FileBlobHasNullId { file: PathBuf, blob_num: usize }, + /// file {file:?} blob {blob_id} is missing in index + FileBlobNotInIndex { file: PathBuf, blob_id: Id }, + /// dir {dir:?} doesn't have a subtree + NoSubTree { dir: PathBuf }, + /// dir {dir:?} subtree has null ID + NullSubTree { dir: PathBuf }, + /// pack {id}: data size does not match expected size. Read: {size} bytes, expected: {expected} bytes + PackSizeMismatch { + id: PackId, + size: usize, + expected: usize, + }, + /// pack {id}: Hash mismatch. Computed hash: {computed} + PackHashMismatch { id: PackId, computed: PackId }, + /// pack {id}: Header length in pack file doesn't match index. In pack: {length}, computed: {computed} + PackHeaderLengthMismatch { + id: PackId, + length: u32, + computed: u32, + }, + /// pack {id}: Header from pack file does not match the index + PackHeaderMismatchIndex { id: PackId }, + /// pack {id}, blob {blob_id}: Actual uncompressed length does not fit saved uncompressed length + PackBlobLengthMismatch { id: PackId, blob_id: BlobId }, + /// pack {id}, blob {blob_id}: Hash mismatch.
Computed hash: {computed} + PackBlobHashMismatch { + id: PackId, + blob_id: BlobId, + computed: BlobId, + }, +} + +pub(crate) type CheckResult = Result<(), Vec<CheckCommandErrorKind>>; + #[derive(Clone, Copy, Debug, Default)] #[non_exhaustive] /// Options to specify which subset of packs will be read @@ -97,8 +192,12 @@ impl ReadSubsetOption { } } -/// parses n/m inclding named settings depending on current date -fn parse_n_m(now: NaiveDateTime, n_in: &str, m_in: &str) -> Result<(u32, u32), CommandErrorKind> { +/// parses n/m including named settings depending on current date +fn parse_n_m( + now: NaiveDateTime, + n_in: &str, + m_in: &str, +) -> Result<(u32, u32), CheckCommandErrorKind> { let is_leap_year = |dt: NaiveDateTime| { let year = dt.year(); year % 4 == 0 && (year % 25 != 0 || year % 16 == 0) @@ -139,7 +238,7 @@ fn parse_n_m(now: NaiveDateTime, n_in: &str, m_in: &str) -> Result<(u32, u32), C } impl FromStr for ReadSubsetOption { - type Err = CommandErrorKind; + type Err = CheckCommandErrorKind; fn from_str(s: &str) -> Result<Self, Self::Err> { let result = if s == "all" { Self::All @@ -152,7 +251,7 @@ impl FromStr for ReadSubsetOption { } else { Self::Size( ByteSize::from_str(s) - .map_err(CommandErrorKind::FromByteSizeParser)? + .map_err(CheckCommandErrorKind::FromByteSizeParser)? .as_u64(), ) }; diff --git a/crates/core/src/commands/config.rs b/crates/core/src/commands/config.rs index 195f87012..29122138f 100644 --- a/crates/core/src/commands/config.rs +++ b/crates/core/src/commands/config.rs @@ -1,15 +1,40 @@ //!
`config` subcommand +use std::ops::RangeInclusive; + use bytesize::ByteSize; use derive_setters::Setters; use crate::{ backend::decrypt::{DecryptBackend, DecryptWriteBackend}, crypto::CryptoKey, - error::{CommandErrorKind, RusticResult}, + error::RusticResult, repofile::ConfigFile, repository::{Open, Repository}, }; +#[non_exhaustive] +#[derive(thiserror::Error, Debug, displaydoc::Display)] +pub enum ConfigCommandErrorKind { + /// Not allowed on an append-only repository: `{0}` + NotAllowedWithAppendOnly(String), + /// compression level `{0}` is not supported for repo v1 + NoCompressionV1Repo(i32), + /// version `{0}` is not supported. Allowed values: `{1:?}` + VersionNotSupported(u32, RangeInclusive<u32>), + /// compression level `{0}` is not supported. Allowed values: `{1:?}` + CompressionLevelNotSupported(i32, RangeInclusive<i32>), + /// cannot downgrade version from `{0}` to `{1}` + CannotDowngrade(u32, u32), + /// Size is too large: `{0}` + SizeTooLarge(ByteSize), + /// min_packsize_tolerate_percent must be <= 100 + MinPackSizeTolerateWrong, + /// max_packsize_tolerate_percent must be >= 100 or 0 + MaxPackSizeTolerateWrong, +} + +pub(crate) type ConfigCommandResult<T> = Result<T, ConfigCommandErrorKind>; + /// Apply the [`ConfigOptions`] to a given [`ConfigFile`] /// /// # Type Parameters /// @@ -24,34 +49,38 @@ use crate::{ /// /// # Errors /// -/// * [`CommandErrorKind::VersionNotSupported`] - If the version is not supported -/// * [`CommandErrorKind::CannotDowngrade`] - If the version is lower than the current version -/// * [`CommandErrorKind::NoCompressionV1Repo`] - If compression is set for a v1 repo -/// * [`CommandErrorKind::CompressionLevelNotSupported`] - If the compression level is not supported -/// * [`CommandErrorKind::SizeTooLarge`] - If the size is too large -/// * [`CommandErrorKind::MinPackSizeTolerateWrong`] - If the min packsize tolerance percent is wrong -/// * [`CommandErrorKind::MaxPackSizeTolerateWrong`] - If the max packsize tolerance percent is wrong +/// *
[`ConfigCommandErrorKind::VersionNotSupported`] - If the version is not supported +/// * [`ConfigCommandErrorKind::CannotDowngrade`] - If the version is lower than the current version +/// * [`ConfigCommandErrorKind::NoCompressionV1Repo`] - If compression is set for a v1 repo +/// * [`ConfigCommandErrorKind::CompressionLevelNotSupported`] - If the compression level is not supported +/// * [`ConfigCommandErrorKind::SizeTooLarge`] - If the size is too large +/// * [`ConfigCommandErrorKind::MinPackSizeTolerateWrong`] - If the min packsize tolerance percent is wrong +/// * [`ConfigCommandErrorKind::MaxPackSizeTolerateWrong`] - If the max packsize tolerance percent is wrong /// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the file could not be serialized to json. /// /// # Returns /// /// Whether the config was changed /// -/// [`CommandErrorKind::VersionNotSupported`]: crate::error::CommandErrorKind::VersionNotSupported -/// [`CommandErrorKind::CannotDowngrade`]: crate::error::CommandErrorKind::CannotDowngrade -/// [`CommandErrorKind::NoCompressionV1Repo`]: crate::error::CommandErrorKind::NoCompressionV1Repo -/// [`CommandErrorKind::CompressionLevelNotSupported`]: crate::error::CommandErrorKind::CompressionLevelNotSupported -/// [`CommandErrorKind::SizeTooLarge`]: crate::error::CommandErrorKind::SizeTooLarge -/// [`CommandErrorKind::MinPackSizeTolerateWrong`]: crate::error::CommandErrorKind::MinPackSizeTolerateWrong -/// [`CommandErrorKind::MaxPackSizeTolerateWrong`]: crate::error::CommandErrorKind::MaxPackSizeTolerateWrong +/// [`ConfigCommandErrorKind::VersionNotSupported`]: ConfigCommandErrorKind::VersionNotSupported +/// [`ConfigCommandErrorKind::CannotDowngrade`]: ConfigCommandErrorKind::CannotDowngrade +/// [`ConfigCommandErrorKind::NoCompressionV1Repo`]: ConfigCommandErrorKind::NoCompressionV1Repo +/// [`ConfigCommandErrorKind::CompressionLevelNotSupported`]: ConfigCommandErrorKind::CompressionLevelNotSupported +/// 
[`ConfigCommandErrorKind::SizeTooLarge`]: ConfigCommandErrorKind::SizeTooLarge +/// [`ConfigCommandErrorKind::MinPackSizeTolerateWrong`]: ConfigCommandErrorKind::MinPackSizeTolerateWrong +/// [`ConfigCommandErrorKind::MaxPackSizeTolerateWrong`]: ConfigCommandErrorKind::MaxPackSizeTolerateWrong /// [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`]: crate::error::CryptBackendErrorKind::SerializingToJsonByteVectorFailed pub(crate) fn apply_config( repo: &Repository, opts: &ConfigOptions, ) -> RusticResult { if repo.config().append_only == Some(true) { - return Err(CommandErrorKind::NotAllowedWithAppendOnly("config change".to_string()).into()); + return Err(ConfigCommandErrorKind::NotAllowedWithAppendOnly( + "config change".to_string(), + )) + .map_err(|_err| todo!("Error transition")); } + let mut new_config = repo.config().clone(); opts.apply(&mut new_config)?; if &new_config == repo.config() { @@ -92,10 +121,11 @@ pub(crate) fn save_config( if let Some(hot_be) = repo.be_hot.clone() { // save config to hot repo - let dbe = DecryptBackend::new(hot_be, key); + let dbe = DecryptBackend::new(hot_be.clone(), key); new_config.is_hot = Some(true); _ = dbe.save_file_uncompressed(&new_config)?; } + Ok(()) } @@ -183,41 +213,49 @@ impl ConfigOptions { /// /// # Errors /// - /// * [`CommandErrorKind::VersionNotSupported`] - If the version is not supported - /// * [`CommandErrorKind::CannotDowngrade`] - If the version is lower than the current version - /// * [`CommandErrorKind::NoCompressionV1Repo`] - If compression is set for a v1 repo - /// * [`CommandErrorKind::CompressionLevelNotSupported`] - If the compression level is not supported - /// * [`CommandErrorKind::SizeTooLarge`] - If the size is too large - /// * [`CommandErrorKind::MinPackSizeTolerateWrong`] - If the min packsize tolerate percent is wrong - /// * [`CommandErrorKind::MaxPackSizeTolerateWrong`] - If the max packsize tolerate percent is wrong + /// * [`ConfigCommandErrorKind::VersionNotSupported`] - If 
the version is not supported + /// * [`ConfigCommandErrorKind::CannotDowngrade`] - If the version is lower than the current version + /// * [`ConfigCommandErrorKind::NoCompressionV1Repo`] - If compression is set for a v1 repo + /// * [`ConfigCommandErrorKind::CompressionLevelNotSupported`] - If the compression level is not supported + /// * [`ConfigCommandErrorKind::SizeTooLarge`] - If the size is too large + /// * [`ConfigCommandErrorKind::MinPackSizeTolerateWrong`] - If the min packsize tolerate percent is wrong + /// * [`ConfigCommandErrorKind::MaxPackSizeTolerateWrong`] - If the max packsize tolerate percent is wrong /// - /// [`CommandErrorKind::VersionNotSupported`]: crate::error::CommandErrorKind::VersionNotSupported - /// [`CommandErrorKind::CannotDowngrade`]: crate::error::CommandErrorKind::CannotDowngrade - /// [`CommandErrorKind::NoCompressionV1Repo`]: crate::error::CommandErrorKind::NoCompressionV1Repo - /// [`CommandErrorKind::CompressionLevelNotSupported`]: crate::error::CommandErrorKind::CompressionLevelNotSupported - /// [`CommandErrorKind::SizeTooLarge`]: crate::error::CommandErrorKind::SizeTooLarge - /// [`CommandErrorKind::MinPackSizeTolerateWrong`]: crate::error::CommandErrorKind::MinPackSizeTolerateWrong - /// [`CommandErrorKind::MaxPackSizeTolerateWrong`]: crate::error::CommandErrorKind::MaxPackSizeTolerateWrong + /// [`ConfigCommandErrorKind::VersionNotSupported`]: ConfigCommandErrorKind::VersionNotSupported + /// [`ConfigCommandErrorKind::CannotDowngrade`]: ConfigCommandErrorKind::CannotDowngrade + /// [`ConfigCommandErrorKind::NoCompressionV1Repo`]: ConfigCommandErrorKind::NoCompressionV1Repo + /// [`ConfigCommandErrorKind::CompressionLevelNotSupported`]: ConfigCommandErrorKind::CompressionLevelNotSupported + /// [`ConfigCommandErrorKind::SizeTooLarge`]: ConfigCommandErrorKind::SizeTooLarge + /// [`ConfigCommandErrorKind::MinPackSizeTolerateWrong`]: ConfigCommandErrorKind::MinPackSizeTolerateWrong + /// 
[`ConfigCommandErrorKind::MaxPackSizeTolerateWrong`]: ConfigCommandErrorKind::MaxPackSizeTolerateWrong pub fn apply(&self, config: &mut ConfigFile) -> RusticResult<()> { if let Some(version) = self.set_version { let range = 1..=2; if !range.contains(&version) { - return Err(CommandErrorKind::VersionNotSupported(version, range).into()); + return Err(ConfigCommandErrorKind::VersionNotSupported(version, range)) + .map_err(|_err| todo!("Error transition")); } else if version < config.version { - return Err(CommandErrorKind::CannotDowngrade(config.version, version).into()); + return Err(ConfigCommandErrorKind::CannotDowngrade( + config.version, + version, + )) + .map_err(|_err| todo!("Error transition")); } config.version = version; } if let Some(compression) = self.set_compression { if config.version == 1 && compression != 0 { - return Err(CommandErrorKind::NoCompressionV1Repo(compression).into()); + return Err(ConfigCommandErrorKind::NoCompressionV1Repo(compression)) + .map_err(|_err| todo!("Error transition")); } let range = zstd::compression_level_range(); if !range.contains(&compression) { - return Err( - CommandErrorKind::CompressionLevelNotSupported(compression, range).into(), - ); + return Err(ConfigCommandErrorKind::CompressionLevelNotSupported( + compression, + range, + )) + .map_err(|_err| todo!("Error transition")); } config.compression = Some(compression); } @@ -230,7 +268,8 @@ impl ConfigOptions { config.treepack_size = Some( size.as_u64() .try_into() - .map_err(|_| CommandErrorKind::SizeTooLarge(size))?, + .map_err(|_| ConfigCommandErrorKind::SizeTooLarge(size)) + .map_err(|_err| todo!("Error transition"))?, ); } if let Some(factor) = self.set_treepack_growfactor { @@ -240,7 +279,8 @@ impl ConfigOptions { config.treepack_size_limit = Some( size.as_u64() .try_into() - .map_err(|_| CommandErrorKind::SizeTooLarge(size))?, + .map_err(|_| ConfigCommandErrorKind::SizeTooLarge(size)) + .map_err(|_err| todo!("Error transition"))?, ); } @@ -248,7 +288,8 @@ impl 
ConfigOptions { config.datapack_size = Some( size.as_u64() .try_into() - .map_err(|_| CommandErrorKind::SizeTooLarge(size))?, + .map_err(|_| ConfigCommandErrorKind::SizeTooLarge(size)) + .map_err(|_err| todo!("Error transition"))?, ); } if let Some(factor) = self.set_datapack_growfactor { @@ -258,20 +299,23 @@ impl ConfigOptions { config.datapack_size_limit = Some( size.as_u64() .try_into() - .map_err(|_| CommandErrorKind::SizeTooLarge(size))?, + .map_err(|_| ConfigCommandErrorKind::SizeTooLarge(size)) + .map_err(|_err| todo!("Error transition"))?, ); } if let Some(percent) = self.set_min_packsize_tolerate_percent { if percent > 100 { - return Err(CommandErrorKind::MinPackSizeTolerateWrong.into()); + return Err(ConfigCommandErrorKind::MinPackSizeTolerateWrong) + .map_err(|_err| todo!("Error transition")); } config.min_packsize_tolerate_percent = Some(percent); } if let Some(percent) = self.set_max_packsize_tolerate_percent { if percent < 100 && percent > 0 { - return Err(CommandErrorKind::MaxPackSizeTolerateWrong.into()); + return Err(ConfigCommandErrorKind::MaxPackSizeTolerateWrong) + .map_err(|_err| todo!("Error transition")); } config.max_packsize_tolerate_percent = Some(percent); } diff --git a/crates/core/src/commands/dump.rs b/crates/core/src/commands/dump.rs index 503a998c7..52ccaa1e2 100644 --- a/crates/core/src/commands/dump.rs +++ b/crates/core/src/commands/dump.rs @@ -3,7 +3,7 @@ use std::io::Write; use crate::{ backend::node::{Node, NodeType}, blob::{BlobId, BlobType}, - error::{CommandErrorKind, RusticResult}, + error::RusticResult, repository::{IndexedFull, Repository}, }; @@ -31,12 +31,14 @@ pub(crate) fn dump( w: &mut impl Write, ) -> RusticResult<()> { if node.node_type != NodeType::File { - return Err(CommandErrorKind::DumpNotSupported(node.node_type.clone()).into()); + return Err(CommandErrorKind::DumpNotSupported(node.node_type.clone()).into()) + .map_err(|_err| todo!("Error transition")); } for id in node.content.as_ref().unwrap() { let data = 
repo.get_blob_cached(&BlobId::from(**id), BlobType::Data)?; - w.write_all(&data)?; + w.write_all(&data) + .map_err(|_err| todo!("Error transition"))?; } Ok(()) } diff --git a/crates/core/src/commands/forget.rs b/crates/core/src/commands/forget.rs index 0bfd4c42e..dfe76e717 100644 --- a/crates/core/src/commands/forget.rs +++ b/crates/core/src/commands/forget.rs @@ -6,7 +6,7 @@ use serde_derive::{Deserialize, Serialize}; use serde_with::{serde_as, skip_serializing_none, DisplayFromStr}; use crate::{ - error::{CommandErrorKind, RusticResult}, + error::RusticResult, progress::ProgressBars, repofile::{ snapshotfile::{SnapshotGroup, SnapshotGroupCriterion, SnapshotId}, @@ -523,7 +523,7 @@ impl KeepOptions { now: DateTime, ) -> RusticResult> { if !self.is_valid() { - return Err(CommandErrorKind::NoKeepOption.into()); + return Err(CommandErrorKind::NoKeepOption).map_err(|_err| todo!("Error transition")); } let mut group_keep = self.clone(); diff --git a/crates/core/src/commands/init.rs b/crates/core/src/commands/init.rs index f932658ca..6047acb8a 100644 --- a/crates/core/src/commands/init.rs +++ b/crates/core/src/commands/init.rs @@ -10,7 +10,7 @@ use crate::{ key::{init_key, KeyOptions}, }, crypto::aespoly1305::Key, - error::{RusticErrorKind, RusticResult}, + error::RusticResult, id::Id, repofile::{configfile::RepositoryId, ConfigFile}, repository::Repository, @@ -85,7 +85,7 @@ pub(crate) fn init_with_config( key_opts: &KeyOptions, config: &ConfigFile, ) -> RusticResult { - repo.be.create().map_err(RusticErrorKind::Backend)?; + repo.be.create()?; let (key, id) = init_key(repo, key_opts, pass)?; info!("key {id} successfully added."); save_config(repo, config.clone(), key)?; diff --git a/crates/core/src/commands/key.rs b/crates/core/src/commands/key.rs index af277665c..ce2aba593 100644 --- a/crates/core/src/commands/key.rs +++ b/crates/core/src/commands/key.rs @@ -4,7 +4,7 @@ use derive_setters::Setters; use crate::{ backend::{decrypt::DecryptWriteBackend, FileType, 
WriteBackend}, crypto::{aespoly1305::Key, hasher::hash}, - error::{CommandErrorKind, RusticErrorKind, RusticResult}, + error::RusticResult, repofile::{KeyFile, KeyId}, repository::{Open, Repository}, }; @@ -112,10 +112,10 @@ pub(crate) fn add_key_to_repo( let ko = opts.clone(); let keyfile = KeyFile::generate(key, &pass, ko.hostname, ko.username, ko.with_created)?; - let data = serde_json::to_vec(&keyfile).map_err(CommandErrorKind::FromJsonError)?; + let data = serde_json::to_vec(&keyfile).map_err(|_err| todo!("Error transition"))?; let id = KeyId::from(hash(&data)); repo.be .write_bytes(FileType::Key, &id, false, data.into()) - .map_err(RusticErrorKind::Backend)?; + .map_err(|_err| todo!("Error transition"))?; Ok(id) } diff --git a/crates/core/src/commands/merge.rs b/crates/core/src/commands/merge.rs index 561411fbe..c8cbc1937 100644 --- a/crates/core/src/commands/merge.rs +++ b/crates/core/src/commands/merge.rs @@ -11,7 +11,7 @@ use crate::{ tree::{self, Tree, TreeId}, BlobId, BlobType, }, - error::{CommandErrorKind, RusticResult}, + error::RusticResult, index::{indexer::Indexer, ReadIndex}, progress::{Progress, ProgressBars}, repofile::{PathList, SnapshotFile, SnapshotSummary}, @@ -44,7 +44,9 @@ pub(crate) fn merge_snapshots( .collect::() .merge(); - snap.paths.set_paths(&paths.paths())?; + snap.paths + .set_paths(&paths.paths()) + .map_err(|_err| todo!("Error transition"))?; // set snapshot time to time of latest snapshot to be merged snap.time = snapshots @@ -58,7 +60,9 @@ pub(crate) fn merge_snapshots( let trees: Vec = snapshots.iter().map(|sn| sn.tree).collect(); snap.tree = merge_trees(repo, &trees, cmp, &mut summary)?; - summary.finalize(now)?; + summary + .finalize(now) + .map_err(|_err| todo!("Error transition"))?; snap.summary = Some(summary); snap.id = repo.dbe().save_file(&snap)?.into(); @@ -104,9 +108,9 @@ pub(crate) fn merge_trees( repo.config(), index.total_size(BlobType::Tree), )?; - let save = |tree: Tree| { - let (chunk, new_id) = 
tree.serialize()?; - let size = u64::try_from(chunk.len()).map_err(CommandErrorKind::ConversionFromIntFailed)?; + let save = |tree: Tree| -> RusticResult<_> { + let (chunk, new_id) = tree.serialize().map_err(|_err| todo!("Error transition"))?; + let size = u64::try_from(chunk.len()).map_err(|_err| todo!("Error transition"))?; if !index.has_tree(&new_id) { packer.add(chunk.into(), BlobId::from(*new_id))?; } diff --git a/crates/core/src/commands/prune.rs b/crates/core/src/commands/prune.rs index d2082f9c5..c36908b6d 100644 --- a/crates/core/src/commands/prune.rs +++ b/crates/core/src/commands/prune.rs @@ -30,7 +30,7 @@ use crate::{ tree::TreeStreamerOnce, BlobId, BlobType, BlobTypeMap, Initialize, }, - error::{CommandErrorKind, RusticErrorKind, RusticResult}, + error::{RusticError, RusticResult}, index::{ binarysorted::{IndexCollector, IndexType}, indexer::Indexer, @@ -198,16 +198,16 @@ pub enum LimitOption { } impl FromStr for LimitOption { - type Err = CommandErrorKind; + type Err = RusticError; fn from_str(s: &str) -> Result { Ok(match s.chars().last().unwrap_or('0') { '%' => Self::Percentage({ let mut copy = s.to_string(); _ = copy.pop(); - copy.parse()? + copy.parse().map_err(|_err| todo!("Error transition"))? 
}), 'd' if s == "unlimited" => Self::Unlimited, - _ => Self::Size(ByteSize::from_str(s).map_err(CommandErrorKind::FromByteSizeParser)?), + _ => Self::Size(ByteSize::from_str(s).map_err(|_err| todo!("Error transition"))?), }) } } @@ -689,7 +689,8 @@ impl PrunePlan { let be = repo.dbe(); if repo.config().version < 2 && opts.repack_uncompressed { - return Err(CommandErrorKind::RepackUncompressedRepoV1.into()); + return Err(CommandErrorKind::RepackUncompressedRepoV1) + .map_err(|_err| todo!("Error transition")); } let mut index_files = Vec::new(); @@ -718,8 +719,7 @@ impl PrunePlan { // list existing pack files let p = pb.progress_spinner("getting packs from repository..."); let existing_packs: BTreeMap<_, _> = be - .list_with_size(FileType::Pack) - .map_err(RusticErrorKind::Backend)? + .list_with_size(FileType::Pack)? .into_iter() .map(|(id, size)| (PackId::from(id), size)) .collect(); @@ -734,8 +734,8 @@ impl PrunePlan { let pack_sizer = total_size.map(|tpe, size| PackSizer::from_config(repo.config(), tpe, size)); pruner.decide_packs( - Duration::from_std(*opts.keep_pack).map_err(CommandErrorKind::FromOutOfRangeError)?, - Duration::from_std(*opts.keep_delete).map_err(CommandErrorKind::FromOutOfRangeError)?, + Duration::from_std(*opts.keep_pack).map_err(|_err| todo!("Error transition"))?, + Duration::from_std(*opts.keep_delete).map_err(|_err| todo!("Error transition"))?, repack_cacheable_only, opts.repack_uncompressed, opts.repack_all, @@ -781,7 +781,8 @@ impl PrunePlan { fn check(&self) -> RusticResult<()> { for (id, count) in &self.used_ids { if *count == 0 { - return Err(CommandErrorKind::BlobsMissing(*id).into()); + return Err(CommandErrorKind::BlobsMissing(*id)) + .map_err(|_err| todo!("Error transition")); } } Ok(()) @@ -1052,13 +1053,18 @@ impl PrunePlan { Some(size) if size == pack.size => Ok(()), // size is ok => continue Some(size) => Err(CommandErrorKind::PackSizeNotMatching( pack.id, pack.size, size, - )), - None => 
Err(CommandErrorKind::PackNotExisting(pack.id)), + )) + .map_err(|_err| todo!("Error transition")), + None => Err(CommandErrorKind::PackNotExisting(pack.id)) + .map_err(|_err| todo!("Error transition")), } }; match pack.to_do { - PackToDo::Undecided => return Err(CommandErrorKind::NoDecision(pack.id).into()), + PackToDo::Undecided => { + return Err(CommandErrorKind::NoDecision(pack.id).into()) + .map_err(|_err| todo!("Error transition")) + } PackToDo::Keep | PackToDo::Recover => { for blob in &pack.blobs { _ = self.used_ids.remove(&blob.id); @@ -1301,7 +1307,10 @@ pub(crate) fn prune_repository( .into_par_iter() .try_for_each(|pack| -> RusticResult<_> { match pack.to_do { - PackToDo::Undecided => return Err(CommandErrorKind::NoDecision(pack.id).into()), + PackToDo::Undecided => { + return Err(CommandErrorKind::NoDecision(pack.id)) + .map_err(|_err| todo!("Error transition")) + } PackToDo::Keep => { // keep pack: add to new index let pack = pack.into_index_pack(); @@ -1528,8 +1537,7 @@ fn find_used_blobs( let p = pb.progress_counter("reading snapshots..."); let list: Vec<_> = be - .list(FileType::Snapshot) - .map_err(RusticErrorKind::Backend)? + .list(FileType::Snapshot)? 
.into_iter() .filter(|id| !ignore_snaps.contains(&SnapshotId::from(*id))) .collect(); diff --git a/crates/core/src/commands/repair/index.rs b/crates/core/src/commands/repair/index.rs index f2e8dc563..7c169f444 100644 --- a/crates/core/src/commands/repair/index.rs +++ b/crates/core/src/commands/repair/index.rs @@ -9,7 +9,7 @@ use crate::{ decrypt::{DecryptReadBackend, DecryptWriteBackend}, FileType, ReadBackend, WriteBackend, }, - error::{CommandErrorKind, RusticErrorKind, RusticResult}, + error::{ErrorKind, RusticResult}, index::{binarysorted::IndexCollector, indexer::Indexer, GlobalIndex}, progress::{Progress, ProgressBars}, repofile::{packfile::PackId, IndexFile, IndexPack, PackHeader, PackHeaderRef}, @@ -44,7 +44,10 @@ pub(crate) fn repair_index( dry_run: bool, ) -> RusticResult<()> { if repo.config().append_only == Some(true) { - return Err(CommandErrorKind::NotAllowedWithAppendOnly("index repair".to_string()).into()); + return Err(CommandErrorKind::NotAllowedWithAppendOnly( + "index repair".to_string(), + )) + .map_err(|_err| todo!("Error transition")); } let be = repo.dbe(); @@ -60,8 +63,7 @@ pub(crate) fn repair_index( if !new_index.packs.is_empty() || !new_index.packs_to_delete.is_empty() { _ = be.save_file(&new_index)?; } - be.remove(FileType::Index, &index_id, true) - .map_err(RusticErrorKind::Backend)?; + be.remove(FileType::Index, &index_id, true)?; } (false, _) => {} // nothing to do } @@ -77,7 +79,7 @@ pub(crate) fn repair_index( pack_read_header .len() .try_into() - .map_err(CommandErrorKind::ConversionFromIntFailed)?, + .map_err(|_err| todo!("Error transition"))?, ); for (id, size_hint, packsize) in pack_read_header { debug!("reading pack {id}..."); @@ -115,8 +117,7 @@ impl PackChecker { let be = repo.dbe(); let p = repo.pb.progress_spinner("listing packs..."); let packs: HashMap<_, _> = be - .list_with_size(FileType::Pack) - .map_err(RusticErrorKind::Backend)? + .list_with_size(FileType::Pack)? 
.into_iter() .map(|(id, size)| (PackId::from(id), size)) .collect(); @@ -190,7 +191,7 @@ pub(crate) fn index_checked_from_collector( pack_read_header .len() .try_into() - .map_err(CommandErrorKind::ConversionFromIntFailed)?, + .map_err(|_err| todo!("Error transition"))?, ); let index_packs: Vec<_> = pack_read_header .into_iter() diff --git a/crates/core/src/commands/repair/snapshots.rs b/crates/core/src/commands/repair/snapshots.rs index ebf6509cd..22400743d 100644 --- a/crates/core/src/commands/repair/snapshots.rs +++ b/crates/core/src/commands/repair/snapshots.rs @@ -14,7 +14,7 @@ use crate::{ tree::{Tree, TreeId}, BlobId, BlobType, }, - error::{CommandErrorKind, RusticResult}, + error::RusticResult, index::{indexer::Indexer, ReadGlobalIndex, ReadIndex}, progress::ProgressBars, repofile::{snapshotfile::SnapshotId, SnapshotFile, StringList}, @@ -280,7 +280,7 @@ pub(crate) fn repair_tree( (Some(id), Changed::None) => Ok((Changed::None, id)), (_, c) => { // the tree has been changed => save it - let (chunk, new_id) = tree.serialize()?; + let (chunk, new_id) = tree.serialize().map_err(|_err| todo!("Error transition"))?; if !index.has_tree(&new_id) && !dry_run { packer.add(chunk.into(), BlobId::from(*new_id))?; } diff --git a/crates/core/src/commands/repoinfo.rs b/crates/core/src/commands/repoinfo.rs index e105e305d..7aa8c95fc 100644 --- a/crates/core/src/commands/repoinfo.rs +++ b/crates/core/src/commands/repoinfo.rs @@ -4,7 +4,7 @@ use serde_with::skip_serializing_none; use crate::{ backend::{decrypt::DecryptReadBackend, FileType, ReadBackend, ALL_FILE_TYPES}, blob::{BlobType, BlobTypeMap}, - error::{RusticErrorKind, RusticResult}, + error::RusticResult, index::IndexEntry, progress::{Progress, ProgressBars}, repofile::indexfile::{IndexFile, IndexPack}, @@ -191,7 +191,7 @@ pub struct RepoFileInfo { pub(crate) fn collect_file_info(be: &impl ReadBackend) -> RusticResult> { let mut files = Vec::with_capacity(ALL_FILE_TYPES.len()); for tpe in ALL_FILE_TYPES { - let list 
= be.list_with_size(tpe).map_err(RusticErrorKind::Backend)?; + let list = be.list_with_size(tpe)?; let count = list.len() as u64; let size = list.iter().map(|f| u64::from(f.1)).sum(); files.push(RepoFileInfo { tpe, count, size }); diff --git a/crates/core/src/commands/restore.rs b/crates/core/src/commands/restore.rs index 033f97854..fcbb863d7 100644 --- a/crates/core/src/commands/restore.rs +++ b/crates/core/src/commands/restore.rs @@ -23,7 +23,7 @@ use crate::{ node::{Node, NodeType}, FileType, ReadBackend, }, - error::{CommandErrorKind, RusticResult}, + error::RusticResult, progress::{Progress, ProgressBars}, repofile::packfile::PackId, repository::{IndexedFull, IndexedTree, Open, Repository}, @@ -222,9 +222,11 @@ pub(crate) fn collect_and_prepare( stats.dirs.restore += 1; debug!("to restore: {path:?}"); if !dry_run { - dest.create_dir(path).map_err(|err| { - CommandErrorKind::ErrorCreating(path.clone(), Box::new(err)) - })?; + dest.create_dir(path) + .map_err(|err| { + CommandErrorKind::ErrorCreating(path.clone(), Box::new(err)) + }) + .map_err(|_err| todo!("Error transition"))?; } } } @@ -236,7 +238,8 @@ pub(crate) fn collect_and_prepare( .add_file(dest, node, path.clone(), repo, opts.verify_existing) .map_err(|err| { CommandErrorKind::ErrorCollecting(path.clone(), Box::new(err)) - })?, + }) + .map_err(|_err| todo!("Error transition"))?, ) { // Note that exists = false and Existing or Verified can happen if the file is changed between scanning the dir // and calling add_file. So we don't care about exists but trust add_file here. 
@@ -450,7 +453,8 @@ fn restore_contents( if *size == 0 { let path = &filenames[i]; dest.set_length(path, *size) - .map_err(|err| CommandErrorKind::ErrorSettingLength(path.clone(), Box::new(err)))?; + .map_err(|err| CommandErrorKind::ErrorSettingLength(path.clone(), Box::new(err))) + .map_err(|_err| todo!("Error transition"))?; } } @@ -494,7 +498,8 @@ fn restore_contents( let pool = ThreadPoolBuilder::new() .num_threads(constants::MAX_READER_THREADS_NUM) .build() - .map_err(CommandErrorKind::FromRayonError)?; + .map_err(CommandErrorKind::FromRayonError) + .map_err(|_err| todo!("Error transition"))?; pool.in_place_scope(|s| { for (pack, offset, length, from_file, name_dests) in blobs { let p = &p; @@ -544,6 +549,7 @@ fn restore_contents( Box::new(err), ) }) + .map_err(|_err| todo!("Error transition")) .unwrap(); sizes_guard[file_idx] = 0; } @@ -664,7 +670,8 @@ impl RestorePlan { if let Some(meta) = open_file .as_ref() .map(std::fs::File::metadata) - .transpose()? + .transpose() + .map_err(|_err| todo!("Error transition"))? { if meta.len() == 0 { // Empty file exists @@ -677,7 +684,8 @@ impl RestorePlan { if let Some(meta) = open_file .as_ref() .map(std::fs::File::metadata) - .transpose()? + .transpose() + .map_err(|_err| todo!("Error transition"))? { // TODO: This is the same logic as in backend/ignore.rs => consollidate! 
let mtime = meta @@ -706,8 +714,9 @@ impl RestorePlan { }; let length = bl.data_length(); - let usize_length = - usize::try_from(length).map_err(CommandErrorKind::ConversionFromIntFailed)?; + let usize_length = usize::try_from(length) + .map_err(CommandErrorKind::ConversionFromIntFailed) + .map_err(|_err| todo!("Error transition"))?; let matches = open_file .as_mut() diff --git a/crates/core/src/crypto.rs b/crates/core/src/crypto.rs index 89e885540..37a2a1202 100644 --- a/crates/core/src/crypto.rs +++ b/crates/core/src/crypto.rs @@ -1,8 +1,20 @@ -use crate::RusticResult; - pub(crate) mod aespoly1305; pub(crate) mod hasher; +/// [`CryptoErrorKind`] describes the errors that can happen while dealing with Cryptographic functions +#[derive(thiserror::Error, Debug, displaydoc::Display, Copy, Clone)] +#[non_exhaustive] +pub enum CryptoErrorKind { + /// data decryption failed: `{0:?}` + DataDecryptionFailed(aes256ctr_poly1305aes::aead::Error), + /// data encryption failed: `{0:?}` + DataEncryptionFailed(aes256ctr_poly1305aes::aead::Error), + /// crypto key too short + CryptoKeyTooShort, +} + +pub(crate) type CryptoResult = Result; + /// A trait for encrypting and decrypting data. pub trait CryptoKey: Clone + Copy + Sized + Send + Sync + 'static { /// Decrypt the given data. @@ -14,7 +26,7 @@ pub trait CryptoKey: Clone + Copy + Sized + Send + Sync + 'static { /// # Returns /// /// A vector containing the decrypted data. - fn decrypt_data(&self, data: &[u8]) -> RusticResult>; + fn decrypt_data(&self, data: &[u8]) -> CryptoResult>; /// Encrypt the given data. /// @@ -25,5 +37,5 @@ pub trait CryptoKey: Clone + Copy + Sized + Send + Sync + 'static { /// # Returns /// /// A vector containing the encrypted data. 
- fn encrypt_data(&self, data: &[u8]) -> RusticResult>; + fn encrypt_data(&self, data: &[u8]) -> CryptoResult>; } diff --git a/crates/core/src/crypto/aespoly1305.rs b/crates/core/src/crypto/aespoly1305.rs index c068f9b1f..5802d40c5 100644 --- a/crates/core/src/crypto/aespoly1305.rs +++ b/crates/core/src/crypto/aespoly1305.rs @@ -4,7 +4,7 @@ use aes256ctr_poly1305aes::{ }; use rand::{thread_rng, RngCore}; -use crate::{crypto::CryptoKey, error::CryptoErrorKind, error::RusticResult}; +use crate::crypto::{CryptoErrorKind, CryptoKey, CryptoResult}; pub(crate) type Nonce = aead::Nonce; pub(crate) type AeadKey = aes256ctr_poly1305aes::Key; @@ -82,7 +82,7 @@ impl CryptoKey for Key { /// # Errors /// /// If the MAC couldn't be checked. - fn decrypt_data(&self, data: &[u8]) -> RusticResult> { + fn decrypt_data(&self, data: &[u8]) -> CryptoResult> { if data.len() < 16 { return Err(CryptoErrorKind::CryptoKeyTooShort)?; } @@ -90,7 +90,7 @@ impl CryptoKey for Key { let nonce = Nonce::from_slice(&data[0..16]); Aes256CtrPoly1305Aes::new(&self.0) .decrypt(nonce, &data[16..]) - .map_err(|err| CryptoErrorKind::DataDecryptionFailed(err).into()) + .map_err(|err| CryptoErrorKind::DataDecryptionFailed(err)) } /// Returns the encrypted+MACed data from the given data. @@ -102,7 +102,7 @@ impl CryptoKey for Key { /// # Errors /// /// If the data could not be encrypted. 
- fn encrypt_data(&self, data: &[u8]) -> RusticResult> { + fn encrypt_data(&self, data: &[u8]) -> CryptoResult> { let mut nonce = Nonce::default(); thread_rng().fill_bytes(&mut nonce); diff --git a/crates/core/src/error.rs b/crates/core/src/error.rs index 844b14f19..a6992b882 100644 --- a/crates/core/src/error.rs +++ b/crates/core/src/error.rs @@ -5,806 +5,416 @@ // use std::error::Error as StdError; // use std::fmt; +use derive_setters::Setters; use std::{ - error::Error, - ffi::OsString, - num::{ParseFloatError, ParseIntError, TryFromIntError}, - ops::RangeInclusive, - path::{PathBuf, StripPrefixError}, - process::ExitStatus, - str::Utf8Error, + backtrace::Backtrace, + fmt::{self, Display}, }; -#[cfg(not(windows))] -use nix::errno::Errno; +use crate::error::immut_str::ImmutStr; -use aes256ctr_poly1305aes::aead; -use chrono::OutOfRangeError; -use displaydoc::Display; -use thiserror::Error; - -use crate::{ - backend::node::NodeType, - blob::{tree::TreeId, BlobId}, - repofile::{indexfile::IndexPack, packfile::PackId}, -}; +pub(crate) mod constants { + pub const DEFAULT_DOCS_URL: &str = "https://rustic.cli.rs/docs/errors/"; + pub const DEFAULT_ISSUE_URL: &str = "https://github.com/rustic-rs/rustic_core/issues/new"; +} /// Result type that is being returned from methods that can fail and thus have [`RusticError`]s. -pub type RusticResult = Result; +pub type RusticResult = Result; -// [`Error`] is public, but opaque and easy to keep compatible. -#[derive(Error, Debug)] -#[error(transparent)] +#[derive(thiserror::Error, Debug, Setters)] +#[setters(strip_option)] +#[non_exhaustive] /// Errors that can result from rustic. -pub struct RusticError(#[from] pub(crate) RusticErrorKind); +pub struct RusticError { + /// The kind of the error. + kind: ErrorKind, -// Accessors for anything we do want to expose publicly. -impl RusticError { - /// Expose the inner error kind. - /// - /// This is useful for matching on the error kind. 
- pub fn into_inner(self) -> RusticErrorKind { - self.0 - } + /// Chain to the cause of the error. + source: Option>, - /// Checks if the error is due to an incorrect password - pub fn is_incorrect_password(&self) -> bool { - matches!( - self.0, - RusticErrorKind::Repository(RepositoryErrorKind::IncorrectPassword) - ) - } + /// The error message with guidance. + guidance: ImmutStr, - /// Get the corresponding backend error, if error is caused by the backend. - /// - /// Returns `anyhow::Error`; you need to cast this to the real backend error type - pub fn backend_error(&self) -> Option<&anyhow::Error> { - if let RusticErrorKind::Backend(error) = &self.0 { - Some(error) - } else { - None - } - } -} + /// The context of the error. + context: Vec<(&'static str, String)>, -/// [`RusticErrorKind`] describes the errors that can happen while executing a high-level command. -/// -/// This is a non-exhaustive enum, so additional variants may be added in future. It is -/// recommended to match against the wildcard `_` instead of listing all possible variants, -/// to avoid problems when new variants are added. 
-#[non_exhaustive] -#[derive(Error, Debug)] -pub enum RusticErrorKind { - /// [`CommandErrorKind`] describes the errors that can happen while executing a high-level command - #[error(transparent)] - Command(#[from] CommandErrorKind), - - /// [`CryptoErrorKind`] describes the errors that can happen while dealing with Cryptographic functions - #[error(transparent)] - Crypto(#[from] CryptoErrorKind), - - /// [`PolynomialErrorKind`] describes the errors that can happen while dealing with Polynomials - #[error(transparent)] - Polynomial(#[from] PolynomialErrorKind), - - /// [`IdErrorKind`] describes the errors that can be returned by processing IDs - #[error(transparent)] - Id(#[from] IdErrorKind), - - /// [`RepositoryErrorKind`] describes the errors that can be returned by processing Repositories - #[error(transparent)] - Repository(#[from] RepositoryErrorKind), - - /// [`IndexErrorKind`] describes the errors that can be returned by processing Indizes - #[error(transparent)] - Index(#[from] IndexErrorKind), - - /// describes the errors that can be returned by the various Backends - #[error(transparent)] - Backend(#[from] anyhow::Error), - - /// [`BackendAccessErrorKind`] describes the errors that can be returned by accessing the various Backends - #[error(transparent)] - BackendAccess(#[from] BackendAccessErrorKind), - - /// [`ConfigFileErrorKind`] describes the errors that can be returned for `ConfigFile`s - #[error(transparent)] - ConfigFile(#[from] ConfigFileErrorKind), - - /// [`KeyFileErrorKind`] describes the errors that can be returned for `KeyFile`s - #[error(transparent)] - KeyFile(#[from] KeyFileErrorKind), - - /// [`PackFileErrorKind`] describes the errors that can be returned for `PackFile`s - #[error(transparent)] - PackFile(#[from] PackFileErrorKind), - - /// [`SnapshotFileErrorKind`] describes the errors that can be returned for `SnapshotFile`s - #[error(transparent)] - SnapshotFile(#[from] SnapshotFileErrorKind), - - /// [`PackerErrorKind`] describes 
the errors that can be returned for a Packer - #[error(transparent)] - Packer(#[from] PackerErrorKind), - - /// [`FileErrorKind`] describes the errors that can happen while dealing with files during restore/backups - #[error(transparent)] - File(#[from] FileErrorKind), - - /// [`TreeErrorKind`] describes the errors that can come up dealing with Trees - #[error(transparent)] - Tree(#[from] TreeErrorKind), - - /// [`CacheBackendErrorKind`] describes the errors that can be returned by a Caching action in Backends - #[error(transparent)] - CacheBackend(#[from] CacheBackendErrorKind), - - /// [`CryptBackendErrorKind`] describes the errors that can be returned by a Decryption action in Backends - #[error(transparent)] - CryptBackend(#[from] CryptBackendErrorKind), - - /// [`IgnoreErrorKind`] describes the errors that can be returned by a Ignore action in Backends - #[error(transparent)] - Ignore(#[from] IgnoreErrorKind), - - /// [`LocalDestinationErrorKind`] describes the errors that can be returned by an action on the local filesystem as Destination - #[error(transparent)] - LocalDestination(#[from] LocalDestinationErrorKind), - - /// [`NodeErrorKind`] describes the errors that can be returned by an action utilizing a node in Backends - #[error(transparent)] - Node(#[from] NodeErrorKind), - - /// [`StdInErrorKind`] describes the errors that can be returned while dealing IO from CLI - #[error(transparent)] - StdIn(#[from] StdInErrorKind), - - /// [`ArchiverErrorKind`] describes the errors that can be returned from the archiver - #[error(transparent)] - ArchiverError(#[from] ArchiverErrorKind), - - /// [`VfsErrorKind`] describes the errors that can be returned from the Virtual File System - #[error(transparent)] - VfsError(#[from] VfsErrorKind), - - /// [`std::io::Error`] - #[error(transparent)] - StdIo(#[from] std::io::Error), -} + /// The URL of the documentation for the error. 
+ docs_url: Option, -/// [`CommandErrorKind`] describes the errors that can happen while executing a high-level command -#[derive(Error, Debug, Display)] -pub enum CommandErrorKind { - /// path is no dir: `{0}` - PathIsNoDir(String), - /// used blobs are missing: blob `{0}` doesn't existing - BlobsMissing(BlobId), - /// used pack `{0}`: size does not match! Expected size: `{1}`, real size: `{2}` - PackSizeNotMatching(PackId, u32, u32), - /// used pack `{0}` does not exist! - PackNotExisting(PackId), - /// pack `{0}` got no decision what to do - NoDecision(PackId), - /// [`std::num::ParseFloatError`] - #[error(transparent)] - FromParseFloatError(#[from] ParseFloatError), - /// [`std::num::ParseIntError`] - #[error(transparent)] - FromParseIntError(#[from] ParseIntError), - /// Bytesize parser failed: `{0}` - FromByteSizeParser(String), - /// --repack-uncompressed makes no sense for v1 repo! - RepackUncompressedRepoV1, - /// datetime out of range: `{0}` - FromOutOfRangeError(#[from] OutOfRangeError), - /// node type `{0:?}` not supported by dump - DumpNotSupported(NodeType), - /// [`serde_json::Error`] - #[error(transparent)] - FromJsonError(#[from] serde_json::Error), - /// version `{0}` is not supported. Allowed values: {1:?} - VersionNotSupported(u32, RangeInclusive), - /// cannot downgrade version from `{0}` to `{1}` - CannotDowngrade(u32, u32), - /// compression level `{0}` is not supported for repo v1 - NoCompressionV1Repo(i32), - /// compression level `{0}` is not supported. 
Allowed values: `{1:?}` - CompressionLevelNotSupported(i32, RangeInclusive), - /// Size is too large: `{0}` - SizeTooLarge(bytesize::ByteSize), - /// min_packsize_tolerate_percent must be <= 100 - MinPackSizeTolerateWrong, - /// max_packsize_tolerate_percent must be >= 100 or 0" - MaxPackSizeTolerateWrong, - /// error creating `{0:?}`: `{1:?}` - ErrorCreating(PathBuf, Box), - /// error collecting information for `{0:?}`: `{1:?}` - ErrorCollecting(PathBuf, Box), - /// error setting length for `{0:?}`: `{1:?}` - ErrorSettingLength(PathBuf, Box), - /// [`rayon::ThreadPoolBuildError`] - #[error(transparent)] - FromRayonError(#[from] rayon::ThreadPoolBuildError), - /// Conversion from integer failed: `{0:?}` - ConversionFromIntFailed(TryFromIntError), - /// Not allowed on an append-only repository: `{0}` - NotAllowedWithAppendOnly(String), - /// Specify one of the keep-* options for forget! Please use keep-none to keep no snapshot. - NoKeepOption, - /// [`shell_words::ParseError`] - #[error(transparent)] - FromParseError(#[from] shell_words::ParseError), -} + /// Error code. + code: Option, -/// [`CryptoErrorKind`] describes the errors that can happen while dealing with Cryptographic functions -#[derive(Error, Debug, Display, Copy, Clone)] -pub enum CryptoErrorKind { - /// data decryption failed: `{0:?}` - DataDecryptionFailed(aead::Error), - /// data encryption failed: `{0:?}` - DataEncryptionFailed(aead::Error), - /// crypto key too short - CryptoKeyTooShort, -} + /// The URL of the issue tracker for opening a new issue. + new_issue_url: Option, -/// [`PolynomialErrorKind`] describes the errors that can happen while dealing with Polynomials -#[derive(Error, Debug, Display, Copy, Clone)] -pub enum PolynomialErrorKind { - /// no suitable polynomial found - NoSuitablePolynomialFound, -} + /// The URL of an already existing issue that is related to this error. 
+ existing_issue_url: Option, -/// [`FileErrorKind`] describes the errors that can happen while dealing with files during restore/backups -#[derive(Error, Debug, Display)] -pub enum FileErrorKind { - /// transposing an Option of a Result into a Result of an Option failed: `{0:?}` - TransposingOptionResultFailed(std::io::Error), - /// conversion from `u64` to `usize` failed: `{0:?}` - ConversionFromU64ToUsizeFailed(TryFromIntError), -} + /// Severity of the error. + severity: Option, -/// [`IdErrorKind`] describes the errors that can be returned by processing IDs -#[derive(Error, Debug, Display, Copy, Clone)] -pub enum IdErrorKind { - /// Hex decoding error: `{0:?}` - HexError(hex::FromHexError), -} + /// The status of the error. + status: Option, -/// [`RepositoryErrorKind`] describes the errors that can be returned by processing Repositories -#[derive(Error, Debug, Display)] -pub enum RepositoryErrorKind { - /// No repository given. Please use the --repository option. - NoRepositoryGiven, - /// No password given. Please use one of the --password-* options. - NoPasswordGiven, - /// warm-up command must contain %id! - NoIDSpecified, - /// error opening password file `{0:?}` - OpeningPasswordFileFailed(std::io::Error), - /// No repository config file found. Is there a repo at `{0}`? - NoRepositoryConfigFound(String), - /// More than one repository config file at `{0}`. Aborting. - MoreThanOneRepositoryConfig(String), - /// keys from repo and repo-hot do not match for `{0}`. Aborting. - KeysDontMatchForRepositories(String), - /// repository is a hot repository!\nPlease use as --repo-hot in combination with the normal repo. Aborting. - HotRepositoryFlagMissing, - /// repo-hot is not a hot repository! Aborting. - IsNotHotRepository, - /// incorrect password! 
- IncorrectPassword, - /// error running the password command - PasswordCommandExecutionFailed, - /// error reading password from command - ReadingPasswordFromCommandFailed, - /// running command `{0}`:`{1}` was not successful: `{2}` - CommandExecutionFailed(String, String, std::io::Error), - /// running command {0}:{1} returned status: `{2}` - CommandErrorStatus(String, String, ExitStatus), - /// error listing the repo config file - ListingRepositoryConfigFileFailed, - /// error listing the repo keys - ListingRepositoryKeysFailed, - /// error listing the hot repo keys - ListingHotRepositoryKeysFailed, - /// error accessing config file - AccessToConfigFileFailed, - /// Thread pool build error: `{0:?}` - FromThreadPoolbilderError(rayon::ThreadPoolBuildError), - /// reading Password failed: `{0:?}` - ReadingPasswordFromReaderFailed(std::io::Error), - /// reading Password from prompt failed: `{0:?}` - ReadingPasswordFromPromptFailed(std::io::Error), - /// Config file already exists. Aborting. - ConfigFileExists, - /// did not find id `{0}` in index - IdNotFound(BlobId), - /// no suitable backend type found - NoBackendTypeGiven, + /// Backtrace of the error. 
+ backtrace: Option, } -/// [`IndexErrorKind`] describes the errors that can be returned by processing Indizes -#[derive(Error, Debug, Display, Clone, Copy)] -pub enum IndexErrorKind { - /// blob not found in index - BlobInIndexNotFound, - /// failed to get a blob from the backend - GettingBlobIndexEntryFromBackendFailed, - /// saving IndexFile failed - SavingIndexFileFailed, -} +impl Display for RusticError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "An error occurred in `rustic_core`: {}", self.kind)?; + + write!(f, "\nMessage: {}", self.guidance)?; + + if !self.context.is_empty() { + write!(f, "\n\n Context:\n")?; + write!( + f, + "{}", + self.context + .iter() + .map(|(k, v)| format!("{k}: {v}")) + .collect::>() + .join(", ") + )?; + } -/// [`BackendAccessErrorKind`] describes the errors that can be returned by the various Backends -#[derive(Error, Debug, Display)] -pub enum BackendAccessErrorKind { - /// backend `{0:?}` is not supported! - BackendNotSupported(String), - /// backend `{0}` cannot be loaded: {1:?} - BackendLoadError(String, anyhow::Error), - /// no suitable id found for `{0}` - NoSuitableIdFound(String), - /// id `{0}` is not unique - IdNotUnique(String), - /// [`std::io::Error`] - #[error(transparent)] - FromIoError(#[from] std::io::Error), - /// [`std::num::TryFromIntError`] - #[error(transparent)] - FromTryIntError(#[from] TryFromIntError), - /// [`LocalDestinationErrorKind`] - #[error(transparent)] - FromLocalError(#[from] LocalDestinationErrorKind), - /// [`IdErrorKind`] - #[error(transparent)] - FromIdError(#[from] IdErrorKind), - /// [`IndexErrorKind`] - #[error(transparent)] - FromIgnoreError(#[from] IgnoreErrorKind), - /// [`CryptBackendErrorKind`] - #[error(transparent)] - FromBackendDecryptionError(#[from] CryptBackendErrorKind), - /// [`ignore::Error`] - #[error(transparent)] - GenericError(#[from] ignore::Error), - /// creating data in backend failed - CreatingDataOnBackendFailed, - /// writing bytes 
to backend failed - WritingBytesToBackendFailed, - /// removing data from backend failed - RemovingDataFromBackendFailed, - /// failed to list files on Backend - ListingFilesOnBackendFailed, - /// Path is not allowed: `{0:?}` - PathNotAllowed(PathBuf), -} + if let Some(cause) = &self.source { + write!(f, "\n\nCaused by: {cause}")?; + } -/// [`ConfigFileErrorKind`] describes the errors that can be returned for `ConfigFile`s -#[derive(Error, Debug, Display)] -pub enum ConfigFileErrorKind { - /// config version not supported! - ConfigVersionNotSupported, - /// Parsing Polynomial in config failed: `{0:?}` - ParsingFailedForPolynomial(#[from] ParseIntError), -} + if let Some(severity) = &self.severity { + write!(f, "\n\nSeverity: {severity:?}", severity = severity)?; + } -/// [`KeyFileErrorKind`] describes the errors that can be returned for `KeyFile`s -#[derive(Error, Debug, Display)] -pub enum KeyFileErrorKind { - /// no suitable key found! - NoSuitableKeyFound, - /// listing KeyFiles failed - ListingKeyFilesFailed, - /// couldn't get KeyFile from backend - CouldNotGetKeyFileFromBackend, - /// serde_json couldn't deserialize the data: `{0:?}` - DeserializingFromSliceFailed(serde_json::Error), - /// couldn't encrypt data: `{0:?}` - CouldNotEncryptData(#[from] CryptoErrorKind), - /// serde_json couldn't serialize the data into a JSON byte vector: `{0:?}` - CouldNotSerializeAsJsonByteVector(serde_json::Error), - /// conversion from `u32` to `u8` failed: `{0:?}` - ConversionFromU32ToU8Failed(TryFromIntError), - /// output length is invalid: `{0:?}` - OutputLengthInvalid(scrypt::errors::InvalidOutputLen), - /// invalid scrypt parameters: `{0:?}` - InvalidSCryptParameters(scrypt::errors::InvalidParams), -} + if let Some(status) = &self.status { + write!(f, "\n\nStatus: {status:?}")?; + } -/// [`PackFileErrorKind`] describes the errors that can be returned for `PackFile`s -#[derive(Error, Debug, Display)] -pub enum PackFileErrorKind { - /// Failed reading binary 
representation of the pack header: `{0:?}` - ReadingBinaryRepresentationFailed(binrw::Error), - /// Failed writing binary representation of the pack header: `{0:?}` - WritingBinaryRepresentationFailed(binrw::Error), - /// Read header length is too large! Length: `{size_real}`, file size: `{pack_size}` - HeaderLengthTooLarge { size_real: u32, pack_size: u32 }, - /// Read header length doesn't match header contents! Length: `{size_real}`, computed: `{size_computed}` - HeaderLengthDoesNotMatchHeaderContents { size_real: u32, size_computed: u32 }, - /// pack size computed from header doesn't match real pack isch! Computed: `{size_computed}`, real: `{size_real}` - HeaderPackSizeComputedDoesNotMatchRealPackFile { size_real: u32, size_computed: u32 }, - /// partially reading the pack header from packfile failed: `{0:?}` - ListingKeyFilesFailed(#[from] BackendAccessErrorKind), - /// decrypting from binary failed - BinaryDecryptionFailed, - /// Partial read of PackFile failed - PartialReadOfPackfileFailed, - /// writing Bytes failed - WritingBytesFailed, - /// [`CryptBackendErrorKind`] - #[error(transparent)] - PackDecryptionFailed(#[from] CryptBackendErrorKind), -} + if let Some(code) = &self.code { + let default_docs_url = ImmutStr::from(constants::DEFAULT_DOCS_URL); + let docs_url = self.docs_url.as_ref().unwrap_or(&default_docs_url); -/// [`SnapshotFileErrorKind`] describes the errors that can be returned for `SnapshotFile`s -#[derive(Error, Debug, Display)] -pub enum SnapshotFileErrorKind { - /// non-unicode hostname `{0:?}` - NonUnicodeHostname(OsString), - /// non-unicode path `{0:?}` - NonUnicodePath(PathBuf), - /// no snapshots found - NoSnapshotsFound, - /// value `{0:?}` not allowed - ValueNotAllowed(String), - /// datetime out of range: `{0:?}` - OutOfRange(#[from] OutOfRangeError), - /// reading the description file failed: `{0:?}` - ReadingDescriptionFailed(#[from] std::io::Error), - /// getting the SnapshotFile from the backend failed - 
GettingSnapshotFileFailed, - /// getting the SnapshotFile by ID failed - GettingSnapshotFileByIdFailed, - /// unpacking SnapshotFile result failed - UnpackingSnapshotFileResultFailed, - /// collecting IDs failed: `{0:?}` - FindingIdsFailed(Vec), - /// removing dots from paths failed: `{0:?}` - RemovingDotsFromPathFailed(std::io::Error), - /// canonicalizing path failed: `{0:?}` - CanonicalizingPathFailed(std::io::Error), -} + write!(f, "\n\nFor more information, see: {docs_url}/{code}")?; + } -/// [`PackerErrorKind`] describes the errors that can be returned for a Packer -#[derive(Error, Debug, Display)] -pub enum PackerErrorKind { - /// error returned by cryptographic libraries: `{0:?}` - CryptoError(#[from] CryptoErrorKind), - /// could not compress due to unsupported config version: `{0:?}` - ConfigVersionNotSupported(#[from] ConfigFileErrorKind), - /// compressing data failed: `{0:?}` - CompressingDataFailed(#[from] std::io::Error), - /// getting total size failed - GettingTotalSizeFailed, - /// [`crossbeam_channel::SendError`] - #[error(transparent)] - SendingCrossbeamMessageFailed( - #[from] crossbeam_channel::SendError<(bytes::Bytes, BlobId, Option)>, - ), - /// [`crossbeam_channel::SendError`] - #[error(transparent)] - SendingCrossbeamMessageFailedForIndexPack( - #[from] crossbeam_channel::SendError<(bytes::Bytes, IndexPack)>, - ), - /// couldn't create binary representation for pack header: `{0:?}` - CouldNotCreateBinaryRepresentationForHeader(#[from] PackFileErrorKind), - /// failed to write bytes in backend: `{0:?}` - WritingBytesFailedInBackend(#[from] BackendAccessErrorKind), - /// failed to write bytes for PackFile: `{0:?}` - WritingBytesFailedForPackFile(PackFileErrorKind), - /// failed to read partially encrypted data: `{0:?}` - ReadingPartiallyEncryptedDataFailed(#[from] CryptBackendErrorKind), - /// failed to partially read data: `{0:?}` - PartiallyReadingDataFailed(PackFileErrorKind), - /// failed to add index pack: `{0:?}` - 
AddingIndexPackFailed(#[from] IndexErrorKind), - /// conversion for integer failed: `{0:?}` - IntConversionFailed(#[from] TryFromIntError), -} + if let Some(existing_issue_url) = &self.existing_issue_url { + write!(f, "\n\nThis might be a related issue, please check it for a possible workaround and/or further guidance: {existing_issue_url}")?; + } -/// [`TreeErrorKind`] describes the errors that can come up dealing with Trees -#[derive(Error, Debug, Display)] -pub enum TreeErrorKind { - /// blob `{0}` not found in index - BlobIdNotFound(TreeId), - /// `{0:?}` is not a directory - NotADirectory(OsString), - /// Path `{0:?}` not found - PathNotFound(OsString), - /// path should not contain current or parent dir - ContainsCurrentOrParentDirectory, - /// serde_json couldn't serialize the tree: `{0:?}` - SerializingTreeFailed(#[from] serde_json::Error), - /// serde_json couldn't deserialize tree from bytes of JSON text: `{0:?}` - DeserializingTreeFailed(serde_json::Error), - /// reading blob data failed `{0:?}` - ReadingBlobDataFailed(#[from] IndexErrorKind), - /// slice is not UTF-8: `{0:?}` - PathIsNotUtf8Conform(#[from] Utf8Error), - /// error in building nodestreamer: `{0:?}` - BuildingNodeStreamerFailed(#[from] ignore::Error), - /// failed to read file string from glob file: `{0:?}` - ReadingFileStringFromGlobsFailed(#[from] std::io::Error), - /// [`crossbeam_channel::SendError`] - #[error(transparent)] - SendingCrossbeamMessageFailed(#[from] crossbeam_channel::SendError<(PathBuf, TreeId, usize)>), - /// [`crossbeam_channel::RecvError`] - #[error(transparent)] - ReceivingCrossbreamMessageFailed(#[from] crossbeam_channel::RecvError), -} + let default_issue_url = ImmutStr::from(constants::DEFAULT_ISSUE_URL); + let new_issue_url = self.new_issue_url.as_ref().unwrap_or(&default_issue_url); -/// [`CacheBackendErrorKind`] describes the errors that can be returned by a Caching action in Backends -#[derive(Error, Debug, Display)] -pub enum CacheBackendErrorKind { - /// no 
cache dir - NoCacheDirectory, - /// [`std::io::Error`] - #[error(transparent)] - FromIoError(#[from] std::io::Error), - /// setting option on CacheBackend failed - SettingOptionOnCacheBackendFailed, - /// listing with size on CacheBackend failed - ListingWithSizeOnCacheBackendFailed, - /// fully reading from CacheBackend failed - FullyReadingFromCacheBackendFailed, - /// partially reading from CacheBackend failed - PartiallyReadingFromBackendDataFailed, - /// creating data on CacheBackend failed - CreatingDataOnCacheBackendFailed, - /// writing bytes on CacheBackend failed - WritingBytesOnCacheBackendFailed, - /// removing data on CacheBackend failed - RemovingDataOnCacheBackendFailed, -} + write!( + f, + "\n\nIf you think this is an undiscovered bug, please open an issue at: {new_issue_url}" + )?; -/// [`CryptBackendErrorKind`] describes the errors that can be returned by a Decryption action in Backends -#[derive(Error, Debug, Display)] -pub enum CryptBackendErrorKind { - /// decryption not supported for backend - DecryptionNotSupportedForBackend, - /// length of uncompressed data does not match! 
- LengthOfUncompressedDataDoesNotMatch, - /// failed to read encrypted data during full read - DecryptionInFullReadFailed, - /// failed to read encrypted data during partial read - DecryptionInPartialReadFailed, - /// decrypting from backend failed - DecryptingFromBackendFailed, - /// deserializing from bytes of JSON Text failed: `{0:?}` - DeserializingFromBytesOfJsonTextFailed(serde_json::Error), - /// failed to write data in crypt backend - WritingDataInCryptBackendFailed, - /// failed to list Ids - ListingIdsInDecryptionBackendFailed, - /// [`CryptoErrorKind`] - #[error(transparent)] - FromKey(#[from] CryptoErrorKind), - /// [`std::io::Error`] - #[error(transparent)] - FromIo(#[from] std::io::Error), - /// [`serde_json::Error`] - #[error(transparent)] - FromJson(#[from] serde_json::Error), - /// writing full hash failed in CryptBackend - WritingFullHashFailed, - /// decoding Zstd compressed data failed: `{0:?}` - DecodingZstdCompressedDataFailed(std::io::Error), - /// Serializing to JSON byte vector failed: `{0:?}` - SerializingToJsonByteVectorFailed(serde_json::Error), - /// encrypting data failed - EncryptingDataFailed, - /// Compressing and appending data failed: `{0:?}` - CopyEncodingDataFailed(std::io::Error), - /// conversion for integer failed: `{0:?}` - IntConversionFailed(#[from] TryFromIntError), - /// Extra verification failed: After decrypting and decompressing the data changed! 
- ExtraVerificationFailed, -} + if let Some(backtrace) = &self.backtrace { + write!(f, "\n\nBacktrace:\n{:?}", backtrace)?; + } -/// [`IgnoreErrorKind`] describes the errors that can be returned by a Ignore action in Backends -#[derive(Error, Debug, Display)] -pub enum IgnoreErrorKind { - /// generic Ignore error: `{0:?}` - GenericError(#[from] ignore::Error), - /// Error reading glob file `{file:?}`: `{source:?}` - ErrorGlob { - file: PathBuf, - source: std::io::Error, - }, - /// Unable to open file `{file:?}`: `{source:?}` - UnableToOpenFile { - file: PathBuf, - source: std::io::Error, - }, - /// Error getting xattrs for `{path:?}`: `{source:?}` - ErrorXattr { - path: PathBuf, - source: std::io::Error, - }, - /// Error reading link target for `{path:?}`: `{source:?}` - ErrorLink { - path: PathBuf, - source: std::io::Error, - }, - /// [`std::num::TryFromIntError`] - #[error(transparent)] - FromTryFromIntError(#[from] TryFromIntError), + Ok(()) + } } -/// [`LocalDestinationErrorKind`] describes the errors that can be returned by an action on the filesystem in Backends -#[derive(Error, Debug, Display)] -pub enum LocalDestinationErrorKind { - /// directory creation failed: `{0:?}` - DirectoryCreationFailed(#[from] std::io::Error), - /// file `{0:?}` should have a parent - FileDoesNotHaveParent(PathBuf), - /// [`std::num::TryFromIntError`] - #[error(transparent)] - FromTryIntError(#[from] TryFromIntError), - /// [`IdErrorKind`] - #[error(transparent)] - FromIdError(#[from] IdErrorKind), - /// [`walkdir::Error`] - #[error(transparent)] - FromWalkdirError(#[from] walkdir::Error), - /// [`Errno`] - #[error(transparent)] - #[cfg(not(windows))] - FromErrnoError(#[from] Errno), - /// listing xattrs on `{path:?}`: `{source:?}` - #[cfg(not(any(windows, target_os = "openbsd")))] - ListingXattrsFailed { - path: PathBuf, - source: std::io::Error, - }, - /// setting xattr `{name}` on `{filename:?}` with `{source:?}` - #[cfg(not(any(windows, target_os = "openbsd")))] - 
SettingXattrFailed { - name: String, - filename: PathBuf, - source: std::io::Error, - }, - /// getting xattr `{name}` on `{filename:?}` with `{source:?}` - #[cfg(not(any(windows, target_os = "openbsd")))] - GettingXattrFailed { - name: String, - filename: PathBuf, - source: std::io::Error, - }, - /// removing directories failed: `{0:?}` - DirectoryRemovalFailed(std::io::Error), - /// removing file failed: `{0:?}` - FileRemovalFailed(std::io::Error), - /// setting time metadata failed: `{0:?}` - SettingTimeMetadataFailed(std::io::Error), - /// opening file failed: `{0:?}` - OpeningFileFailed(std::io::Error), - /// setting file length failed: `{0:?}` - SettingFileLengthFailed(std::io::Error), - /// can't jump to position in file: `{0:?}` - CouldNotSeekToPositionInFile(std::io::Error), - /// couldn't write to buffer: `{0:?}` - CouldNotWriteToBuffer(std::io::Error), - /// reading exact length of file contents failed: `{0:?}` - ReadingExactLengthOfFileFailed(std::io::Error), - /// setting file permissions failed: `{0:?}` - #[cfg(not(windows))] - SettingFilePermissionsFailed(std::io::Error), - /// failed to symlink target `{linktarget:?}` from `{filename:?}` with `{source:?}` - #[cfg(not(windows))] - SymlinkingFailed { - linktarget: PathBuf, - filename: PathBuf, - source: std::io::Error, - }, +// Accessors for anything we do want to expose publicly. +impl RusticError { + pub fn new(kind: ErrorKind, guidance: impl Into) -> Self { + Self { + kind, + guidance: guidance.into().into(), + context: Vec::default(), + source: None, + code: None, + docs_url: None, + new_issue_url: None, + existing_issue_url: None, + severity: None, + status: None, + // `Backtrace::capture()` will check if backtrace has been enabled + // internally. It's zero cost if backtrace is disabled. + backtrace: Some(Backtrace::capture()), + } + } + + /// Expose the inner error kind. + /// + /// This is useful for matching on the error kind. 
+ pub fn into_inner(self) -> ErrorKind { + self.kind + } + + /// Checks if the error is due to an incorrect password + pub fn is_incorrect_password(&self) -> bool { + matches!(self.kind, ErrorKind::Password) + } + + pub fn from( + error: T, + kind: ErrorKind, + ) -> Self { + Self { + kind, + guidance: error.to_string().into(), + context: Vec::default(), + source: Some(Box::new(error)), + code: None, + docs_url: None, + new_issue_url: None, + existing_issue_url: None, + severity: None, + status: None, + // `Backtrace::capture()` will check if backtrace has been enabled + // internally. It's zero cost if backtrace is disabled. + backtrace: Some(Backtrace::capture()), + } + } + + pub fn add_context(mut self, key: &'static str, value: impl Into) -> Self { + self.context.push((key, value.into())); + self + } } -/// [`NodeErrorKind`] describes the errors that can be returned by an action utilizing a node in Backends -#[derive(Error, Debug, Display)] -pub enum NodeErrorKind { - /// Parsing integer failed: `{0:?}` - FromParseIntError(#[from] ParseIntError), - /// Unexpected EOF - #[cfg(not(windows))] - UnexpectedEOF, - /// Invalid unicode - #[cfg(not(windows))] - InvalidUnicode, - /// Unrecognized Escape - #[cfg(not(windows))] - UnrecognizedEscape, +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Severity { + Info, + Warning, + Error, + Fatal, } -/// [`StdInErrorKind`] describes the errors that can be returned while dealing IO from CLI -#[derive(Error, Debug, Display)] -pub enum StdInErrorKind { - /// error reading from stdin: `{0:?}` - StdInError(#[from] std::io::Error), +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Status { + Permanent, + Temporary, + Persistent, } -/// [`ArchiverErrorKind`] describes the errors that can be returned from the archiver -#[derive(Error, Debug, Display)] -pub enum ArchiverErrorKind { - /// tree stack empty - TreeStackEmpty, - /// cannot open file - OpeningFileFailed, - /// option should contain a value, but contained `None` 
- UnpackingTreeTypeOptionalFailed, - /// couldn't get size for archive: `{0:?}` - CouldNotGetSizeForArchive(#[from] BackendAccessErrorKind), - /// couldn't determine size for item in Archiver - CouldNotDetermineSize, - /// failed to save index: `{0:?}` - IndexSavingFailed(#[from] IndexErrorKind), - /// failed to save file in backend: `{0:?}` - FailedToSaveFileInBackend(#[from] CryptBackendErrorKind), - /// finalizing SnapshotSummary failed: `{0:?}` - FinalizingSnapshotSummaryFailed(#[from] SnapshotFileErrorKind), - /// [`PackerErrorKind`] - #[error(transparent)] - FromPacker(#[from] PackerErrorKind), - /// [`TreeErrorKind`] - #[error(transparent)] - FromTree(#[from] TreeErrorKind), - /// [`ConfigFileErrorKind`] - #[error(transparent)] - FromConfigFile(#[from] ConfigFileErrorKind), - /// [`std::io::Error`] - #[error(transparent)] - FromStdIo(#[from] std::io::Error), - /// [`StripPrefixError`] - #[error(transparent)] - FromStripPrefix(#[from] StripPrefixError), - /// conversion from `u64` to `usize` failed: `{0:?}` - ConversionFromU64ToUsizeFailed(TryFromIntError), +/// [`ErrorKind`] describes the errors that can happen while executing a high-level command. +/// +/// This is a non-exhaustive enum, so additional variants may be added in future. It is +/// recommended to match against the wildcard `_` instead of listing all possible variants, +/// to avoid problems when new variants are added. 
+#[non_exhaustive] +#[derive(thiserror::Error, Debug, displaydoc::Display)] +pub enum ErrorKind { + /// Backend Error + Backend, + /// IO Error + Io, + /// Password Error + Password, + /// Repository Error + Repository, + /// Command Error + Command, + /// Config Error + Config, + /// Index Error + Index, + /// Key Error + Key, + /// Blob Error + Blob, + /// Crypto Error + Crypto, + /// Compression Error + Compression, + /// Parsing Error + Parsing, + /// Conversion Error + Conversion, + /// Permission Error + Permission, + /// Polynomial Error + Polynomial, + // /// The repository password is incorrect. Please try again. + // IncorrectRepositoryPassword, + // /// No repository given. Please use the --repository option. + // NoRepositoryGiven, + // /// No password given. Please use one of the --password-* options. + // NoPasswordGiven, + // /// warm-up command must contain %id! + // NoIDSpecified, + // /// error opening password file `{0:?}` + // OpeningPasswordFileFailed(std::io::Error), + // /// No repository config file found. Is there a repo at `{0}`? + // NoRepositoryConfigFound(String), + // /// More than one repository config file at `{0}`. Aborting. + // MoreThanOneRepositoryConfig(String), + // /// keys from repo and repo-hot do not match for `{0}`. Aborting. + // KeysDontMatchForRepositories(String), + // /// repository is a hot repository!\nPlease use as --repo-hot in combination with the normal repo. Aborting. + // HotRepositoryFlagMissing, + // /// repo-hot is not a hot repository! Aborting. + // IsNotHotRepository, + // /// incorrect password! 
+ // IncorrectPassword, + // /// error running the password command + // PasswordCommandExecutionFailed, + // /// error reading password from command + // ReadingPasswordFromCommandFailed, + // /// running command `{0}`:`{1}` was not successful: `{2}` + // CommandExecutionFailed(String, String, std::io::Error), + // /// running command {0}:{1} returned status: `{2}` + // CommandErrorStatus(String, String, ExitStatus), + // /// error listing the repo config file + // ListingRepositoryConfigFileFailed, + // /// error listing the repo keys + // ListingRepositoryKeysFailed, + // /// error listing the hot repo keys + // ListingHotRepositoryKeysFailed, + // /// error accessing config file + // AccessToConfigFileFailed, + // /// Thread pool build error: `{0:?}` + // FromThreadPoolbilderError(rayon::ThreadPoolBuildError), + // /// reading Password failed: `{0:?}` + // ReadingPasswordFromReaderFailed(std::io::Error), + // /// reading Password from prompt failed: `{0:?}` + // ReadingPasswordFromPromptFailed(std::io::Error), + // /// Config file already exists. Aborting. 
+ // ConfigFileExists, + // /// did not find id `{0}` in index + // IdNotFound(BlobId), + // /// no suitable backend type found + // NoBackendTypeGiven, + // /// Hex decoding error: `{0:?}` + // HexError(hex::FromHexError), } -/// [`VfsErrorKind`] describes the errors that can be returned from the Virtual File System -#[derive(Error, Debug, Display)] -pub enum VfsErrorKind { - /// No directory entries for symlink found: `{0:?}` - NoDirectoryEntriesForSymlinkFound(OsString), - /// Directory exists as non-virtual directory - DirectoryExistsAsNonVirtual, - /// Only normal paths allowed - OnlyNormalPathsAreAllowed, - /// Name `{0:?}`` doesn't exist - NameDoesNotExist(OsString), +// TODO: Possible more general categories for errors for RusticErrorKind (WIP): +// +// - **JSON Parsing Errors**: e.g., `serde_json::Error` +// - **Version Errors**: e.g., `VersionNotSupported`, `CannotDowngrade` +// - **Compression Errors**: e.g., `NoCompressionV1Repo`, `CompressionLevelNotSupported` +// - **Size Errors**: e.g., `SizeTooLarge` +// - **File and Path Errors**: e.g., `ErrorCreating`, `ErrorCollecting`, `ErrorSettingLength` +// - **Thread Pool Errors**: e.g., `rayon::ThreadPoolBuildError` +// - **Conversion Errors**: e.g., `ConversionFromIntFailed` +// - **Permission Errors**: e.g., `NotAllowedWithAppendOnly` +// - **Parsing Errors**: e.g., `shell_words::ParseError` +// - **Cryptographic Errors**: e.g., `DataDecryptionFailed`, `DataEncryptionFailed`, `CryptoKeyTooShort` +// - **Polynomial Errors**: e.g., `NoSuitablePolynomialFound` +// - **File Handling Errors**: e.g., `TransposingOptionResultFailed`, `ConversionFromU64ToUsizeFailed` +// - **ID Processing Errors**: e.g., `HexError` +// - **Repository Errors**: general repository-related errors +// - **Backend Access Errors**: e.g., `BackendNotSupported`, `BackendLoadError`, `NoSuitableIdFound`, `IdNotUnique` +// - **Rclone Errors**: e.g., `NoOutputForRcloneVersion`, `NoStdOutForRclone`, `RCloneExitWithBadStatus` +// - **REST API 
Errors**: e.g., `NotSupportedForRetry`, `UrlParsingFailed` + +pub mod immut_str { + //! Copyright 2024 Cloudflare, Inc. + //! + //! Licensed under the Apache License, Version 2.0 (the "License"); + //! you may not use this file except in compliance with the License. + //! You may obtain a copy of the License at + //! + //! http://www.apache.org/licenses/LICENSE-2.0 + //! + //! Unless required by applicable law or agreed to in writing, software + //! distributed under the License is distributed on an "AS IS" BASIS, + //! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + //! See the License for the specific language governing permissions and + //! limitations under the License. + //! + //! Taken from + + use std::fmt; + + /// A data struct that holds either immutable string or reference to static str. + /// Compared to String or `Box`, it avoids memory allocation on static str. + #[derive(Debug, PartialEq, Eq, Clone)] + pub enum ImmutStr { + Static(&'static str), + Owned(Box), + } + + impl ImmutStr { + #[inline] + pub fn as_str(&self) -> &str { + match self { + ImmutStr::Static(s) => s, + ImmutStr::Owned(s) => s.as_ref(), + } + } + + pub fn is_owned(&self) -> bool { + match self { + ImmutStr::Static(_) => false, + ImmutStr::Owned(_) => true, + } + } + } + + impl fmt::Display for ImmutStr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.as_str()) + } + } + + impl From<&'static str> for ImmutStr { + fn from(s: &'static str) -> Self { + ImmutStr::Static(s) + } + } + + impl From for ImmutStr { + fn from(s: String) -> Self { + ImmutStr::Owned(s.into_boxed_str()) + } + } + + #[cfg(test)] + mod tests { + use super::*; + + #[test] + fn test_static_vs_owned() { + let s: ImmutStr = "test".into(); + assert!(!s.is_owned()); + let s: ImmutStr = "test".to_string().into(); + assert!(s.is_owned()); + } + } } -trait RusticErrorMarker: Error {} - -impl RusticErrorMarker for CryptoErrorKind {} -impl RusticErrorMarker for 
PolynomialErrorKind {} -impl RusticErrorMarker for IdErrorKind {} -impl RusticErrorMarker for RepositoryErrorKind {} -impl RusticErrorMarker for IndexErrorKind {} -impl RusticErrorMarker for BackendAccessErrorKind {} -impl RusticErrorMarker for ConfigFileErrorKind {} -impl RusticErrorMarker for KeyFileErrorKind {} -impl RusticErrorMarker for PackFileErrorKind {} -impl RusticErrorMarker for SnapshotFileErrorKind {} -impl RusticErrorMarker for PackerErrorKind {} -impl RusticErrorMarker for FileErrorKind {} -impl RusticErrorMarker for TreeErrorKind {} -impl RusticErrorMarker for CacheBackendErrorKind {} -impl RusticErrorMarker for CryptBackendErrorKind {} -impl RusticErrorMarker for IgnoreErrorKind {} -impl RusticErrorMarker for LocalDestinationErrorKind {} -impl RusticErrorMarker for NodeErrorKind {} -impl RusticErrorMarker for StdInErrorKind {} -impl RusticErrorMarker for ArchiverErrorKind {} -impl RusticErrorMarker for CommandErrorKind {} -impl RusticErrorMarker for VfsErrorKind {} -impl RusticErrorMarker for std::io::Error {} - -impl From for RusticError -where - E: RusticErrorMarker, - RusticErrorKind: From, -{ - fn from(value: E) -> Self { - Self(RusticErrorKind::from(value)) +#[cfg(test)] +mod tests { + use std::sync::LazyLock; + + use super::*; + + static TEST_ERROR: LazyLock = LazyLock::new(|| RusticError { + kind: ErrorKind::Io, + guidance: + "A file could not be read, make sure the file is existing and readable by the system." 
+ .to_string(), + status: Some(Status::Permanent), + severity: Some(Severity::Error), + code: Some("E001".to_string().into()), + context: vec![ + ("path", "/path/to/file".to_string()), + ("called", "used s3 backend".to_string()), + ], + source: Some(Box::new(std::io::Error::new( + std::io::ErrorKind::Other, + "networking error", + ))), + backtrace: Some(Backtrace::disabled()), + docs_url: None, + new_issue_url: None, + existing_issue_url: None, + }); + + #[test] + fn test_error_display() { + todo!("Implement test_error_display"); + } + + #[test] + fn test_error_debug() { + todo!("Implement test_error_debug"); } } diff --git a/crates/core/src/id.rs b/crates/core/src/id.rs index 6f9c59e51..6f1de2ebc 100644 --- a/crates/core/src/id.rs +++ b/crates/core/src/id.rs @@ -7,7 +7,10 @@ use derive_more::{Constructor, Display}; use rand::{thread_rng, RngCore}; use serde_derive::{Deserialize, Serialize}; -use crate::{crypto::hasher::hash, error::IdErrorKind, RusticError, RusticResult}; +use crate::{ + crypto::hasher::hash, + error::{RusticError, ErrorKind, RusticResult}, +}; pub(super) mod constants { /// The length of the hash in bytes @@ -40,6 +43,14 @@ macro_rules! define_new_id_struct { )] #[serde(transparent)] pub struct $a($crate::Id); + + impl $a { + /// impl into_inner + #[must_use] + pub fn into_inner(self) -> $crate::Id { + self.0 + } + } }; } @@ -74,7 +85,12 @@ impl FromStr for Id { type Err = RusticError; fn from_str(s: &str) -> Result { let mut id = Self::default(); - hex::decode_to_slice(s, &mut id.0).map_err(IdErrorKind::HexError)?; + hex::decode_to_slice(s, &mut id.0).map_err(|err| { + RusticError::new(ErrorKind::Parsing, + format!("Failed to decode hex string into Id. 
The string must be a valid hexadecimal string: {s}") + ).source(err.into()) + })?; + Ok(id) } } diff --git a/crates/core/src/index.rs b/crates/core/src/index.rs index 60630a083..3ab84bfde 100644 --- a/crates/core/src/index.rs +++ b/crates/core/src/index.rs @@ -4,21 +4,37 @@ use bytes::Bytes; use derive_more::Constructor; use crate::{ - backend::{decrypt::DecryptReadBackend, FileType}, + backend::{decrypt::DecryptReadBackend, CryptBackendErrorKind, FileType}, blob::{tree::TreeId, BlobId, BlobType, DataId}, - error::IndexErrorKind, + error::RusticResult, index::binarysorted::{Index, IndexCollector, IndexType}, progress::Progress, repofile::{ indexfile::{IndexBlob, IndexFile}, packfile::PackId, }, - RusticResult, }; pub(crate) mod binarysorted; pub(crate) mod indexer; +/// [`IndexErrorKind`] describes the errors that can be returned by processing indices +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum IndexErrorKind { + /// blob not found in index + BlobInIndexNotFound, + /// failed to get a blob from the backend + GettingBlobIndexEntryFromBackendFailed, + /// saving IndexFile failed + SavingIndexFileFailed { + /// the error that occurred + source: CryptBackendErrorKind, + }, +} + +pub(crate) type IndexResult = Result; + /// An entry in the index #[derive(Debug, Clone, Copy, PartialEq, Eq, Constructor)] pub struct IndexEntry { @@ -70,6 +86,7 @@ impl IndexEntry { self.length, self.uncompressed_length, )?; + Ok(data) } @@ -185,7 +202,7 @@ pub trait ReadIndex { id: &BlobId, ) -> RusticResult { self.get_id(tpe, id).map_or_else( - || Err(IndexErrorKind::BlobInIndexNotFound.into()), + || Err(IndexErrorKind::BlobInIndexNotFound).map_err(|_err| todo!("Error transition")), |ie| ie.read_data(be), ) } diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index ea763bdb3..975911687 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -145,7 +145,7 @@ pub use crate::{ repoinfo::{BlobInfo, IndexInfos, PackInfo, 
RepoFileInfo, RepoFileInfos}, restore::{FileDirStats, RestoreOptions, RestorePlan, RestoreStats}, }, - error::{RusticError, RusticResult}, + error::{ErrorKind, RusticError, RusticResult, Severity}, id::{HexId, Id}, progress::{NoProgress, NoProgressBars, Progress, ProgressBars}, repofile::snapshotfile::{ diff --git a/crates/core/src/repofile/configfile.rs b/crates/core/src/repofile/configfile.rs index 278a95bf7..8a19899fa 100644 --- a/crates/core/src/repofile/configfile.rs +++ b/crates/core/src/repofile/configfile.rs @@ -1,11 +1,33 @@ +use std::num::ParseIntError; + use serde_derive::{Deserialize, Serialize}; use serde_with::skip_serializing_none; use crate::{ - backend::FileType, blob::BlobType, define_new_id_struct, error::ConfigFileErrorKind, - impl_repofile, repofile::RepoFile, RusticResult, + backend::FileType, blob::BlobType, define_new_id_struct, error::RusticResult, impl_repofile, + repofile::RepoFile, }; +/// [`ConfigFileErrorKind`] describes the errors that can be returned for `ConfigFile`s +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum ConfigFileErrorKind { + /// config version not supported: {version}, compression: {compression:?} + ConfigVersionNotSupported { + /// The version of the config + version: u32, + /// The compression level + compression: Option, + }, + /// Parsing failed for polynomial: {polynomial} : {source} + ParsingFailedForPolynomial { + polynomial: String, + source: ParseIntError, + }, +} + +pub(crate) type ConfigFileResult = Result; + pub(super) mod constants { pub(super) const KB: u32 = 1024; @@ -133,10 +155,14 @@ impl ConfigFile { /// /// * [`ConfigFileErrorKind::ParsingFailedForPolynomial`] - If the polynomial could not be parsed /// - /// [`ConfigFileErrorKind::ParsingFailedForPolynomial`]: crate::error::ConfigFileErrorKind::ParsingFailedForPolynomial + /// [`ConfigFileErrorKind::ParsingFailedForPolynomial`]: ConfigFileErrorKind::ParsingFailedForPolynomial pub fn poly(&self) -> RusticResult { 
Ok(u64::from_str_radix(&self.chunker_polynomial, 16) - .map_err(ConfigFileErrorKind::ParsingFailedForPolynomial)?) + .map_err(|err| ConfigFileErrorKind::ParsingFailedForPolynomial { + polynomial: self.chunker_polynomial.clone(), + source: err, + }) + .map_err(|_err| todo!("Error transition"))?) } /// Get the compression level @@ -145,17 +171,21 @@ impl ConfigFile { /// /// * [`ConfigFileErrorKind::ConfigVersionNotSupported`] - If the version is not supported /// - /// [`ConfigFileErrorKind::ConfigVersionNotSupported`]: crate::error::ConfigFileErrorKind::ConfigVersionNotSupported + /// [`ConfigFileErrorKind::ConfigVersionNotSupported`]: ConfigFileErrorKind::ConfigVersionNotSupported pub fn zstd(&self) -> RusticResult> { match (self.version, self.compression) { (1, _) | (2, Some(0)) => Ok(None), (2, None) => Ok(Some(0)), // use default (=0) zstd compression (2, Some(c)) => Ok(Some(c)), - _ => Err(ConfigFileErrorKind::ConfigVersionNotSupported.into()), + _ => Err(ConfigFileErrorKind::ConfigVersionNotSupported { + version: self.version, + compression: self.compression, + }) + .map_err(|_err| todo!("Error transition")), } } - /// Get wheter an extra verification (decompressing/decrypting data before writing to the repository) should be performed. + /// Get whether an extra verification (decompressing/decrypting data before writing to the repository) should be performed. 
#[must_use] pub fn extra_verify(&self) -> bool { self.extra_verify.unwrap_or(true) // default is to do the extra check diff --git a/crates/core/src/repofile/keyfile.rs b/crates/core/src/repofile/keyfile.rs index cbd44c2cf..0abcf47c1 100644 --- a/crates/core/src/repofile/keyfile.rs +++ b/crates/core/src/repofile/keyfile.rs @@ -6,11 +6,49 @@ use serde_with::{base64::Base64, serde_as, skip_serializing_none}; use crate::{ backend::{FileType, ReadBackend}, - crypto::{aespoly1305::Key, CryptoKey}, - error::{CryptoErrorKind, KeyFileErrorKind, RusticErrorKind, RusticResult}, - impl_repoid, RusticError, + crypto::{aespoly1305::Key, CryptoErrorKind, CryptoKey}, + error::RusticResult, + impl_repoid, }; +/// [`KeyFileErrorKind`] describes the errors that can be returned for `KeyFile`s +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum KeyFileErrorKind { + /// no suitable key found! + NoSuitableKeyFound, + /// listing KeyFiles failed + ListingKeyFilesFailed, + /// couldn't get KeyFile from backend + CouldNotGetKeyFileFromBackend, + /// serde_json couldn't deserialize the data for the key: `{key_id:?}` : `{source}` + DeserializingFromSliceForKeyIdFailed { + /// The id of the key + key_id: KeyId, + /// The error that occurred + source: serde_json::Error, + }, + /// serde_json couldn't serialize the data into a JSON byte vector: `{0:?}` + CouldNotSerializeAsJsonByteVector(serde_json::Error), + /// output length is invalid: `{0:?}` + OutputLengthInvalid(scrypt::errors::InvalidOutputLen), + /// invalid scrypt parameters: `{0:?}` + InvalidSCryptParameters(scrypt::errors::InvalidParams), + /// Could not get key from decrypt data: `{key:?}` : `{source}` + CouldNotGetKeyFromDecryptData { key: Key, source: CryptoErrorKind }, + /// deserializing master key from slice failed: `{source}` + DeserializingMasterKeyFromSliceFailed { source: serde_json::Error }, + /// conversion from {from} to {to} failed for {x} : {source} + ConversionFailed { + from: &'static 
str, + to: &'static str, + x: u32, + source: std::num::TryFromIntError, + }, +} + +pub(crate) type KeyFileResult = Result; + pub(super) mod constants { /// Returns the number of bits of the given type. pub(super) const fn num_bits() -> usize { @@ -79,12 +117,19 @@ impl KeyFile { /// [`KeyFileErrorKind::InvalidSCryptParameters`]: crate::error::KeyFileErrorKind::InvalidSCryptParameters /// [`KeyFileErrorKind::OutputLengthInvalid`]: crate::error::KeyFileErrorKind::OutputLengthInvalid pub fn kdf_key(&self, passwd: &impl AsRef<[u8]>) -> RusticResult { - let params = Params::new(log_2(self.n)?, self.r, self.p, Params::RECOMMENDED_LEN) - .map_err(KeyFileErrorKind::InvalidSCryptParameters)?; + let params = Params::new( + log_2(self.n).map_err(|_err| todo!("Error transition"))?, + self.r, + self.p, + Params::RECOMMENDED_LEN, + ) + .map_err(KeyFileErrorKind::InvalidSCryptParameters) + .map_err(|_err| todo!("Error transition"))?; let mut key = [0; 64]; scrypt::scrypt(passwd.as_ref(), &self.salt, ¶ms, &mut key) - .map_err(KeyFileErrorKind::OutputLengthInvalid)?; + .map_err(KeyFileErrorKind::OutputLengthInvalid) + .map_err(|_err| todo!("Error transition"))?; Ok(Key::from_slice(&key)) } @@ -106,9 +151,17 @@ impl KeyFile { /// /// [`KeyFileErrorKind::DeserializingFromSliceFailed`]: crate::error::KeyFileErrorKind::DeserializingFromSliceFailed pub fn key_from_data(&self, key: &Key) -> RusticResult { - let dec_data = key.decrypt_data(&self.data)?; + let dec_data = key + .decrypt_data(&self.data) + .map_err(|err| KeyFileErrorKind::CouldNotGetKeyFromDecryptData { + key: key.clone(), + source: err, + }) + .map_err(|_err| todo!("Error transition"))?; + Ok(serde_json::from_slice::(&dec_data) - .map_err(KeyFileErrorKind::DeserializingFromSliceFailed)? + .map_err(|err| KeyFileErrorKind::DeserializingMasterKeyFromSliceFailed { source: err }) + .map_err(|_err| todo!("Error transition"))? 
.key()) } @@ -167,13 +220,17 @@ impl KeyFile { let mut key = [0; 64]; scrypt::scrypt(passwd.as_ref(), &salt, ¶ms, &mut key) - .map_err(KeyFileErrorKind::OutputLengthInvalid)?; + .map_err(KeyFileErrorKind::OutputLengthInvalid) + .map_err(|_err| todo!("Error transition"))?; let key = Key::from_slice(&key); - let data = key.encrypt_data( - &serde_json::to_vec(&masterkey) - .map_err(KeyFileErrorKind::CouldNotSerializeAsJsonByteVector)?, - )?; + let data = key + .encrypt_data( + &serde_json::to_vec(&masterkey) + .map_err(KeyFileErrorKind::CouldNotSerializeAsJsonByteVector) + .map_err(|_err| todo!("Error transition"))?, + ) + .map_err(|_err| todo!("Error transition"))?; Ok(Self { hostname, @@ -203,13 +260,16 @@ impl KeyFile { /// /// The [`KeyFile`] read from the backend fn from_backend(be: &B, id: &KeyId) -> RusticResult { - let data = be - .read_full(FileType::Key, id) - .map_err(RusticErrorKind::Backend)?; - Ok( - serde_json::from_slice(&data) - .map_err(KeyFileErrorKind::DeserializingFromSliceFailed)?, - ) + let data = be.read_full(FileType::Key, id)?; + + Ok(serde_json::from_slice(&data) + .map_err( + |err| KeyFileErrorKind::DeserializingFromSliceForKeyIdFailed { + key_id: id.clone(), + source: err, + }, + ) + .map_err(|_err| todo!("Error transition"))?) } } @@ -228,12 +288,21 @@ impl KeyFile { /// The logarithm to base 2 of the given number /// /// [`KeyFileErrorKind::ConversionFromU32ToU8Failed`]: crate::error::KeyFileErrorKind::ConversionFromU32ToU8Failed -fn log_2(x: u32) -> RusticResult { +fn log_2(x: u32) -> KeyFileResult { assert!(x > 0); - Ok(u8::try_from(constants::num_bits::()) - .map_err(KeyFileErrorKind::ConversionFromU32ToU8Failed)? - - u8::try_from(x.leading_zeros()).map_err(KeyFileErrorKind::ConversionFromU32ToU8Failed)? - - 1) + Ok(u8::try_from(constants::num_bits::()).map_err(|err| { + KeyFileErrorKind::ConversionFailed { + from: "usize", + to: "u8", + x, + source: err, + } + })? 
- u8::try_from(x.leading_zeros()).map_err(|err| KeyFileErrorKind::ConversionFailed { + from: "u32", + to: "u8", + x, + source: err, + })? - 1) } /// The mac of a [`Key`] @@ -334,15 +403,15 @@ pub(crate) fn find_key_in_backend( if let Some(id) = hint { key_from_backend(be, id, passwd) } else { - for id in be.list(FileType::Key).map_err(RusticErrorKind::Backend)? { + for id in be.list(FileType::Key)? { match key_from_backend(be, &id.into(), passwd) { Ok(key) => return Ok(key), - Err(RusticError(RusticErrorKind::Crypto( - CryptoErrorKind::DataDecryptionFailed(_), - ))) => continue, + // TODO: We get a RusticError here and we need to determine, if we have a WrongKey error + // TODO: We should probably implement something for that on RusticError or use a variant for this + Err(KeyFileErrorKind::DataDecryptionFailed(_)) => continue, err => return err, } } - Err(KeyFileErrorKind::NoSuitableKeyFound.into()) + Err(KeyFileErrorKind::NoSuitableKeyFound).map_err(|_err| todo!("Error transition")) } } diff --git a/crates/core/src/repofile/packfile.rs b/crates/core/src/repofile/packfile.rs index 87a2404db..741b7a242 100644 --- a/crates/core/src/repofile/packfile.rs +++ b/crates/core/src/repofile/packfile.rs @@ -6,13 +6,36 @@ use log::trace; use crate::{ backend::{decrypt::DecryptReadBackend, FileType}, blob::BlobType, - error::{PackFileErrorKind, RusticErrorKind}, + error::RusticResult, id::Id, impl_repoid, repofile::indexfile::{IndexBlob, IndexPack}, - RusticResult, }; +/// [`PackFileErrorKind`] describes the errors that can be returned for `PackFile`s +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum PackFileErrorKind { + /// Failed reading binary representation of the pack header: `{0:?}` + ReadingBinaryRepresentationFailed(binrw::Error), + /// Failed writing binary representation of the pack header: `{0:?}` + WritingBinaryRepresentationFailed(binrw::Error), + /// Read header length is too large! 
Length: `{size_real}`, file size: `{pack_size}` + HeaderLengthTooLarge { size_real: u32, pack_size: u32 }, + /// Read header length doesn't match header contents! Length: `{size_real}`, computed: `{size_computed}` + HeaderLengthDoesNotMatchHeaderContents { size_real: u32, size_computed: u32 }, + /// pack size computed from header doesn't match real pack size! Computed: `{size_computed}`, real: `{size_real}` + HeaderPackSizeComputedDoesNotMatchRealPackFile { size_real: u32, size_computed: u32 }, + /// decrypting from binary failed + BinaryDecryptionFailed, + /// Partial read of PackFile failed + PartialReadOfPackfileFailed, + /// writing Bytes failed + WritingBytesFailed, +} + +pub(crate) type PackFileResult = Result; + impl_repoid!(PackId, FileType::Pack); pub(super) mod constants { @@ -57,7 +80,7 @@ impl PackHeaderLength { /// * [`PackFileErrorKind::ReadingBinaryRepresentationFailed`] - If reading the binary representation failed /// /// [`PackFileErrorKind::ReadingBinaryRepresentationFailed`]: crate::error::PackFileErrorKind::ReadingBinaryRepresentationFailed - pub(crate) fn from_binary(data: &[u8]) -> RusticResult { + pub(crate) fn from_binary(data: &[u8]) -> PackFileResult { let mut reader = Cursor::new(data); Ok( Self::read(&mut reader) @@ -72,7 +95,7 @@ impl PackHeaderLength { /// * [`PackFileErrorKind::WritingBinaryRepresentationFailed`] - If writing the binary representation failed /// /// [`PackFileErrorKind::WritingBinaryRepresentationFailed`]: crate::error::PackFileErrorKind::WritingBinaryRepresentationFailed - pub(crate) fn to_binary(self) -> RusticResult> { + pub(crate) fn to_binary(self) -> PackFileResult> { let mut writer = Cursor::new(Vec::with_capacity(4)); self.write(&mut writer) .map_err(PackFileErrorKind::WritingBinaryRepresentationFailed)?; @@ -118,7 +141,7 @@ pub enum HeaderEntry { CompTree { /// Lengths within a packfile len: u32, - /// Raw blob length withou compression/encryption + /// Raw blob length without compression/encryption 
len_data: u32, /// Id of compressed tree blob id: Id, @@ -223,7 +246,7 @@ impl PackHeader { /// * [`PackFileErrorKind::ReadingBinaryRepresentationFailed`] - If reading the binary representation failed /// /// [`PackFileErrorKind::ReadingBinaryRepresentationFailed`]: crate::error::PackFileErrorKind::ReadingBinaryRepresentationFailed - pub(crate) fn from_binary(pack: &[u8]) -> RusticResult { + pub(crate) fn from_binary(pack: &[u8]) -> PackFileResult { let mut reader = Cursor::new(pack); let mut offset = 0; let mut blobs = Vec::new(); @@ -275,21 +298,20 @@ impl PackHeader { // read (guessed) header + length field let read_size = size_guess + constants::LENGTH_LEN; let offset = pack_size - read_size; - let mut data = be - .read_partial(FileType::Pack, &id, false, offset, read_size) - .map_err(RusticErrorKind::Backend)?; + let mut data = be.read_partial(FileType::Pack, &id, false, offset, read_size)?; // get header length from the file - let size_real = - PackHeaderLength::from_binary(&data.split_off(size_guess as usize))?.to_u32(); + let size_real = PackHeaderLength::from_binary(&data.split_off(size_guess as usize)) + .map_err(|_err| todo!("Error transition"))? + .to_u32(); trace!("header size: {size_real}"); if size_real + constants::LENGTH_LEN > pack_size { return Err(PackFileErrorKind::HeaderLengthTooLarge { size_real, pack_size, - } - .into()); + }) + .map_err(|_err| todo!("Error transition")); } // now read the header @@ -299,18 +321,18 @@ impl PackHeader { } else { // size_guess was too small; we have to read again let offset = pack_size - size_real - constants::LENGTH_LEN; - be.read_partial(FileType::Pack, &id, false, offset, size_real) - .map_err(RusticErrorKind::Backend)? + be.read_partial(FileType::Pack, &id, false, offset, size_real)? 
}; - let header = Self::from_binary(&be.decrypt(&data)?)?; + let header = + Self::from_binary(&be.decrypt(&data)?).map_err(|_err| todo!("Error transition"))?; if header.size() != size_real { return Err(PackFileErrorKind::HeaderLengthDoesNotMatchHeaderContents { size_real, size_computed: header.size(), - } - .into()); + }) + .map_err(|_err| todo!("Error transition")); } if header.pack_size() != pack_size { @@ -318,9 +340,9 @@ impl PackHeader { PackFileErrorKind::HeaderPackSizeComputedDoesNotMatchRealPackFile { size_real: pack_size, size_computed: header.pack_size(), - } - .into(), - ); + }, + ) + .map_err(|_err| todo!("Error transition")); } Ok(header) @@ -384,7 +406,7 @@ impl<'a> PackHeaderRef<'a> { /// * [`PackFileErrorKind::WritingBinaryRepresentationFailed`] - If writing the binary representation failed /// /// [`PackFileErrorKind::WritingBinaryRepresentationFailed`]: crate::error::PackFileErrorKind::WritingBinaryRepresentationFailed - pub(crate) fn to_binary(&self) -> RusticResult> { + pub(crate) fn to_binary(&self) -> PackFileResult> { let mut writer = Cursor::new(Vec::with_capacity(self.pack_size() as usize)); // collect header entries for blob in self.0 { diff --git a/crates/core/src/repofile/snapshotfile.rs b/crates/core/src/repofile/snapshotfile.rs index a973bfce7..db0474544 100644 --- a/crates/core/src/repofile/snapshotfile.rs +++ b/crates/core/src/repofile/snapshotfile.rs @@ -1,12 +1,13 @@ use std::{ cmp::Ordering, collections::{BTreeMap, BTreeSet}, + ffi::OsString, fmt::{self, Display}, path::{Path, PathBuf}, str::FromStr, }; -use chrono::{DateTime, Duration, Local}; +use chrono::{DateTime, Duration, Local, OutOfRangeError}; use derivative::Derivative; use derive_setters::Setters; use dunce::canonicalize; @@ -20,7 +21,7 @@ use serde_with::{serde_as, skip_serializing_none, DisplayFromStr}; use crate::{ backend::{decrypt::DecryptReadBackend, FileType, FindInBackend}, blob::tree::TreeId, - error::{RusticError, RusticErrorKind, RusticResult, 
SnapshotFileErrorKind}, + error::RusticResult, impl_repofile, progress::Progress, repofile::RepoFile, @@ -30,6 +31,38 @@ use crate::{ #[cfg(feature = "clap")] use clap::ValueHint; +/// [`SnapshotFileErrorKind`] describes the errors that can be returned for `SnapshotFile`s +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum SnapshotFileErrorKind { + /// non-unicode hostname `{0:?}` + NonUnicodeHostname(OsString), + /// non-unicode path `{0:?}` + NonUnicodePath(PathBuf), + /// no snapshots found + NoSnapshotsFound, + /// value `{0:?}` not allowed + ValueNotAllowed(String), + /// datetime out of range: `{0:?}` + OutOfRange(OutOfRangeError), + /// reading the description file failed: `{0:?}` + ReadingDescriptionFailed(std::io::Error), + /// getting the SnapshotFile from the backend failed + GettingSnapshotFileFailed, + /// getting the SnapshotFile by ID failed + GettingSnapshotFileByIdFailed, + /// unpacking SnapshotFile result failed + UnpackingSnapshotFileResultFailed, + /// collecting IDs failed: `{0:?}` + FindingIdsFailed(Vec), + /// removing dots from paths failed: `{0:?}` + RemovingDotsFromPathFailed(std::io::Error), + /// canonicalizing path failed: `{0:?}` + CanonicalizingPathFailed(std::io::Error), +} + +pub(crate) type SnapshotFileResult = Result; + /// Options for creating a new [`SnapshotFile`] structure for a new backup snapshot. /// /// This struct derives [`serde::Deserialize`] allowing to use it in config files. 
@@ -118,7 +151,8 @@ impl SnapshotOptions { /// /// [`SnapshotFileErrorKind::NonUnicodeTag`]: crate::error::SnapshotFileErrorKind::NonUnicodeTag pub fn add_tags(mut self, tag: &str) -> RusticResult { - self.tags.push(StringList::from_str(tag)?); + self.tags + .push(StringList::from_str(tag).map_err(|_err| todo!("Error transition"))?); Ok(self) } @@ -235,7 +269,7 @@ impl SnapshotSummary { /// * [`SnapshotFileErrorKind::OutOfRange`] - If the time is not in the range of `Local::now()` /// /// [`SnapshotFileErrorKind::OutOfRange`]: crate::error::SnapshotFileErrorKind::OutOfRange - pub(crate) fn finalize(&mut self, snap_time: DateTime) -> RusticResult<()> { + pub(crate) fn finalize(&mut self, snap_time: DateTime) -> SnapshotFileResult<()> { let end_time = Local::now(); self.backup_duration = (end_time - self.backup_start) .to_std() @@ -373,7 +407,8 @@ impl SnapshotFile { let hostname = gethostname(); hostname .to_str() - .ok_or_else(|| SnapshotFileErrorKind::NonUnicodeHostname(hostname.clone()))? + .ok_or_else(|| SnapshotFileErrorKind::NonUnicodeHostname(hostname.clone())) + .map_err(|_err| todo!("Error transition"))? 
.to_string() }; @@ -382,7 +417,9 @@ impl SnapshotFile { let delete = match (opts.delete_never, opts.delete_after) { (true, _) => DeleteOption::Never, (_, Some(d)) => DeleteOption::After( - time + Duration::from_std(*d).map_err(SnapshotFileErrorKind::OutOfRange)?, + time + Duration::from_std(*d) + .map_err(SnapshotFileErrorKind::OutOfRange) + .map_err(|_err| todo!("Error transition"))?, ), (false, None) => DeleteOption::NotSet, }; @@ -414,7 +451,8 @@ impl SnapshotFile { if let Some(ref file) = opts.description_from { snap.description = Some( std::fs::read_to_string(file) - .map_err(SnapshotFileErrorKind::ReadingDescriptionFailed)?, + .map_err(SnapshotFileErrorKind::ReadingDescriptionFailed) + .map_err(|_err| todo!("Error transition"))?, ); } @@ -512,7 +550,9 @@ impl SnapshotFile { } } p.finish(); - latest.ok_or_else(|| SnapshotFileErrorKind::NoSnapshotsFound.into()) + latest + .ok_or_else(|| SnapshotFileErrorKind::NoSnapshotsFound) + .map_err(|_err| todo!("Error transition")) } /// Get a [`SnapshotFile`] from the backend by (part of the) id @@ -752,9 +792,7 @@ impl SnapshotFile { B: DecryptReadBackend, F: FnMut(&Self) -> bool, { - let ids = be - .list(FileType::Snapshot) - .map_err(RusticErrorKind::Backend)?; + let ids = be.list(FileType::Snapshot)?; Self::fill_missing(be, current, &ids, filter, p) } @@ -946,8 +984,8 @@ impl Default for SnapshotGroupCriterion { } impl FromStr for SnapshotGroupCriterion { - type Err = RusticError; - fn from_str(s: &str) -> RusticResult { + type Err = SnapshotFileErrorKind; + fn from_str(s: &str) -> SnapshotFileResult { let mut crit = Self::new(); for val in s.split(',') { match val { @@ -1052,8 +1090,8 @@ impl SnapshotGroup { pub struct StringList(pub(crate) BTreeSet); impl FromStr for StringList { - type Err = RusticError; - fn from_str(s: &str) -> RusticResult { + type Err = SnapshotFileErrorKind; + fn from_str(s: &str) -> SnapshotFileResult { Ok(Self(s.split(',').map(ToString::to_string).collect())) } } @@ -1137,7 +1175,7 @@ impl 
StringList { /// * [`SnapshotFileErrorKind::NonUnicodePath`] - If a path is not valid unicode /// /// [`SnapshotFileErrorKind::NonUnicodePath`]: crate::error::SnapshotFileErrorKind::NonUnicodePath - pub(crate) fn set_paths>(&mut self, paths: &[T]) -> RusticResult<()> { + pub(crate) fn set_paths>(&mut self, paths: &[T]) -> SnapshotFileResult<()> { self.0 = paths .iter() .map(|p| { @@ -1146,7 +1184,7 @@ impl StringList { .ok_or_else(|| SnapshotFileErrorKind::NonUnicodePath(p.as_ref().to_path_buf()))? .to_string()) }) - .collect::>>()?; + .collect::>>()?; Ok(()) } @@ -1248,7 +1286,7 @@ impl PathList { /// /// [`SnapshotFileErrorKind::RemovingDotsFromPathFailed`]: crate::error::SnapshotFileErrorKind::RemovingDotsFromPathFailed /// [`SnapshotFileErrorKind::CanonicalizingPathFailed`]: crate::error::SnapshotFileErrorKind::CanonicalizingPathFailed - pub fn sanitize(mut self) -> RusticResult { + pub fn sanitize(mut self) -> SnapshotFileResult { for path in &mut self.0 { *path = sanitize_dot(path)?; } @@ -1285,7 +1323,7 @@ impl PathList { } // helper function to sanitize paths containing dots -fn sanitize_dot(path: &Path) -> RusticResult { +fn sanitize_dot(path: &Path) -> SnapshotFileResult { if path == Path::new(".") || path == Path::new("./") { return Ok(PathBuf::from(".")); } diff --git a/crates/core/src/repository.rs b/crates/core/src/repository.rs index 607a42644..9d77bd59e 100644 --- a/crates/core/src/repository.rs +++ b/crates/core/src/repository.rs @@ -1,5 +1,5 @@ -mod command_input; -mod warm_up; +pub(crate) mod command_input; +pub(crate) mod warm_up; pub use command_input::CommandInput; @@ -9,7 +9,10 @@ use std::{ io::{BufRead, BufReader, Write}, path::{Path, PathBuf}, process::{Command, Stdio}, - sync::Arc, + sync::{ + atomic::{AtomicBool, Ordering as AtomicOrdering}, + Arc, + }, }; use bytes::Bytes; @@ -48,7 +51,7 @@ use crate::{ restore::{collect_and_prepare, restore_repository, RestoreOptions, RestorePlan}, }, crypto::aespoly1305::Key, - 
error::{CommandErrorKind, KeyFileErrorKind, RepositoryErrorKind, RusticErrorKind}, + error::{ErrorKind, RusticResult}, index::{ binarysorted::{IndexCollector, IndexType}, GlobalIndex, IndexEntry, ReadGlobalIndex, ReadIndex, @@ -56,14 +59,14 @@ use crate::{ progress::{NoProgressBars, Progress, ProgressBars}, repofile::{ configfile::ConfigId, - keyfile::find_key_in_backend, + keyfile::{find_key_in_backend, KeyFileErrorKind}, packfile::PackId, snapshotfile::{SnapshotGroup, SnapshotGroupCriterion, SnapshotId}, ConfigFile, KeyId, PathList, RepoFile, RepoId, SnapshotFile, SnapshotSummary, Tree, }, repository::warm_up::{warm_up, warm_up_wait}, vfs::OpenFile, - RepositoryBackends, RusticResult, + RepositoryBackends, }; #[cfg(feature = "clap")] @@ -168,27 +171,27 @@ impl RepositoryOptions { /// /// # Errors /// - /// * [`RepositoryErrorKind::OpeningPasswordFileFailed`] - If opening the password file failed - /// * [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed - /// * [`RepositoryErrorKind::FromSplitError`] - If splitting the password command failed - /// * [`RepositoryErrorKind::PasswordCommandExecutionFailed`] - If executing the password command failed - /// * [`RepositoryErrorKind::ReadingPasswordFromCommandFailed`] - If reading the password from the command failed + /// * [`RusticErrorKind::OpeningPasswordFileFailed`] - If opening the password file failed + /// * [`RusticErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed + /// * [`RusticErrorKind::FromSplitError`] - If splitting the password command failed + /// * [`RusticErrorKind::PasswordCommandExecutionFailed`] - If executing the password command failed + /// * [`RusticErrorKind::ReadingPasswordFromCommandFailed`] - If reading the password from the command failed /// /// # Returns /// /// The password or `None` if no password is given /// - /// [`RepositoryErrorKind::OpeningPasswordFileFailed`]: 
crate::error::RepositoryErrorKind::OpeningPasswordFileFailed - /// [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`]: crate::error::RepositoryErrorKind::ReadingPasswordFromReaderFailed - /// [`RepositoryErrorKind::FromSplitError`]: crate::error::RepositoryErrorKind::FromSplitError - /// [`RepositoryErrorKind::PasswordCommandExecutionFailed`]: crate::error::RepositoryErrorKind::PasswordCommandExecutionFailed - /// [`RepositoryErrorKind::ReadingPasswordFromCommandFailed`]: crate::error::RepositoryErrorKind::ReadingPasswordFromCommandFailed + /// [`RusticErrorKind::OpeningPasswordFileFailed`]: crate::error::RusticErrorKind::OpeningPasswordFileFailed + /// [`RusticErrorKind::ReadingPasswordFromReaderFailed`]: crate::error::RusticErrorKind::ReadingPasswordFromReaderFailed + /// [`RusticErrorKind::FromSplitError`]: crate::error::RusticErrorKind::FromSplitError + /// [`RusticErrorKind::PasswordCommandExecutionFailed`]: crate::error::RusticErrorKind::PasswordCommandExecutionFailed + /// [`RusticErrorKind::ReadingPasswordFromCommandFailed`]: crate::error::RusticErrorKind::ReadingPasswordFromCommandFailed pub fn evaluate_password(&self) -> RusticResult> { match (&self.password, &self.password_file, &self.password_command) { (Some(pwd), _, _) => Ok(Some(pwd.clone())), (_, Some(file), _) => { let mut file = BufReader::new( - File::open(file).map_err(RepositoryErrorKind::OpeningPasswordFileFailed)?, + File::open(file).map_err(ErrorKind::OpeningPasswordFileFailed)?, ); Ok(Some(read_password_from_reader(&mut file)?)) } @@ -203,7 +206,7 @@ impl RepositoryOptions { Ok(process) => process, Err(err) => { error!("password-command could not be executed: {}", err); - return Err(RepositoryErrorKind::PasswordCommandExecutionFailed.into()); + return Err(ErrorKind::PasswordCommandExecutionFailed.into()); } }; @@ -211,7 +214,7 @@ impl RepositoryOptions { Ok(output) => output, Err(err) => { error!("error reading output from password-command: {}", err); - return 
Err(RepositoryErrorKind::ReadingPasswordFromCommandFailed.into()); + return Err(ErrorKind::ReadingPasswordFromCommandFailed.into()); } }; @@ -222,15 +225,13 @@ impl RepositoryOptions { None => "was terminated".into(), }; error!("password-command {s}"); - return Err(RepositoryErrorKind::PasswordCommandExecutionFailed.into()); + return Err(ErrorKind::PasswordCommandExecutionFailed.into()); } let mut pwd = BufReader::new(&*output.stdout); Ok(Some(match read_password_from_reader(&mut pwd) { Ok(val) => val, - Err(_) => { - return Err(RepositoryErrorKind::ReadingPasswordFromCommandFailed.into()) - } + Err(_) => return Err(ErrorKind::ReadingPasswordFromCommandFailed.into()), })) } (None, None, _) => Ok(None), @@ -246,14 +247,14 @@ impl RepositoryOptions { /// /// # Errors /// -/// * [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed +/// * [`RusticErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed /// -/// [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`]: crate::error::RepositoryErrorKind::ReadingPasswordFromReaderFailed +/// [`RusticErrorKind::ReadingPasswordFromReaderFailed`]: crate::error::RusticErrorKind::ReadingPasswordFromReaderFailed pub fn read_password_from_reader(file: &mut impl BufRead) -> RusticResult { let mut password = String::new(); _ = file .read_line(&mut password) - .map_err(RepositoryErrorKind::ReadingPasswordFromReaderFailed)?; + .map_err(ErrorKind::ReadingPasswordFromReaderFailed)?; // Remove the \n from the line if present if password.ends_with('\n') { @@ -309,8 +310,8 @@ impl Repository { /// /// # Errors /// - /// * [`RepositoryErrorKind::NoRepositoryGiven`] - If no repository is given - /// * [`RepositoryErrorKind::NoIDSpecified`] - If the warm-up command does not contain `%id` + /// * [`RusticErrorKind::NoRepositoryGiven`] - If no repository is given + /// * [`RusticErrorKind::NoIDSpecified`] - If the warm-up command does not contain `%id` /// * 
[`BackendAccessErrorKind::BackendLoadError`] - If the specified backend cannot be loaded, e.g. is not supported /// /// [`BackendAccessErrorKind::BackendLoadError`]: crate::error::BackendAccessErrorKind::BackendLoadError @@ -334,12 +335,12 @@ impl

Repository { /// /// # Errors /// - /// * [`RepositoryErrorKind::NoRepositoryGiven`] - If no repository is given - /// * [`RepositoryErrorKind::NoIDSpecified`] - If the warm-up command does not contain `%id` + /// * [`RusticErrorKind::NoRepositoryGiven`] - If no repository is given + /// * [`RusticErrorKind::NoIDSpecified`] - If the warm-up command does not contain `%id` /// * [`BackendAccessErrorKind::BackendLoadError`] - If the specified backend cannot be loaded, e.g. is not supported /// - /// [`RepositoryErrorKind::NoRepositoryGiven`]: crate::error::RepositoryErrorKind::NoRepositoryGiven - /// [`RepositoryErrorKind::NoIDSpecified`]: crate::error::RepositoryErrorKind::NoIDSpecified + /// [`RusticErrorKind::NoRepositoryGiven`]: crate::error::RusticErrorKind::NoRepositoryGiven + /// [`RusticErrorKind::NoIDSpecified`]: crate::error::RusticErrorKind::NoIDSpecified /// [`BackendAccessErrorKind::BackendLoadError`]: crate::error::BackendAccessErrorKind::BackendLoadError pub fn new_with_progress( opts: &RepositoryOptions, @@ -351,7 +352,7 @@ impl

Repository { if let Some(warm_up) = &opts.warm_up_command { if warm_up.args().iter().all(|c| !c.contains("%id")) { - return Err(RepositoryErrorKind::NoIDSpecified.into()); + return Err(ErrorKind::NoIDSpecified.into()); } info!("using warm-up command {warm_up}"); } @@ -383,21 +384,21 @@ impl Repository { /// /// # Errors /// - /// * [`RepositoryErrorKind::OpeningPasswordFileFailed`] - If opening the password file failed - /// * [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed - /// * [`RepositoryErrorKind::FromSplitError`] - If splitting the password command failed - /// * [`RepositoryErrorKind::PasswordCommandExecutionFailed`] - If parsing the password command failed - /// * [`RepositoryErrorKind::ReadingPasswordFromCommandFailed`] - If reading the password from the command failed + /// * [`RusticErrorKind::OpeningPasswordFileFailed`] - If opening the password file failed + /// * [`RusticErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed + /// * [`RusticErrorKind::FromSplitError`] - If splitting the password command failed + /// * [`RusticErrorKind::PasswordCommandExecutionFailed`] - If parsing the password command failed + /// * [`RusticErrorKind::ReadingPasswordFromCommandFailed`] - If reading the password from the command failed /// /// # Returns /// /// The password or `None` if no password is given /// - /// [`RepositoryErrorKind::OpeningPasswordFileFailed`]: crate::error::RepositoryErrorKind::OpeningPasswordFileFailed - /// [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`]: crate::error::RepositoryErrorKind::ReadingPasswordFromReaderFailed - /// [`RepositoryErrorKind::FromSplitError`]: crate::error::RepositoryErrorKind::FromSplitError - /// [`RepositoryErrorKind::PasswordCommandExecutionFailed`]: crate::error::RepositoryErrorKind::PasswordCommandExecutionFailed - /// [`RepositoryErrorKind::ReadingPasswordFromCommandFailed`]: 
crate::error::RepositoryErrorKind::ReadingPasswordFromCommandFailed + /// [`RusticErrorKind::OpeningPasswordFileFailed`]: crate::error::RusticErrorKind::OpeningPasswordFileFailed + /// [`RusticErrorKind::ReadingPasswordFromReaderFailed`]: crate::error::RusticErrorKind::ReadingPasswordFromReaderFailed + /// [`RusticErrorKind::FromSplitError`]: crate::error::RusticErrorKind::FromSplitError + /// [`RusticErrorKind::PasswordCommandExecutionFailed`]: crate::error::RusticErrorKind::PasswordCommandExecutionFailed + /// [`RusticErrorKind::ReadingPasswordFromCommandFailed`]: crate::error::RusticErrorKind::ReadingPasswordFromCommandFailed pub fn password(&self) -> RusticResult> { self.opts.evaluate_password() } @@ -406,25 +407,25 @@ impl Repository { /// /// # Errors /// - /// * [`RepositoryErrorKind::ListingRepositoryConfigFileFailed`] - If listing the repository config file failed - /// * [`RepositoryErrorKind::MoreThanOneRepositoryConfig`] - If there is more than one repository config file + /// * [`RusticErrorKind::ListingRepositoryConfigFileFailed`] - If listing the repository config file failed + /// * [`RusticErrorKind::MoreThanOneRepositoryConfig`] - If there is more than one repository config file /// /// # Returns /// /// The id of the config file or `None` if no config file is found /// - /// [`RepositoryErrorKind::ListingRepositoryConfigFileFailed`]: crate::error::RepositoryErrorKind::ListingRepositoryConfigFileFailed - /// [`RepositoryErrorKind::MoreThanOneRepositoryConfig`]: crate::error::RepositoryErrorKind::MoreThanOneRepositoryConfig + /// [`RusticErrorKind::ListingRepositoryConfigFileFailed`]: crate::error::RusticErrorKind::ListingRepositoryConfigFileFailed + /// [`RusticErrorKind::MoreThanOneRepositoryConfig`]: crate::error::RusticErrorKind::MoreThanOneRepositoryConfig pub fn config_id(&self) -> RusticResult> { let config_ids = self .be .list(FileType::Config) - .map_err(|_| RepositoryErrorKind::ListingRepositoryConfigFileFailed)?; + .map_err(|_| 
ErrorKind::ListingRepositoryConfigFileFailed)?; match config_ids.len() { 1 => Ok(Some(ConfigId::from(config_ids[0]))), 0 => Ok(None), - _ => Err(RepositoryErrorKind::MoreThanOneRepositoryConfig(self.name.clone()).into()), + _ => Err(ErrorKind::MoreThanOneRepositoryConfig(self.name.clone()).into()), } } @@ -434,39 +435,37 @@ impl Repository { /// /// # Errors /// - /// * [`RepositoryErrorKind::NoPasswordGiven`] - If no password is given - /// * [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed - /// * [`RepositoryErrorKind::OpeningPasswordFileFailed`] - If opening the password file failed - /// * [`RepositoryErrorKind::PasswordCommandExecutionFailed`] - If parsing the password command failed - /// * [`RepositoryErrorKind::ReadingPasswordFromCommandFailed`] - If reading the password from the command failed - /// * [`RepositoryErrorKind::FromSplitError`] - If splitting the password command failed - /// * [`RepositoryErrorKind::NoRepositoryConfigFound`] - If no repository config file is found - /// * [`RepositoryErrorKind::KeysDontMatchForRepositories`] - If the keys of the hot and cold backend don't match - /// * [`RepositoryErrorKind::IncorrectPassword`] - If the password is incorrect + /// * [`RusticErrorKind::NoPasswordGiven`] - If no password is given + /// * [`RusticErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed + /// * [`RusticErrorKind::OpeningPasswordFileFailed`] - If opening the password file failed + /// * [`RusticErrorKind::PasswordCommandExecutionFailed`] - If parsing the password command failed + /// * [`RusticErrorKind::ReadingPasswordFromCommandFailed`] - If reading the password from the command failed + /// * [`RusticErrorKind::FromSplitError`] - If splitting the password command failed + /// * [`RusticErrorKind::NoRepositoryConfigFound`] - If no repository config file is found + /// * [`RusticErrorKind::KeysDontMatchForRepositories`] - If the keys of the hot and cold backend don't 
match + /// * [`RusticErrorKind::IncorrectPassword`] - If the password is incorrect /// * [`KeyFileErrorKind::NoSuitableKeyFound`] - If no suitable key is found - /// * [`RepositoryErrorKind::ListingRepositoryConfigFileFailed`] - If listing the repository config file failed - /// * [`RepositoryErrorKind::MoreThanOneRepositoryConfig`] - If there is more than one repository config file + /// * [`RusticErrorKind::ListingRepositoryConfigFileFailed`] - If listing the repository config file failed + /// * [`RusticErrorKind::MoreThanOneRepositoryConfig`] - If there is more than one repository config file /// /// # Returns /// /// The open repository /// - /// [`RepositoryErrorKind::NoPasswordGiven`]: crate::error::RepositoryErrorKind::NoPasswordGiven - /// [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`]: crate::error::RepositoryErrorKind::ReadingPasswordFromReaderFailed - /// [`RepositoryErrorKind::OpeningPasswordFileFailed`]: crate::error::RepositoryErrorKind::OpeningPasswordFileFailed - /// [`RepositoryErrorKind::PasswordCommandExecutionFailed`]: crate::error::RepositoryErrorKind::PasswordCommandExecutionFailed - /// [`RepositoryErrorKind::ReadingPasswordFromCommandFailed`]: crate::error::RepositoryErrorKind::ReadingPasswordFromCommandFailed - /// [`RepositoryErrorKind::FromSplitError`]: crate::error::RepositoryErrorKind::FromSplitError - /// [`RepositoryErrorKind::NoRepositoryConfigFound`]: crate::error::RepositoryErrorKind::NoRepositoryConfigFound - /// [`RepositoryErrorKind::KeysDontMatchForRepositories`]: crate::error::RepositoryErrorKind::KeysDontMatchForRepositories - /// [`RepositoryErrorKind::IncorrectPassword`]: crate::error::RepositoryErrorKind::IncorrectPassword + /// [`RusticErrorKind::NoPasswordGiven`]: crate::error::RusticErrorKind::NoPasswordGiven + /// [`RusticErrorKind::ReadingPasswordFromReaderFailed`]: crate::error::RusticErrorKind::ReadingPasswordFromReaderFailed + /// [`RusticErrorKind::OpeningPasswordFileFailed`]: 
crate::error::RusticErrorKind::OpeningPasswordFileFailed + /// [`RusticErrorKind::PasswordCommandExecutionFailed`]: crate::error::RusticErrorKind::PasswordCommandExecutionFailed + /// [`RusticErrorKind::ReadingPasswordFromCommandFailed`]: crate::error::RusticErrorKind::ReadingPasswordFromCommandFailed + /// [`RusticErrorKind::FromSplitError`]: crate::error::RusticErrorKind::FromSplitError + /// [`RusticErrorKind::NoRepositoryConfigFound`]: crate::error::RusticErrorKind::NoRepositoryConfigFound + /// [`RusticErrorKind::KeysDontMatchForRepositories`]: crate::error::RusticErrorKind::KeysDontMatchForRepositories + /// [`RusticErrorKind::IncorrectPassword`]: crate::error::RusticErrorKind::IncorrectPassword /// [`KeyFileErrorKind::NoSuitableKeyFound`]: crate::error::KeyFileErrorKind::NoSuitableKeyFound - /// [`RepositoryErrorKind::ListingRepositoryConfigFileFailed`]: crate::error::RepositoryErrorKind::ListingRepositoryConfigFileFailed - /// [`RepositoryErrorKind::MoreThanOneRepositoryConfig`]: crate::error::RepositoryErrorKind::MoreThanOneRepositoryConfig + /// [`RusticErrorKind::ListingRepositoryConfigFileFailed`]: crate::error::RusticErrorKind::ListingRepositoryConfigFileFailed + /// [`RusticErrorKind::MoreThanOneRepositoryConfig`]: crate::error::RusticErrorKind::MoreThanOneRepositoryConfig pub fn open(self) -> RusticResult> { - let password = self - .password()? 
- .ok_or(RepositoryErrorKind::NoPasswordGiven)?; + let password = self.password()?.ok_or(ErrorKind::NoPasswordGiven)?; self.open_with_password(&password) } @@ -480,50 +479,40 @@ impl Repository { /// /// # Errors /// - /// * [`RepositoryErrorKind::NoRepositoryConfigFound`] - If no repository config file is found - /// * [`RepositoryErrorKind::KeysDontMatchForRepositories`] - If the keys of the hot and cold backend don't match - /// * [`RepositoryErrorKind::IncorrectPassword`] - If the password is incorrect + /// * [`RusticErrorKind::NoRepositoryConfigFound`] - If no repository config file is found + /// * [`RusticErrorKind::KeysDontMatchForRepositories`] - If the keys of the hot and cold backend don't match + /// * [`RusticErrorKind::IncorrectPassword`] - If the password is incorrect /// * [`KeyFileErrorKind::NoSuitableKeyFound`] - If no suitable key is found - /// * [`RepositoryErrorKind::ListingRepositoryConfigFileFailed`] - If listing the repository config file failed - /// * [`RepositoryErrorKind::MoreThanOneRepositoryConfig`] - If there is more than one repository config file + /// * [`RusticErrorKind::ListingRepositoryConfigFileFailed`] - If listing the repository config file failed + /// * [`RusticErrorKind::MoreThanOneRepositoryConfig`] - If there is more than one repository config file /// - /// [`RepositoryErrorKind::NoRepositoryConfigFound`]: crate::error::RepositoryErrorKind::NoRepositoryConfigFound - /// [`RepositoryErrorKind::KeysDontMatchForRepositories`]: crate::error::RepositoryErrorKind::KeysDontMatchForRepositories - /// [`RepositoryErrorKind::IncorrectPassword`]: crate::error::RepositoryErrorKind::IncorrectPassword + /// [`RusticErrorKind::NoRepositoryConfigFound`]: crate::error::RusticErrorKind::NoRepositoryConfigFound + /// [`RusticErrorKind::KeysDontMatchForRepositories`]: crate::error::RusticErrorKind::KeysDontMatchForRepositories + /// [`RusticErrorKind::IncorrectPassword`]: crate::error::RusticErrorKind::IncorrectPassword /// 
[`KeyFileErrorKind::NoSuitableKeyFound`]: crate::error::KeyFileErrorKind::NoSuitableKeyFound - /// [`RepositoryErrorKind::ListingRepositoryConfigFileFailed`]: crate::error::RepositoryErrorKind::ListingRepositoryConfigFileFailed - /// [`RepositoryErrorKind::MoreThanOneRepositoryConfig`]: crate::error::RepositoryErrorKind::MoreThanOneRepositoryConfig + /// [`RusticErrorKind::ListingRepositoryConfigFileFailed`]: crate::error::RusticErrorKind::ListingRepositoryConfigFileFailed + /// [`RusticErrorKind::MoreThanOneRepositoryConfig`]: crate::error::RusticErrorKind::MoreThanOneRepositoryConfig pub fn open_with_password(self, password: &str) -> RusticResult> { let config_id = self .config_id()? - .ok_or(RepositoryErrorKind::NoRepositoryConfigFound( - self.name.clone(), - ))?; + .ok_or(ErrorKind::NoRepositoryConfigFound(self.name.clone())) + .map_err(|_err| todo!("Error transition"))?; if let Some(be_hot) = &self.be_hot { - let mut keys = self - .be - .list_with_size(FileType::Key) - .map_err(RusticErrorKind::Backend)?; + let mut keys = self.be.list_with_size(FileType::Key)?; keys.sort_unstable_by_key(|key| key.0); - let mut hot_keys = be_hot - .list_with_size(FileType::Key) - .map_err(RusticErrorKind::Backend)?; + let mut hot_keys = be_hot.list_with_size(FileType::Key)?; hot_keys.sort_unstable_by_key(|key| key.0); if keys != hot_keys { - return Err(RepositoryErrorKind::KeysDontMatchForRepositories(self.name).into()); + return Err(ErrorKind::KeysDontMatchForRepositories(self.name).into()) + .map_err(|_err| todo!("Error transition")); } } - let key = find_key_in_backend(&self.be, &password, None).map_err(|err| { - match err.into_inner() { - RusticErrorKind::KeyFile(KeyFileErrorKind::NoSuitableKeyFound) => { - RepositoryErrorKind::IncorrectPassword.into() - } - err => err, - } - })?; + let key = find_key_in_backend(&self.be, &password, None)?; + info!("repository {}: password is correct.", self.name); + let dbe = DecryptBackend::new(self.be.clone(), key); let config: 
ConfigFile = dbe.get_file(&config_id)?; self.open_raw(key, config) @@ -544,27 +533,25 @@ impl Repository { /// /// # Errors /// - /// * [`RepositoryErrorKind::NoPasswordGiven`] - If no password is given - /// * [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed - /// * [`RepositoryErrorKind::OpeningPasswordFileFailed`] - If opening the password file failed - /// * [`RepositoryErrorKind::PasswordCommandExecutionFailed`] - If parsing the password command failed - /// * [`RepositoryErrorKind::ReadingPasswordFromCommandFailed`] - If reading the password from the command failed - /// * [`RepositoryErrorKind::FromSplitError`] - If splitting the password command failed - /// - /// [`RepositoryErrorKind::NoPasswordGiven`]: crate::error::RepositoryErrorKind::NoPasswordGiven - /// [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`]: crate::error::RepositoryErrorKind::ReadingPasswordFromReaderFailed - /// [`RepositoryErrorKind::OpeningPasswordFileFailed`]: crate::error::RepositoryErrorKind::OpeningPasswordFileFailed - /// [`RepositoryErrorKind::PasswordCommandExecutionFailed`]: crate::error::RepositoryErrorKind::PasswordCommandExecutionFailed - /// [`RepositoryErrorKind::ReadingPasswordFromCommandFailed`]: crate::error::RepositoryErrorKind::ReadingPasswordFromCommandFailed - /// [`RepositoryErrorKind::FromSplitError`]: crate::error::RepositoryErrorKind::FromSplitError + /// * [`RusticErrorKind::NoPasswordGiven`] - If no password is given + /// * [`RusticErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed + /// * [`RusticErrorKind::OpeningPasswordFileFailed`] - If opening the password file failed + /// * [`RusticErrorKind::PasswordCommandExecutionFailed`] - If parsing the password command failed + /// * [`RusticErrorKind::ReadingPasswordFromCommandFailed`] - If reading the password from the command failed + /// * [`RusticErrorKind::FromSplitError`] - If splitting the password command failed + /// + /// 
[`RusticErrorKind::NoPasswordGiven`]: crate::error::RusticErrorKind::NoPasswordGiven + /// [`RusticErrorKind::ReadingPasswordFromReaderFailed`]: crate::error::RusticErrorKind::ReadingPasswordFromReaderFailed + /// [`RusticErrorKind::OpeningPasswordFileFailed`]: crate::error::RusticErrorKind::OpeningPasswordFileFailed + /// [`RusticErrorKind::PasswordCommandExecutionFailed`]: crate::error::RusticErrorKind::PasswordCommandExecutionFailed + /// [`RusticErrorKind::ReadingPasswordFromCommandFailed`]: crate::error::RusticErrorKind::ReadingPasswordFromCommandFailed + /// [`RusticErrorKind::FromSplitError`]: crate::error::RusticErrorKind::FromSplitError pub fn init( self, key_opts: &KeyOptions, config_opts: &ConfigOptions, ) -> RusticResult> { - let password = self - .password()? - .ok_or(RepositoryErrorKind::NoPasswordGiven)?; + let password = self.password()?.ok_or(ErrorKind::NoPasswordGiven)?; self.init_with_password(&password, key_opts, config_opts) } @@ -584,13 +571,13 @@ impl Repository { /// /// # Errors /// - /// * [`RepositoryErrorKind::ConfigFileExists`] - If a config file already exists - /// * [`RepositoryErrorKind::ListingRepositoryConfigFileFailed`] - If listing the repository config file failed - /// * [`RepositoryErrorKind::MoreThanOneRepositoryConfig`] - If there is more than one repository config file + /// * [`RusticErrorKind::ConfigFileExists`] - If a config file already exists + /// * [`RusticErrorKind::ListingRepositoryConfigFileFailed`] - If listing the repository config file failed + /// * [`RusticErrorKind::MoreThanOneRepositoryConfig`] - If there is more than one repository config file /// - /// [`RepositoryErrorKind::ConfigFileExists`]: crate::error::RepositoryErrorKind::ConfigFileExists - /// [`RepositoryErrorKind::ListingRepositoryConfigFileFailed`]: crate::error::RepositoryErrorKind::ListingRepositoryConfigFileFailed - /// [`RepositoryErrorKind::MoreThanOneRepositoryConfig`]: crate::error::RepositoryErrorKind::MoreThanOneRepositoryConfig + /// 
[`RusticErrorKind::ConfigFileExists`]: crate::error::RusticErrorKind::ConfigFileExists + /// [`RusticErrorKind::ListingRepositoryConfigFileFailed`]: crate::error::RusticErrorKind::ListingRepositoryConfigFileFailed + /// [`RusticErrorKind::MoreThanOneRepositoryConfig`]: crate::error::RusticErrorKind::MoreThanOneRepositoryConfig pub fn init_with_password( self, pass: &str, @@ -598,9 +585,10 @@ impl Repository { config_opts: &ConfigOptions, ) -> RusticResult> { if self.config_id()?.is_some() { - return Err(RepositoryErrorKind::ConfigFileExists.into()); + return Err(ErrorKind::ConfigFileExists.into()); } let (key, config) = commands::init::init(&self, pass, key_opts, config_opts)?; + self.open_raw(key, config) } @@ -645,15 +633,15 @@ impl Repository { /// /// # Errors /// - /// * [`RepositoryErrorKind::HotRepositoryFlagMissing`] - If the config file has `is_hot` set to `true` but the repository is not hot - /// * [`RepositoryErrorKind::IsNotHotRepository`] - If the config file has `is_hot` set to `false` but the repository is hot + /// * [`RusticErrorKind::HotRepositoryFlagMissing`] - If the config file has `is_hot` set to `true` but the repository is not hot + /// * [`RusticErrorKind::IsNotHotRepository`] - If the config file has `is_hot` set to `false` but the repository is hot /// - /// [`RepositoryErrorKind::HotRepositoryFlagMissing`]: crate::error::RepositoryErrorKind::HotRepositoryFlagMissing - /// [`RepositoryErrorKind::IsNotHotRepository`]: crate::error::RepositoryErrorKind::IsNotHotRepository + /// [`RusticErrorKind::HotRepositoryFlagMissing`]: crate::error::RusticErrorKind::HotRepositoryFlagMissing + /// [`RusticErrorKind::IsNotHotRepository`]: crate::error::RusticErrorKind::IsNotHotRepository fn open_raw(mut self, key: Key, config: ConfigFile) -> RusticResult> { match (config.is_hot == Some(true), self.be_hot.is_some()) { - (true, false) => return Err(RepositoryErrorKind::HotRepositoryFlagMissing.into()), - (false, true) => return 
Err(RepositoryErrorKind::IsNotHotRepository.into()), + (true, false) => return Err(ErrorKind::HotRepositoryFlagMissing.into()), + (false, true) => return Err(ErrorKind::IsNotHotRepository.into()), _ => {} } @@ -694,12 +682,7 @@ impl Repository { /// // TODO: Document errors pub fn list(&self) -> RusticResult> { - Ok(self - .be - .list(T::TYPE) - .map_err(RusticErrorKind::Backend)? - .into_iter() - .map(Into::into)) + Ok(self.be.list(T::TYPE)?.into_iter().map(Into::into)) } } @@ -721,14 +704,14 @@ impl Repository { /// /// # Errors /// - /// * [`RepositoryErrorKind::FromSplitError`] - If the command could not be parsed. - /// * [`RepositoryErrorKind::FromThreadPoolbilderError`] - If the thread pool could not be created. + /// * [`RusticErrorKind::FromSplitError`] - If the command could not be parsed. + /// * [`RusticErrorKind::FromThreadPoolbilderError`] - If the thread pool could not be created. /// /// # Returns /// /// The result of the warm up pub fn warm_up(&self, packs: impl ExactSizeIterator) -> RusticResult<()> { - warm_up(self, packs) + warm_up(self, packs).map_err(|_err| todo!("Error transition")) } /// Warm up the given pack files and wait the configured waiting time. @@ -739,13 +722,13 @@ impl Repository { /// /// # Errors /// - /// * [`RepositoryErrorKind::FromSplitError`] - If the command could not be parsed. - /// * [`RepositoryErrorKind::FromThreadPoolbilderError`] - If the thread pool could not be created. + /// * [`RusticErrorKind::FromSplitError`] - If the command could not be parsed. + /// * [`RusticErrorKind::FromThreadPoolbilderError`] - If the thread pool could not be created. 
/// - /// [`RepositoryErrorKind::FromSplitError`]: crate::error::RepositoryErrorKind::FromSplitError - /// [`RepositoryErrorKind::FromThreadPoolbilderError`]: crate::error::RepositoryErrorKind::FromThreadPoolbilderError + /// [`RusticErrorKind::FromSplitError`]: crate::error::RusticErrorKind::FromSplitError + /// [`RusticErrorKind::FromThreadPoolbilderError`]: crate::error::RusticErrorKind::FromThreadPoolbilderError pub fn warm_up_wait(&self, packs: impl ExactSizeIterator) -> RusticResult<()> { - warm_up_wait(self, packs) + warm_up_wait(self, packs).map_err(|_err| todo!("Error transition")) } } @@ -1116,10 +1099,9 @@ impl Repository { /// If the files could not be deleted. pub fn delete_snapshots(&self, ids: &[SnapshotId]) -> RusticResult<()> { if self.config().append_only == Some(true) { - return Err(CommandErrorKind::NotAllowedWithAppendOnly( - "snapshots removal".to_string(), - ) - .into()); + return Err( + ErrorKind::NotAllowedWithAppendOnly("snapshots removal".to_string()).into(), + ); } let p = self.pb.progress_counter("removing snapshots..."); self.dbe().delete_list(true, ids.iter(), p)?; @@ -1155,6 +1137,9 @@ impl Repository { /// # Errors /// // TODO: Document errors + /// # Panics + /// + /// If the error handling thread panicked pub fn check(&self, opts: CheckOptions) -> RusticResult<()> { let trees = self .get_all_snapshots()? 
@@ -1162,7 +1147,13 @@ impl Repository { .map(|snap| snap.tree) .collect(); - check_repository(self, opts, trees) + let errors = check_repository(self, opts, trees, err_send)?; + + if errors { + Err(CommandErrorKind::CheckFailed.into()).map_err(|_err| todo!("Error transition")) + } else { + Ok(()) + } } /// Check the repository and given trees for errors or inconsistencies @@ -1174,8 +1165,31 @@ impl Repository { /// # Errors /// // TODO: Document errors + /// # Panics + /// + /// If the error handling thread panicked pub fn check_with_trees(&self, opts: CheckOptions, trees: Vec) -> RusticResult<()> { - check_repository(self, opts, trees) + let (err_send, err_recv) = crossbeam_channel::unbounded(); + + let errors_occurred = Arc::new(AtomicBool::new(false)); + let errors_occurred_clone = errors_occurred.clone(); + + let err_handle = std::thread::spawn(move || { + for err in err_recv { + errors_occurred_clone.store(true, AtomicOrdering::Relaxed); + error!("{}", err); + } + }); + + check_repository(self, opts, trees, err_send)?; + + err_handle.join().expect("Error handling thread panicked"); + + if errors_occurred.load(AtomicOrdering::Relaxed) { + Err(CommandErrorKind::CheckFailed.into()).map_err(|_err| todo!("Error transition")) + } else { + Ok(()) + } } /// Get the plan about what should be pruned and/or repacked. 
@@ -1582,15 +1596,16 @@ impl Repository { /// /// # Errors /// - /// * [`RepositoryErrorKind::IdNotFound`] - If the id is not found in the index + /// * [`RusticErrorKind::IdNotFound`] - If the id is not found in the index /// - /// [`RepositoryErrorKind::IdNotFound`]: crate::error::RepositoryErrorKind::IdNotFound + /// [`RusticErrorKind::IdNotFound`]: crate::error::RusticErrorKind::IdNotFound pub fn get_index_entry(&self, id: &T) -> RusticResult { let blob_id: BlobId = (*id).into(); let ie = self .index() .get_id(T::TYPE, &blob_id) - .ok_or_else(|| RepositoryErrorKind::IdNotFound(blob_id))?; + .ok_or_else(|| ErrorKind::IdNotFound(blob_id)) + .map_err(|_err| todo!("Error transition"))?; Ok(ie) } diff --git a/crates/core/src/repository/command_input.rs b/crates/core/src/repository/command_input.rs index 6a258d4db..2d98e548c 100644 --- a/crates/core/src/repository/command_input.rs +++ b/crates/core/src/repository/command_input.rs @@ -8,10 +8,38 @@ use log::{debug, error, trace, warn}; use serde::{Deserialize, Serialize, Serializer}; use serde_with::{serde_as, DisplayFromStr, PickFirst}; -use crate::{ - error::{RepositoryErrorKind, RusticErrorKind}, - RusticError, RusticResult, -}; +use crate::error::RusticResult; + +/// [`CommandInputErrorKind`] describes the errors that can be returned from the CommandInput +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum CommandInputErrorKind { + /// Command execution failed: {context}:{what} : {source} + CommandExecutionFailed { + context: String, + what: String, + source: std::io::Error, + }, + /// Command error status: {context}:{what} : {status} + CommandErrorStatus { + context: String, + what: String, + status: ExitStatus, + }, + /// Splitting arguments failed: {arguments} : {source} + SplittingArgumentsFailed { + arguments: String, + source: shell_words::ParseError, + }, + /// Process execution failed: {command:?} : {path:?} : {source} + ProcessExecutionFailed { + command: CommandInput, + 
path: std::path::PathBuf, + source: std::io::Error, + }, +} + +pub(crate) type CommandInputResult = Result; /// A command to be called which can be given as CLI option as well as in config files /// `CommandInput` implements Serialize/Deserialize as well as FromStr. @@ -81,7 +109,7 @@ impl CommandInput { /// /// # Errors /// - /// `RusticError` if return status cannot be read + /// `CommandInputErrorKind` if return status cannot be read pub fn run(&self, context: &str, what: &str) -> RusticResult<()> { if !self.is_set() { trace!("not calling command {context}:{what} - not set"); @@ -95,7 +123,7 @@ impl CommandInput { } impl FromStr for CommandInput { - type Err = RusticError; + type Err = CommandInputErrorKind; fn from_str(s: &str) -> Result { Ok(Self(_CommandInput::from_str(s)?)) } @@ -137,7 +165,7 @@ impl From> for _CommandInput { } impl FromStr for _CommandInput { - type Err = RusticError; + type Err = CommandInputErrorKind; fn from_str(s: &str) -> Result { Ok(split(s)?.into()) } @@ -164,7 +192,7 @@ pub enum OnFailure { } impl OnFailure { - fn eval(self, res: RusticResult) -> RusticResult> { + fn eval(self, res: CommandInputResult) -> RusticResult> { let res = self.display_result(res); match (res, self) { (Err(err), Self::Error) => Err(err), @@ -174,11 +202,12 @@ impl OnFailure { } /// Displays a result depending on the defined error handling which still yielding the same result + /// /// # Note /// /// This can be used where an error might occur, but in that /// case we have to abort. 
- pub fn display_result(self, res: RusticResult) -> RusticResult { + pub fn display_result(self, res: CommandInputResult) -> RusticResult { if let Err(err) = &res { match self { Self::Error => { @@ -190,7 +219,7 @@ impl OnFailure { Self::Ignore => {} } } - res + res.map_err(|_err| todo!("Error transition")) } /// Handle a status of a called command depending on the defined error handling @@ -200,20 +229,22 @@ impl OnFailure { context: &str, what: &str, ) -> RusticResult<()> { - let status = status.map_err(|err| { - RepositoryErrorKind::CommandExecutionFailed(context.into(), what.into(), err).into() + let status = status.map_err(|err| CommandInputErrorKind::CommandExecutionFailed { + context: context.to_string(), + what: what.to_string(), + source: err, }); + let Some(status) = self.eval(status)? else { return Ok(()); }; if !status.success() { - let _: Option<()> = self.eval(Err(RepositoryErrorKind::CommandErrorStatus( - context.into(), - what.into(), + let _: Option<()> = self.eval(Err(CommandInputErrorKind::CommandErrorStatus { + context: context.to_string(), + what: what.to_string(), status, - ) - .into()))?; + }))?; } Ok(()) } @@ -221,6 +252,11 @@ impl OnFailure { /// helper to split arguments // TODO: Maybe use special parser (winsplit?) for windows? -fn split(s: &str) -> RusticResult> { - Ok(shell_words::split(s).map_err(|err| RusticErrorKind::Command(err.into()))?) 
+fn split(s: &str) -> CommandInputResult> { + Ok( + shell_words::split(s).map_err(|err| CommandInputErrorKind::SplittingArgumentsFailed { + arguments: s.to_string(), + source: err, + })?, + ) } diff --git a/crates/core/src/repository/warm_up.rs b/crates/core/src/repository/warm_up.rs index 14ea76da5..211de6778 100644 --- a/crates/core/src/repository/warm_up.rs +++ b/crates/core/src/repository/warm_up.rs @@ -6,13 +6,22 @@ use rayon::ThreadPoolBuilder; use crate::{ backend::{FileType, ReadBackend}, - error::{RepositoryErrorKind, RusticResult}, progress::{Progress, ProgressBars}, repofile::packfile::PackId, repository::Repository, CommandInput, }; +/// [`WarmupErrorKind`] describes the errors that can be returned from Warmup +#[derive(thiserror::Error, Debug, displaydoc::Display)] +#[non_exhaustive] +pub enum WarmupErrorKind { + /// Error in warm-up command + General, +} + +pub(crate) type WarmupResult = Result; + pub(super) mod constants { /// The maximum number of reader threads to use for warm-up. 
pub(super) const MAX_READER_THREADS_NUM: usize = 20; @@ -35,7 +44,7 @@ pub(super) mod constants { pub(crate) fn warm_up_wait( repo: &Repository, packs: impl ExactSizeIterator, -) -> RusticResult<()> { +) -> WarmupResult<()> { warm_up(repo, packs)?; if let Some(wait) = repo.opts.warm_up_wait { let p = repo.pb.progress_spinner(format!("waiting {wait}...")); @@ -62,7 +71,7 @@ pub(crate) fn warm_up_wait( pub(crate) fn warm_up( repo: &Repository, packs: impl ExactSizeIterator, -) -> RusticResult<()> { +) -> WarmupResult<()> { if let Some(warm_up_cmd) = &repo.opts.warm_up_command { warm_up_command(packs, warm_up_cmd, &repo.pb)?; } else if repo.be.needs_warm_up() { @@ -88,7 +97,7 @@ fn warm_up_command( packs: impl ExactSizeIterator, command: &CommandInput, pb: &P, -) -> RusticResult<()> { +) -> WarmupResult<()> { let p = pb.progress_counter("warming up packs..."); p.set_length(packs.len() as u64); for pack in packs { @@ -98,7 +107,10 @@ fn warm_up_command( .map(|c| c.replace("%id", &pack.to_hex())) .collect(); debug!("calling {command:?}..."); - let status = Command::new(command.command()).args(&args).status()?; + let status = Command::new(command.command()) + .args(&args) + .status() + .map_err(|_err| todo!("Error transition"))?; if !status.success() { warn!("warm-up command was not successful for pack {pack:?}. 
{status}"); } @@ -122,14 +134,14 @@ fn warm_up_command( fn warm_up_repo( repo: &Repository, packs: impl ExactSizeIterator, -) -> RusticResult<()> { +) -> WarmupResult<()> { let progress_bar = repo.pb.progress_counter("warming up packs..."); progress_bar.set_length(packs.len() as u64); let pool = ThreadPoolBuilder::new() .num_threads(constants::MAX_READER_THREADS_NUM) .build() - .map_err(RepositoryErrorKind::FromThreadPoolbilderError)?; + .map_err(|_err| todo!("Error transition"))?; let progress_bar_ref = &progress_bar; let backend = &repo.be; pool.in_place_scope(|scope| { diff --git a/crates/core/src/vfs.rs b/crates/core/src/vfs.rs index 5ae211b9a..0ac592563 100644 --- a/crates/core/src/vfs.rs +++ b/crates/core/src/vfs.rs @@ -18,16 +18,28 @@ pub use crate::vfs::webdavfs::WebDavFS; use crate::{ blob::{tree::TreeId, BlobId, DataId}, - error::VfsErrorKind, - repofile::{BlobType, Metadata, Node, NodeType, SnapshotFile}, -}; -use crate::{ + error::RusticResult, index::ReadIndex, + repofile::{BlobType, Metadata, Node, NodeType, SnapshotFile}, repository::{IndexedFull, IndexedTree, Repository}, vfs::format::FormattedSnapshot, - RusticResult, }; +/// [`VfsErrorKind`] describes the errors that can be returned from the Virtual File System +#[derive(thiserror::Error, Debug, displaydoc::Display)] +pub enum VfsErrorKind { + /// No directory entries for symlink found: `{0:?}` + NoDirectoryEntriesForSymlinkFound(OsString), + /// Directory exists as non-virtual directory + DirectoryExistsAsNonVirtual, + /// Only normal paths allowed + OnlyNormalPathsAreAllowed, + /// Name `{0:?}` doesn't exist + NameDoesNotExist(OsString), +} + +pub(crate) type VfsResult = Result; + #[derive(Debug, Clone, Copy)] /// `IdenticalSnapshot` describes how to handle identical snapshots.
pub enum IdenticalSnapshot { @@ -94,7 +106,7 @@ impl VfsTree { /// /// [`VfsErrorKind::DirectoryExistsAsNonVirtual`]: crate::error::VfsErrorKind::DirectoryExistsAsNonVirtual /// [`VfsErrorKind::OnlyNormalPathsAreAllowed`]: crate::error::VfsErrorKind::OnlyNormalPathsAreAllowed - fn add_tree(&mut self, path: &Path, new_tree: Self) -> RusticResult<()> { + fn add_tree(&mut self, path: &Path, new_tree: Self) -> VfsResult<()> { let mut tree = self; let mut components = path.components(); let Some(Component::Normal(last)) = components.next_back() else { @@ -137,7 +149,7 @@ impl VfsTree { /// # Returns /// /// If the path is within a real repository tree, this returns the [`VfsTree::RusticTree`] and the remaining path - fn get_path(&self, path: &Path) -> RusticResult> { + fn get_path(&self, path: &Path) -> VfsResult> { let mut tree = self; let mut components = path.components(); loop { @@ -249,7 +261,7 @@ impl Vfs { let filename = path.file_name().map(OsStr::to_os_string); let parent_path = path.parent().map(Path::to_path_buf); - // Save pathes for latest entries, if requested + // Save paths for latest entries, if requested if matches!(latest_option, Latest::AsLink) { _ = dirs_for_link.insert(parent_path.clone(), filename.clone()); } @@ -264,10 +276,12 @@ impl Vfs { && last_tree == snap.tree { if let Some(name) = last_name { - tree.add_tree(path, VfsTree::Link(name))?; + tree.add_tree(path, VfsTree::Link(name)) + .map_err(|_err| todo!("Error transition"))?; } } else { - tree.add_tree(path, VfsTree::RusticTree(snap.tree))?; + tree.add_tree(path, VfsTree::RusticTree(snap.tree)) + .map_err(|_err| todo!("Error transition"))?; } } last_parent = parent_path; @@ -282,7 +296,8 @@ impl Vfs { for (path, target) in dirs_for_link { if let (Some(mut path), Some(target)) = (path, target) { path.push("latest"); - tree.add_tree(&path, VfsTree::Link(target))?; + tree.add_tree(&path, VfsTree::Link(target)) + .map_err(|_err| todo!("Error transition"))?; } } } @@ -290,7 +305,8 @@ impl Vfs { 
for (path, subtree) in dirs_for_snap { if let Some(mut path) = path { path.push("latest"); - tree.add_tree(&path, VfsTree::RusticTree(subtree))?; + tree.add_tree(&path, VfsTree::RusticTree(subtree)) + .map_err(|_err| todo!("Error transition"))?; } } } @@ -321,7 +337,11 @@ impl Vfs { path: &Path, ) -> RusticResult { let meta = Metadata::default(); - match self.tree.get_path(path)? { + match self + .tree + .get_path(path) + .map_err(|_err| todo!("Error transition"))? + { VfsPath::RusticPath(tree_id, path) => Ok(repo.node_from_path(*tree_id, &path)?), VfsPath::VirtualTree(_) => { Ok(Node::new(String::new(), NodeType::Dir, meta, None, None)) @@ -364,7 +384,11 @@ impl Vfs { repo: &Repository, path: &Path, ) -> RusticResult> { - let result = match self.tree.get_path(path)? { + let result = match self + .tree + .get_path(path) + .map_err(|_err| todo!("Error transition"))? + { VfsPath::RusticPath(tree_id, path) => { let node = repo.node_from_path(*tree_id, &path)?; if node.is_dir() { @@ -385,7 +409,8 @@ impl Vfs { }) .collect(), VfsPath::Link(str) => { - return Err(VfsErrorKind::NoDirectoryEntriesForSymlinkFound(str.clone()).into()); + return Err(VfsErrorKind::NoDirectoryEntriesForSymlinkFound(str.clone())) + .map_err(|_err| todo!("Error transition")); } }; Ok(result) @@ -461,7 +486,7 @@ impl OpenFile { }) .collect(); - // content is assumed to be partioned, so we add a starts_at:MAX entry + // content is assumed to be partitioned, so we add a starts_at:MAX entry content.push(BlobInfo { id: DataId::default(), starts_at: usize::MAX, @@ -496,21 +521,30 @@ impl OpenFile { // find the start of relevant blobs => find the largest index such that self.content[i].starts_at <= offset, but // self.content[i+1] > offset (note that a last dummy element has been added) let mut i = self.content.partition_point(|c| c.starts_at <= offset) - 1; + offset -= self.content[i].starts_at; + let mut result = BytesMut::with_capacity(length); while length > 0 && i < self.content.len() - 1 { let 
data = repo.get_blob_cached(&BlobId::from(*self.content[i].id), BlobType::Data)?; + if offset > data.len() { // we cannot read behind the blob. This only happens if offset is too large to fit in the last blob break; } + let to_copy = (data.len() - offset).min(length); + result.extend_from_slice(&data[offset..offset + to_copy]); + offset = 0; + length -= to_copy; + i += 1; } + Ok(result.into()) } } diff --git a/crates/core/tests/integration.rs b/crates/core/tests/integration.rs index 4a210234d..57d5a63e1 100644 --- a/crates/core/tests/integration.rs +++ b/crates/core/tests/integration.rs @@ -253,7 +253,7 @@ fn test_backup_with_tar_gz_passes( // re-read index let repo = repo.to_indexed_ids()?; - // third backup with tags and explicitely given parent + // third backup with tags and explicitly given parent let snap = SnapshotOptions::default() .tags([StringList::from_str("a,b")?]) .to_snapshot()?;