Skip to content

Commit

Permalink
refactor(commands): decouple logic from option structs for check, pru…
Browse files Browse the repository at this point in the history
…ne, repair, key, and restore (#317)

I saw in #224 that `CheckOptions::run()` has been moved to type
`CheckResultsCollector`. In other commands we actually have functions,
not associated to any type, that take their options as parameters. I
applied this here to check, prune, repair, key, and restore as well,
because I think it reduces coupling and increases testability.

The idea behind having these standalone functions is that check, prune,
repair, key, and restore are not run on their options (as a method of
e.g. `CheckOptions`), but rather take parameters, where one is the e.g.
`CheckOption` itself.

In principle: methods that implement commands don't operate on their own
options and have side effects on other types - options are passed into
functions as parameters.

Furthermore, a `check()` on `CheckOptions` sounds as if it validates these
options itself, rather than being run on an external type. I think from
that POV it also makes sense to have such freestanding functions as
entry point to our commands in `rustic_core`.

---------

Signed-off-by: simonsan <[email protected]>
  • Loading branch information
simonsan authored Oct 9, 2024
1 parent 428fa7a commit 29b2a78
Show file tree
Hide file tree
Showing 10 changed files with 1,066 additions and 965 deletions.
172 changes: 88 additions & 84 deletions crates/core/src/commands/check.rs
Original file line number Diff line number Diff line change
Expand Up @@ -136,106 +136,110 @@ pub struct CheckOptions {
pub read_data_subset: ReadSubsetOption,
}

impl CheckOptions {
/// Runs the `check` command
///
/// # Type Parameters
///
/// * `P` - The progress bar type.
/// * `S` - The state the repository is in.
///
/// # Arguments
///
/// * `repo` - The repository to check
///
/// # Errors
///
/// If the repository is corrupted
pub(crate) fn run<P: ProgressBars, S: Open>(
self,
repo: &Repository<P, S>,
trees: Vec<TreeId>,
) -> RusticResult<()> {
let be = repo.dbe();
let cache = repo.cache();
let hot_be = &repo.be_hot;
let raw_be = repo.dbe();
let pb = &repo.pb;
if !self.trust_cache {
if let Some(cache) = &cache {
for file_type in [FileType::Snapshot, FileType::Index] {
// list files in order to clean up the cache
//
// This lists files here and later when reading index / checking snapshots
// TODO: Only list the files once...
_ = be
.list_with_size(file_type)
.map_err(RusticErrorKind::Backend)?;

let p = pb.progress_bytes(format!("checking {file_type:?} in cache..."));
// TODO: Make concurrency (20) customizable
check_cache_files(20, cache, raw_be, file_type, &p)?;
}
/// Runs the `check` command
///
/// # Type Parameters
///
/// * `P` - The progress bar type.
/// * `S` - The state the repository is in.
///
/// # Arguments
///
/// * `repo` - The repository to check
/// * `opts` - The check options to use
/// * `trees` - The trees to check
///
/// # Errors
///
/// If the repository is corrupted
///
/// # Panics
///
// TODO: Add panics
pub(crate) fn check_repository<P: ProgressBars, S: Open>(
repo: &Repository<P, S>,
opts: CheckOptions,
trees: Vec<TreeId>,
) -> RusticResult<()> {
let be = repo.dbe();
let cache = repo.cache();
let hot_be = &repo.be_hot;
let raw_be = repo.dbe();
let pb = &repo.pb;
if !opts.trust_cache {
if let Some(cache) = &cache {
for file_type in [FileType::Snapshot, FileType::Index] {
// list files in order to clean up the cache
//
// This lists files here and later when reading index / checking snapshots
// TODO: Only list the files once...
_ = be
.list_with_size(file_type)
.map_err(RusticErrorKind::Backend)?;

let p = pb.progress_bytes(format!("checking {file_type:?} in cache..."));
// TODO: Make concurrency (20) customizable
check_cache_files(20, cache, raw_be, file_type, &p)?;
}
}
}

if let Some(hot_be) = hot_be {
for file_type in [FileType::Snapshot, FileType::Index] {
check_hot_files(raw_be, hot_be, file_type, pb)?;
}
if let Some(hot_be) = hot_be {
for file_type in [FileType::Snapshot, FileType::Index] {
check_hot_files(raw_be, hot_be, file_type, pb)?;
}
}

let index_collector = check_packs(be, hot_be, pb)?;
let index_collector = check_packs(be, hot_be, pb)?;

if let Some(cache) = &cache {
let p = pb.progress_spinner("cleaning up packs from cache...");
let ids: Vec<_> = index_collector
.tree_packs()
.iter()
.map(|(id, size)| (**id, *size))
.collect();
if let Err(err) = cache.remove_not_in_list(FileType::Pack, &ids) {
warn!("Error in cache backend removing pack files: {err}");
}
p.finish();
if let Some(cache) = &cache {
let p = pb.progress_spinner("cleaning up packs from cache...");
let ids: Vec<_> = index_collector
.tree_packs()
.iter()
.map(|(id, size)| (**id, *size))
.collect();
if let Err(err) = cache.remove_not_in_list(FileType::Pack, &ids) {
warn!("Error in cache backend removing pack files: {err}");
}
p.finish();

if !self.trust_cache {
let p = pb.progress_bytes("checking packs in cache...");
// TODO: Make concurrency (5) customizable
check_cache_files(5, cache, raw_be, FileType::Pack, &p)?;
}
if !opts.trust_cache {
let p = pb.progress_bytes("checking packs in cache...");
// TODO: Make concurrency (5) customizable
check_cache_files(5, cache, raw_be, FileType::Pack, &p)?;
}
}

let index_be = GlobalIndex::new_from_index(index_collector.into_index());
let index_be = GlobalIndex::new_from_index(index_collector.into_index());

let packs = check_trees(be, &index_be, trees, pb)?;
let packs = check_trees(be, &index_be, trees, pb)?;

if self.read_data {
let packs = index_be
.into_index()
.into_iter()
.filter(|p| packs.contains(&p.id));
if opts.read_data {
let packs = index_be
.into_index()
.into_iter()
.filter(|p| packs.contains(&p.id));

let packs = self.read_data_subset.apply(packs);
let packs = opts.read_data_subset.apply(packs);

repo.warm_up_wait(packs.iter().map(|pack| pack.id))?;
repo.warm_up_wait(packs.iter().map(|pack| pack.id))?;

let total_pack_size = packs.iter().map(|pack| u64::from(pack.pack_size())).sum();
let p = pb.progress_bytes("reading pack data...");
p.set_length(total_pack_size);
let total_pack_size = packs.iter().map(|pack| u64::from(pack.pack_size())).sum();
let p = pb.progress_bytes("reading pack data...");
p.set_length(total_pack_size);

packs.into_par_iter().for_each(|pack| {
let id = pack.id;
let data = be.read_full(FileType::Pack, &id).unwrap();
match check_pack(be, pack, data, &p) {
Ok(()) => {}
Err(err) => error!("Error reading pack {id} : {err}",),
}
});
p.finish();
}
Ok(())
packs.into_par_iter().for_each(|pack| {
let id = pack.id;
let data = be.read_full(FileType::Pack, &id).unwrap();
match check_pack(be, pack, data, &p) {
Ok(()) => {}
Err(err) => error!("Error reading pack {id} : {err}",),
}
});
p.finish();
}
Ok(())
}

/// Checks if all files in the backend are also in the hot backend
Expand Down
4 changes: 2 additions & 2 deletions crates/core/src/commands/init.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ use crate::{
chunker::random_poly,
commands::{
config::{save_config, ConfigOptions},
key::KeyOptions,
key::{init_key, KeyOptions},
},
crypto::aespoly1305::Key,
error::{RusticErrorKind, RusticResult},
Expand Down Expand Up @@ -86,7 +86,7 @@ pub(crate) fn init_with_config<P, S>(
config: &ConfigFile,
) -> RusticResult<Key> {
repo.be.create().map_err(RusticErrorKind::Backend)?;
let (key, id) = key_opts.init_key(repo, pass)?;
let (key, id) = init_key(repo, key_opts, pass)?;
info!("key {id} successfully added.");
save_config(repo, config.clone(), key)?;

Expand Down
168 changes: 87 additions & 81 deletions crates/core/src/commands/key.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,88 +28,94 @@ pub struct KeyOptions {
pub with_created: bool,
}

impl KeyOptions {
/// Add the current key to the repository.
///
/// # Type Parameters
///
/// * `P` - The progress bar type.
/// * `S` - The state the repository is in.
///
/// # Arguments
///
/// * `repo` - The repository to add the key to.
/// * `pass` - The password to encrypt the key with.
///
/// # Errors
///
/// * [`CommandErrorKind::FromJsonError`] - If the key could not be serialized.
///
/// # Returns
///
/// The id of the key.
///
/// [`CommandErrorKind::FromJsonError`]: crate::error::CommandErrorKind::FromJsonError
pub(crate) fn add_key<P, S: Open>(
&self,
repo: &Repository<P, S>,
pass: &str,
) -> RusticResult<KeyId> {
let key = repo.dbe().key();
self.add(repo, pass, *key)
}
/// Add the current key to the repository.
///
/// # Type Parameters
///
/// * `P` - The progress bar type
/// * `S` - The state the repository is in
///
/// # Arguments
///
/// * `repo` - The repository to add the key to
/// * `opts` - The key options to use
/// * `pass` - The password to encrypt the key with
///
/// # Errors
///
/// * [`CommandErrorKind::FromJsonError`] - If the key could not be serialized
///
/// # Returns
///
/// The id of the key.
///
/// [`CommandErrorKind::FromJsonError`]: crate::error::CommandErrorKind::FromJsonError
pub(crate) fn add_current_key_to_repo<P, S: Open>(
repo: &Repository<P, S>,
opts: &KeyOptions,
pass: &str,
) -> RusticResult<KeyId> {
let key = repo.dbe().key();
add_key_to_repo(repo, opts, pass, *key)
}

/// Initialize a new key.
///
/// Generates a fresh random key and stores it in the repository,
/// encrypted with the given password.
///
/// # Type Parameters
///
/// * `P` - The progress bar type
/// * `S` - The state the repository is in
///
/// # Arguments
///
/// * `repo` - The repository to add the key to
/// * `opts` - The key options to use
/// * `pass` - The password to encrypt the key with
///
/// # Errors
///
/// * [`CommandErrorKind::FromJsonError`] - If the key could not be serialized
///
/// # Returns
///
/// A tuple of the key and the id of the key.
///
/// [`CommandErrorKind::FromJsonError`]: crate::error::CommandErrorKind::FromJsonError
pub(crate) fn init_key<P, S>(
    repo: &Repository<P, S>,
    opts: &KeyOptions,
    pass: &str,
) -> RusticResult<(Key, KeyId)> {
    // generate key
    let key = Key::new();
    Ok((key, add_key_to_repo(repo, opts, pass, key)?))
}

/// Add a key to the repository.
///
/// Serializes the key (wrapped in a password-encrypted key file) to JSON
/// and writes it to the backend; the key id is the hash of that data.
///
/// # Arguments
///
/// * `repo` - The repository to add the key to
/// * `opts` - The key options to use
/// * `pass` - The password to encrypt the key with
/// * `key` - The key to add
///
/// # Errors
///
/// * [`CommandErrorKind::FromJsonError`] - If the key could not be serialized.
///
/// # Returns
///
/// The id of the key.
///
/// [`CommandErrorKind::FromJsonError`]: crate::error::CommandErrorKind::FromJsonError
pub(crate) fn add_key_to_repo<P, S>(
    repo: &Repository<P, S>,
    opts: &KeyOptions,
    pass: &str,
    key: Key,
) -> RusticResult<KeyId> {
    let ko = opts.clone();
    let keyfile = KeyFile::generate(key, &pass, ko.hostname, ko.username, ko.with_created)?;

    let data = serde_json::to_vec(&keyfile).map_err(CommandErrorKind::FromJsonError)?;
    // the key file's id is the hash of its serialized content
    let id = KeyId::from(hash(&data));
    repo.be
        .write_bytes(FileType::Key, &id, false, data.into())
        .map_err(RusticErrorKind::Backend)?;
    Ok(id)
}
Loading

0 comments on commit 29b2a78

Please sign in to comment.