diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..3a7baa3 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,17 @@ +name: CI +on: [push, pull_request] + +jobs: + test: + uses: aschampion/gh-actions/.github/workflows/rust-test.yml@v0 + with: + msrv: 1.56 + + semver-checks: + uses: aschampion/gh-actions/.github/workflows/rust-semver-checks.yml@v0 + + publish: + uses: aschampion/gh-actions/.github/workflows/rust-publish.yml@v0 + needs: [test] + if: github.event_name == 'push' && contains(github.ref, 'refs/tags/v') + secrets: inherit diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 0000000..d82647b --- /dev/null +++ b/.rustfmt.toml @@ -0,0 +1 @@ +imports_layout = "Vertical" diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 7e935bb..0000000 --- a/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -notifications: - email: false -language: rust -sudo: required -rust: - - stable - - beta - - nightly -addons: - apt: - packages: - - libssl-dev -cache: cargo -matrix: - allow_failures: - - rust: nightly -before_cache: | - if [[ "$TRAVIS_RUST_VERSION" == nightly ]]; then - RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install cargo-tarpaulin - fi -before_script: - - bash -c 'if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then - rustup component add clippy; - fi' -script: - - RUSTFLAGS="-D warnings" cargo build --verbose - - bash -c 'if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then - cargo clippy --all -- -D warnings; - fi' - - cargo test --verbose - - cargo test --examples --verbose -after_success: | - if [[ "$TRAVIS_RUST_VERSION" == nightly ]]; then - cargo tarpaulin --out Xml --no-default-features --features=filesystem,use_ndarray,gzip --run-types Doctests Tests - bash <(curl -s https://codecov.io/bash) - fi diff --git a/CHANGELOG.md b/CHANGELOG.md index df603ec..7c4a906 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + + +## [Unreleased] +### Changed +- Increase MSRV 1.39 -> 1.56 for tool and dependency upgrades. ## [0.7.6] - 2020-10-26 ### Added @@ -180,3 +185,23 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. 
## [0.1.0] - 2018-02-28 + + + +[Unreleased]: https://github.com/aschampion/rust-n5/compare/0.7.6...HEAD +[0.7.6]: https://github.com/aschampion/rust-n5/compare/0.7.5...0.7.6 +[0.7.5]: https://github.com/aschampion/rust-n5/compare/0.7.4...0.7.5 +[0.7.4]: https://github.com/aschampion/rust-n5/compare/0.7.3...0.7.4 +[0.7.3]: https://github.com/aschampion/rust-n5/compare/0.7.2...0.7.3 +[0.7.2]: https://github.com/aschampion/rust-n5/compare/0.7.1...0.7.2 +[0.7.1]: https://github.com/aschampion/rust-n5/compare/0.7.0...0.7.1 +[0.7.0]: https://github.com/aschampion/rust-n5/compare/0.6.1...0.7.0 +[0.6.1]: https://github.com/aschampion/rust-n5/compare/0.6.0...0.6.1 +[0.6.0]: https://github.com/aschampion/rust-n5/compare/0.5.0...0.6.0 +[0.5.0]: https://github.com/aschampion/rust-n5/compare/0.4.0...0.5.0 +[0.4.0]: https://github.com/aschampion/rust-n5/compare/0.3.0...0.4.0 +[0.3.0]: https://github.com/aschampion/rust-n5/compare/0.2.3...0.3.0 +[0.2.3]: https://github.com/aschampion/rust-n5/compare/0.2.2...0.2.3 +[0.2.2]: https://github.com/aschampion/rust-n5/compare/0.2.1...0.2.2 +[0.2.1]: https://github.com/aschampion/rust-n5/compare/0.2.0...0.2.1 +[0.2.0]: https://github.com/aschampion/rust-n5/compare/0.1.0...0.2.0 diff --git a/Cargo.toml b/Cargo.toml index 47fe56f..220bfd3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,7 @@ name = "n5" version = "0.7.6" edition = "2018" +rust-version = "1.39" license = "MIT/Apache-2.0" authors = ["Andrew Champion "] description = "Rust implementation of the N5 tensor file system format" diff --git a/README.md b/README.md index bd7620e..8852ed6 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# N5 [![Build Status](https://travis-ci.org/aschampion/rust-n5.svg?branch=master)](https://travis-ci.org/aschampion/rust-n5) [![Coverage](https://codecov.io/gh/aschampion/rust-n5/branch/master/graph/badge.svg)](https://codecov.io/gh/aschampion/rust-n5) +# N [![Build Status](https://github.com/aschampion/rust-n5/actions/workflows/ci.yml/badge.svg)](https://github.com/aschampion/rust-n5/actions/workflows/ci.yml/) [![Coverage](https://codecov.io/gh/aschampion/rust-n5/branch/master/graph/badge.svg)](https://codecov.io/gh/aschampion/rust-n5) A (mostly pure) rust implementation of the [N5 "Not HDF5" n-dimensional tensor file system storage format](https://github.com/saalfeldlab/n5) created by the Saalfeld lab at Janelia Research Campus. @@ -10,7 +10,7 @@ Compatible with Java N5 Version 2.1.3. ## Minimum supported Rust version (MSRV) -Stable 1.39 +Stable 1.56 ## Quick start diff --git a/benches/parallel_write.rs b/benches/parallel_write.rs index a50e502..ab105c8 100644 --- a/benches/parallel_write.rs +++ b/benches/parallel_write.rs @@ -12,10 +12,8 @@ //! will take several hours to run. #![feature(test)] - extern crate test; - use std::fs::File; use std::io::BufReader; @@ -38,7 +36,6 @@ use tiff::decoder::{ use n5::prelude::*; use n5::smallvec::smallvec; - lazy_static! { static ref TEST_IMAGE: Vec = { let mut pixels = Vec::with_capacity(163 * 163 * 93); @@ -54,7 +51,7 @@ lazy_static! { for p in img { pixels.push(p as i8); } - }, + } } decoder.next_image().unwrap(); @@ -66,15 +63,12 @@ lazy_static! 
{ const BLOCK_DIM: u32 = 64; const N_BLOCKS: u64 = 5; -fn write( - n: &N5, - compression: &CompressionType, - block_data: &[T], - pool_size: usize, -) where T: 'static + std::fmt::Debug + ReflectedType + PartialEq + Default + Sync + Send, - N5: N5Writer + Sync + Send + Clone + 'static, - SliceDataBlock>: n5::WriteableDataBlock { - +fn write(n: &N5, compression: &CompressionType, block_data: &[T], pool_size: usize) +where + T: 'static + std::fmt::Debug + ReflectedType + PartialEq + Default + Sync + Send, + N5: N5Writer + Sync + Send + Clone + 'static, + SliceDataBlock>: n5::WriteableDataBlock, +{ let block_size = smallvec![BLOCK_DIM; 3]; let data_attrs = DatasetAttributes::new( smallvec![u64::from(BLOCK_DIM) * N_BLOCKS; 3], @@ -83,9 +77,11 @@ fn write( compression.clone(), ); - let path_name = format!("dataset.{:?}.{}", + let path_name = format!( + "dataset.{:?}.{}", data_attrs.get_data_type(), - data_attrs.get_compression()); + data_attrs.get_compression() + ); n.create_dataset(&path_name, &data_attrs) .expect("Failed to create dataset"); @@ -104,10 +100,7 @@ fn write( let pn = path_name.clone(); let da = data_attrs.clone(); all_jobs.push(pool.spawn_fn(move || { - let block_in = SliceDataBlock::new( - bs, - smallvec![x, y, z], - bd); + let block_in = SliceDataBlock::new(bs, smallvec![x, y, z], bd); ni.write_block(&pn, &da, &block_in) .expect("Failed to write block"); Ok(0) @@ -120,30 +113,36 @@ fn write( } fn bench_write_dtype_compression(b: &mut Bencher, pool_size: usize) - where - T: 'static + ReflectedType + Default + PartialEq + std::fmt::Debug + - std::convert::From + Sync + Send, - C: compression::Compression, - CompressionType: std::convert::From, - SliceDataBlock>: n5::WriteableDataBlock { - +where + T: 'static + + ReflectedType + + Default + + PartialEq + + std::fmt::Debug + + std::convert::From + + Sync + + Send, + C: compression::Compression, + CompressionType: std::convert::From, + SliceDataBlock>: n5::WriteableDataBlock, +{ let dir = tempdir::TempDir::new("rust_n5_integration_tests").unwrap(); - let n = N5Filesystem::open_or_create(dir.path()) - .expect("Failed to create N5 filesystem"); + let n = N5Filesystem::open_or_create(dir.path()).expect("Failed to create N5 filesystem"); let compression = CompressionType::new::(); // TODO: load the test image data. // let block_data: Vec = vec![T::default(); (BLOCK_DIM * BLOCK_DIM * BLOCK_DIM) as usize]; - let block_data = TEST_IMAGE.iter().take((BLOCK_DIM * BLOCK_DIM * BLOCK_DIM) as usize) + let block_data = TEST_IMAGE + .iter() + .take((BLOCK_DIM * BLOCK_DIM * BLOCK_DIM) as usize) .map(|&v| T::from(v)) .collect::>(); b.iter(|| write(&n, &compression, &block_data, pool_size)); - b.bytes = - (BLOCK_DIM * BLOCK_DIM * BLOCK_DIM) as u64 * - (N_BLOCKS * N_BLOCKS * N_BLOCKS) as u64 * - std::mem::size_of::() as u64; + b.bytes = (BLOCK_DIM * BLOCK_DIM * BLOCK_DIM) as u64 + * (N_BLOCKS * N_BLOCKS * N_BLOCKS) as u64 + * std::mem::size_of::() as u64; } // 1 Thread. Can't macro this because of the concat_idents! limitation. diff --git a/benches/simple.rs b/benches/simple.rs index a4fd1f9..0f0f101 100644 --- a/benches/simple.rs +++ b/benches/simple.rs @@ -1,10 +1,8 @@ //! 
# Simple In-memory Read/Write Benchmarks #![feature(test)] - extern crate test; - use rand::{ distributions::Standard, Rng, @@ -12,22 +10,19 @@ use rand::{ use test::Bencher; use n5::prelude::*; +use n5::smallvec::smallvec; use n5::{ DefaultBlock, DefaultBlockReader, DefaultBlockWriter, }; -use n5::smallvec::smallvec; - -fn test_block_compression_rw( - compression: compression::CompressionType, - b: &mut Bencher -) where T: 'static + std::fmt::Debug + ReflectedType + PartialEq + Default, - rand::distributions::Standard: rand::distributions::Distribution, - VecDataBlock: n5::ReadableDataBlock + n5::WriteableDataBlock, +fn test_block_compression_rw(compression: compression::CompressionType, b: &mut Bencher) +where + T: 'static + std::fmt::Debug + ReflectedType + PartialEq + Default, + rand::distributions::Standard: rand::distributions::Distribution, + VecDataBlock: n5::ReadableDataBlock + n5::WriteableDataBlock, { - let data_attrs = DatasetAttributes::new( smallvec![1024, 1024, 1024], smallvec![64, 64, 64], @@ -41,20 +36,20 @@ fn test_block_compression_rw( let block_in = VecDataBlock::new( data_attrs.get_block_size().into(), smallvec![0, 0, 0], - block_data.clone()); + block_data.clone(), + ); let mut inner: Vec = Vec::new(); b.iter(|| { - DefaultBlock::write_block( - &mut inner, - &data_attrs, - &block_in).expect("write_block failed"); + DefaultBlock::write_block(&mut inner, &data_attrs, &block_in).expect("write_block failed"); let _block_out = >::read_block( &inner[..], &data_attrs, - smallvec![0, 0, 0]).expect("read_block failed"); + smallvec![0, 0, 0], + ) + .expect("read_block failed"); }); b.bytes = (data_attrs.get_block_num_elements() * data_attrs.get_data_type().size_of()) as u64; diff --git a/release.toml b/release.toml new file mode 100644 index 0000000..7b2aee6 --- /dev/null +++ b/release.toml @@ -0,0 +1,11 @@ +publish = false +pre-release-commit-message = "Version {{version}}" +pre-release-replacements = [ + {file="CHANGELOG.md", search="^## \\[Unreleased\\]", replace="## [{{version}}] - {{date}}", exactly=1}, + {file="CHANGELOG.md", search="\\[Unreleased\\]", replace="[{{version}}]", min=1}, + {file="CHANGELOG.md", search="\\.\\.\\.HEAD", replace="...{{tag_name}}", exactly=1}, +] +post-release-replacements = [ + {file="CHANGELOG.md", search="", replace="\n## [Unreleased]\n\n", exactly=1}, + {file="CHANGELOG.md", search="", replace="\n[Unreleased]: https://github.com/aschampion/rust-n5/compare/{{tag_name}}...HEAD", exactly=1}, +] \ No newline at end of file diff --git a/src/compression/bzip.rs b/src/compression/bzip.rs index 033013b..6490dfb 100644 --- a/src/compression/bzip.rs +++ b/src/compression/bzip.rs @@ -1,17 +1,17 @@ -use std::io::{Read, Write}; +use std::io::{ + Read, + Write, +}; -use bzip2::Compression as BzCompression; use bzip2::read::BzDecoder; use bzip2::write::BzEncoder; +use bzip2::Compression as BzCompression; use serde::{ Deserialize, Serialize, }; -use super::{ - Compression, -}; - +use super::Compression; #[derive(Clone, Serialize, Deserialize, PartialEq, Debug)] #[serde(rename_all = "camelCase")] @@ -20,7 +20,9 @@ pub struct Bzip2Compression { block_size: u8, } -fn default_bzip_block_size() -> u8 {9} +fn default_bzip_block_size() -> u8 { + 9 +} impl Default for Bzip2Compression { fn default() -> Bzip2Compression { @@ -36,7 +38,10 @@ impl Compression for Bzip2Compression { } fn encoder<'a, W: Write + 'a>(&self, w: W) -> Box { - Box::new(BzEncoder::new(w, BzCompression::new(u32::from(self.block_size)))) + Box::new(BzEncoder::new( + w, + 
BzCompression::new(u32::from(self.block_size)), + )) } } @@ -46,6 +51,7 @@ mod tests { use crate::compression::CompressionType; // Example from the n5 documentation spec. + #[rustfmt::skip] const TEST_BLOCK_I16_BZIP2: [u8; 59] = [ 0x00, 0x00, 0x00, 0x03, @@ -69,7 +75,8 @@ mod tests { fn test_read_doc_spec_block() { crate::tests::test_read_doc_spec_block( TEST_BLOCK_I16_BZIP2.as_ref(), - CompressionType::Bzip2(Bzip2Compression::default())); + CompressionType::Bzip2(Bzip2Compression::default()), + ); } #[test] @@ -78,11 +85,14 @@ mod tests { fn test_write_doc_spec_block() { crate::tests::test_write_doc_spec_block( TEST_BLOCK_I16_BZIP2.as_ref(), - CompressionType::Bzip2(Bzip2Compression::default())); + CompressionType::Bzip2(Bzip2Compression::default()), + ); } #[test] fn test_rw() { - crate::tests::test_block_compression_rw(CompressionType::Bzip2(Bzip2Compression::default())); + crate::tests::test_block_compression_rw( + CompressionType::Bzip2(Bzip2Compression::default()), + ); } } diff --git a/src/compression/gzip.rs b/src/compression/gzip.rs index 43d1d8b..d6691da 100644 --- a/src/compression/gzip.rs +++ b/src/compression/gzip.rs @@ -1,17 +1,17 @@ -use std::io::{Read, Write}; +use std::io::{ + Read, + Write, +}; -use flate2::Compression as GzCompression; use flate2::read::GzDecoder; use flate2::write::GzEncoder; +use flate2::Compression as GzCompression; use serde::{ Deserialize, Serialize, }; -use super::{ - Compression, -}; - +use super::Compression; #[derive(Clone, Serialize, Deserialize, PartialEq, Debug)] #[serde(rename_all = "camelCase")] @@ -35,7 +35,9 @@ impl GzipCompression { } } -fn default_gzip_level() -> i32 {-1} +fn default_gzip_level() -> i32 { + -1 +} impl Default for GzipCompression { fn default() -> GzipCompression { @@ -61,6 +63,7 @@ mod tests { use crate::compression::CompressionType; // Example from the n5 documentation spec. + #[rustfmt::skip] const TEST_BLOCK_I16_GZIP: [u8; 48] = [ 0x00, 0x00, 0x00, 0x03, @@ -81,7 +84,8 @@ mod tests { fn test_read_doc_spec_block() { crate::tests::test_read_doc_spec_block( TEST_BLOCK_I16_GZIP.as_ref(), - CompressionType::Gzip(GzipCompression::default())); + CompressionType::Gzip(GzipCompression::default()), + ); } #[test] @@ -93,7 +97,8 @@ mod tests { fudge_test_block[25] = 255; crate::tests::test_write_doc_spec_block( &fudge_test_block, - CompressionType::Gzip(GzipCompression::default())); + CompressionType::Gzip(GzipCompression::default()), + ); } #[test] diff --git a/src/compression/lz.rs b/src/compression/lz.rs index 80cb744..7c7c748 100644 --- a/src/compression/lz.rs +++ b/src/compression/lz.rs @@ -1,4 +1,8 @@ -use std::io::{Read, Result, Write}; +use std::io::{ + Read, + Result, + Write, +}; use lz4::{ BlockMode, @@ -12,10 +16,7 @@ use serde::{ Serialize, }; -use super::{ - Compression, -}; - +use super::Compression; // From: https://github.com/bozaro/lz4-rs/issues/9 // Kludge to finish Lz4 encoder on Drop. 
@@ -41,7 +42,6 @@ impl Drop for Wrapper { } } - #[derive(Clone, Serialize, Deserialize, PartialEq, Debug)] #[serde(rename_all = "camelCase")] pub struct Lz4Compression { @@ -65,7 +65,9 @@ impl Lz4Compression { } } -fn default_lz4_block_size() -> i32 {65_536} +fn default_lz4_block_size() -> i32 { + 65_536 +} impl Default for Lz4Compression { fn default() -> Lz4Compression { @@ -86,7 +88,7 @@ impl Compression for Lz4Compression { .block_mode(BlockMode::Independent) .build(w) .expect("TODO"); - Box::new(Wrapper {s: Some(encoder)}) + Box::new(Wrapper { s: Some(encoder) }) } } @@ -95,6 +97,7 @@ mod tests { use super::*; use crate::compression::CompressionType; + #[rustfmt::skip] const TEST_BLOCK_I16_LZ4: [u8; 47] = [ 0x00, 0x00, 0x00, 0x03, @@ -115,14 +118,16 @@ mod tests { fn test_read_doc_spec_block() { crate::tests::test_read_doc_spec_block( TEST_BLOCK_I16_LZ4.as_ref(), - CompressionType::Lz4(Lz4Compression::default())); + CompressionType::Lz4(Lz4Compression::default()), + ); } #[test] fn test_write_doc_spec_block() { crate::tests::test_write_doc_spec_block( TEST_BLOCK_I16_LZ4.as_ref(), - CompressionType::Lz4(Lz4Compression::default())); + CompressionType::Lz4(Lz4Compression::default()), + ); } #[test] diff --git a/src/compression/lz_pure.rs b/src/compression/lz_pure.rs index f274754..c41ca41 100644 --- a/src/compression/lz_pure.rs +++ b/src/compression/lz_pure.rs @@ -1,4 +1,8 @@ -use std::io::{Read, Result, Write}; +use std::io::{ + Read, + Result, + Write, +}; use lz_fear::framed::{ CompressionSettings, @@ -9,10 +13,7 @@ use serde::{ Serialize, }; -use super::{ - Compression, -}; - +use super::Compression; struct Wrapper { writer: W, @@ -31,7 +32,6 @@ impl Write for Wrapper { } } - #[derive(Clone, Serialize, Deserialize, PartialEq, Debug)] #[serde(rename_all = "camelCase")] pub struct Lz4Compression { @@ -39,7 +39,9 @@ pub struct Lz4Compression { block_size: i32, } -fn default_lz4_block_size() -> i32 {65_536} +fn default_lz4_block_size() -> i32 { + 65_536 +} impl Default for Lz4Compression { fn default() -> Lz4Compression { @@ -51,17 +53,19 @@ impl Default for Lz4Compression { impl Compression for Lz4Compression { fn decoder<'a, R: Read + 'a>(&self, r: R) -> Box { - Box::new(LZ4FrameReader::new(r).expect("TODO: LZ4 returns a result here").into_read()) + Box::new( + LZ4FrameReader::new(r) + .expect("TODO: LZ4 returns a result here") + .into_read(), + ) } fn encoder<'a, W: Write + 'a>(&self, writer: W) -> Box { let mut settings = CompressionSettings::default(); - settings.block_size(self.block_size as usize) + settings + .block_size(self.block_size as usize) .independent_blocks(true); - Box::new(Wrapper { - writer, - settings, - }) + Box::new(Wrapper { writer, settings }) } } @@ -70,6 +74,7 @@ mod tests { use super::*; use crate::compression::CompressionType; + #[rustfmt::skip] const TEST_BLOCK_I16_LZ4: [u8; 47] = [ 0x00, 0x00, 0x00, 0x03, @@ -90,14 +95,16 @@ mod tests { fn test_read_doc_spec_block() { crate::tests::test_read_doc_spec_block( TEST_BLOCK_I16_LZ4.as_ref(), - CompressionType::Lz4(Lz4Compression::default())); + CompressionType::Lz4(Lz4Compression::default()), + ); } #[test] fn test_write_doc_spec_block() { crate::tests::test_write_doc_spec_block( TEST_BLOCK_I16_LZ4.as_ref(), - CompressionType::Lz4(Lz4Compression::default())); + CompressionType::Lz4(Lz4Compression::default()), + ); } #[test] diff --git a/src/compression/mod.rs b/src/compression/mod.rs index b273dd6..ad0e95c 100644 --- a/src/compression/mod.rs +++ b/src/compression/mod.rs @@ -1,30 +1,33 @@ //! 
Compression for block voxel data. -use std::io::{Read, Write}; +use std::io::{ + Read, + Write, +}; use serde::{ Deserialize, Serialize, }; - -pub mod raw; #[cfg(feature = "bzip")] pub mod bzip; #[cfg(feature = "gzip")] pub mod gzip; -#[cfg(feature = "lz")] +#[cfg(all(feature = "lz", not(feature = "lz_pure")))] pub mod lz; #[cfg(feature = "lz_pure")] pub(self) mod lz_pure; +pub mod raw; #[cfg(feature = "lz_pure")] -pub mod lz { pub use super::lz_pure::*; } +pub mod lz { + pub use super::lz_pure::*; +} #[cfg(feature = "xz")] pub mod xz; - /// Common interface for compressing writers and decompressing readers. -pub trait Compression : Default { +pub trait Compression: Default { fn decoder<'a, R: Read + 'a>(&self, r: R) -> Box; fn encoder<'a, W: Write + 'a>(&self, w: W) -> Box; @@ -48,7 +51,9 @@ pub enum CompressionType { impl CompressionType { pub fn new() -> CompressionType - where CompressionType: std::convert::From { + where + CompressionType: std::convert::From, + { T::default().into() } } @@ -99,21 +104,25 @@ impl Compression for CompressionType { impl std::fmt::Display for CompressionType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", match *self { - CompressionType::Raw(_) => "Raw", + write!( + f, + "{}", + match *self { + CompressionType::Raw(_) => "Raw", - #[cfg(feature = "bzip")] - CompressionType::Bzip2(_) => "Bzip2", + #[cfg(feature = "bzip")] + CompressionType::Bzip2(_) => "Bzip2", - #[cfg(feature = "gzip")] - CompressionType::Gzip(_) => "Gzip", + #[cfg(feature = "gzip")] + CompressionType::Gzip(_) => "Gzip", - #[cfg(feature = "xz")] - CompressionType::Xz(_) => "Xz", + #[cfg(feature = "xz")] + CompressionType::Xz(_) => "Xz", - #[cfg(any(feature = "lz", feature = "lz_pure"))] - CompressionType::Lz4(_) => "Lz4", - }) + #[cfg(any(feature = "lz", feature = "lz_pure"))] + CompressionType::Lz4(_) => "Lz4", + } + ) } } @@ -148,7 +157,7 @@ macro_rules! compression_from_impl { CompressionType::$variant(c) } } - } + }; } compression_from_impl!(Raw, raw::RawCompression); diff --git a/src/compression/raw.rs b/src/compression/raw.rs index 0012990..0e0a847 100644 --- a/src/compression/raw.rs +++ b/src/compression/raw.rs @@ -1,4 +1,7 @@ -use std::io::{Read, Write}; +use std::io::{ + Read, + Write, +}; use serde::{ Deserialize, @@ -7,7 +10,6 @@ use serde::{ use super::Compression; - #[derive(Clone, Serialize, Deserialize, PartialEq, Debug, Default)] pub struct RawCompression; @@ -27,6 +29,7 @@ mod tests { use crate::compression::CompressionType; // Example from the n5 documentation spec. 
+ #[rustfmt::skip] const TEST_BLOCK_I16_RAW: [u8; 28] = [ 0x00, 0x00, 0x00, 0x03, @@ -45,14 +48,16 @@ mod tests { fn test_read_doc_spec_block() { crate::tests::test_read_doc_spec_block( TEST_BLOCK_I16_RAW.as_ref(), - CompressionType::Raw(RawCompression)); + CompressionType::Raw(RawCompression), + ); } #[test] fn test_write_doc_spec_block() { crate::tests::test_write_doc_spec_block( TEST_BLOCK_I16_RAW.as_ref(), - CompressionType::Raw(RawCompression)); + CompressionType::Raw(RawCompression), + ); } #[test] diff --git a/src/compression/xz.rs b/src/compression/xz.rs index 1564401..8efd772 100644 --- a/src/compression/xz.rs +++ b/src/compression/xz.rs @@ -1,4 +1,7 @@ -use std::io::{Read, Write}; +use std::io::{ + Read, + Write, +}; use serde::{ Deserialize, @@ -7,10 +10,7 @@ use serde::{ use xz2::read::XzDecoder; use xz2::write::XzEncoder; -use super::{ - Compression, -}; - +use super::Compression; #[derive(Clone, Serialize, Deserialize, PartialEq, Debug)] #[serde(rename_all = "camelCase")] @@ -19,7 +19,9 @@ pub struct XzCompression { preset: i32, } -fn default_xz_preset() -> i32 {6} +fn default_xz_preset() -> i32 { + 6 +} impl Default for XzCompression { fn default() -> XzCompression { @@ -46,6 +48,7 @@ mod tests { use crate::compression::CompressionType; // Example from the n5 documentation spec. + #[rustfmt::skip] const TEST_BLOCK_I16_XZ: [u8; 84] = [ 0x00, 0x00, 0x00, 0x03, @@ -75,14 +78,16 @@ mod tests { fn test_read_doc_spec_block() { crate::tests::test_read_doc_spec_block( TEST_BLOCK_I16_XZ.as_ref(), - CompressionType::Xz(XzCompression::default())); + CompressionType::Xz(XzCompression::default()), + ); } #[test] fn test_write_doc_spec_block() { crate::tests::test_write_doc_spec_block( TEST_BLOCK_I16_XZ.as_ref(), - CompressionType::Xz(XzCompression::default())); + CompressionType::Xz(XzCompression::default()), + ); } #[test] diff --git a/src/data_type.rs b/src/data_type.rs index aa9d67c..73a1d0b 100644 --- a/src/data_type.rs +++ b/src/data_type.rs @@ -6,7 +6,6 @@ use serde::{ use crate::BlockHeader; use crate::VecDataBlock; - /// Data types representable in N5. #[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Copy)] #[serde(rename_all = "lowercase")] @@ -103,11 +102,7 @@ macro_rules! data_type_match { impl DataType { /// Boilerplate method for reflection of primitive type sizes. pub fn size_of(self) -> usize { - data_type_match!(self, - { - std::mem::size_of::() - } - ) + data_type_match!(self, { std::mem::size_of::() }) } } @@ -125,9 +120,7 @@ impl std::fmt::Display for DataType { pub trait ReflectedType: Send + Sync + Clone + Default + 'static { const VARIANT: DataType; - fn create_data_block( - header: BlockHeader, - ) -> VecDataBlock { + fn create_data_block(header: BlockHeader) -> VecDataBlock { VecDataBlock::::new( header.size, header.grid_position, @@ -141,21 +134,20 @@ macro_rules! 
reflected_type { impl ReflectedType for $d_type { const VARIANT: DataType = DataType::$d_name; } - } + }; } -reflected_type!(UINT8, u8); +reflected_type!(UINT8, u8); reflected_type!(UINT16, u16); reflected_type!(UINT32, u32); reflected_type!(UINT64, u64); -reflected_type!(INT8, i8); +reflected_type!(INT8, i8); reflected_type!(INT16, i16); reflected_type!(INT32, i32); reflected_type!(INT64, i64); reflected_type!(FLOAT32, f32); reflected_type!(FLOAT64, f64); - #[cfg(test)] mod tests { use super::*; diff --git a/src/filesystem.rs b/src/filesystem.rs index 9802e94..dd416da 100644 --- a/src/filesystem.rs +++ b/src/filesystem.rs @@ -5,18 +5,16 @@ use std::fs::{ File, }; use std::io::{ - Error, - ErrorKind, BufReader, BufWriter, + Error, + ErrorKind, Read, Result, Seek, SeekFrom, }; -use std::path::{ - PathBuf, -}; +use std::path::PathBuf; use std::str::FromStr; use fs2::FileExt; @@ -46,11 +44,9 @@ use crate::{ WriteableDataBlock, }; - /// Name of the attributes file stored in the container root and dataset dirs. const ATTRIBUTES_FILE: &str = "attributes.json"; - /// A filesystem-backed N5 container. #[derive(Clone, Debug)] pub struct N5Filesystem { @@ -68,7 +64,7 @@ impl N5Filesystem { let version = reader.get_version()?; if !is_version_compatible(&crate::VERSION, &version) { - return Err(Error::new(ErrorKind::Other, "TODO: Incompatible version")) + return Err(Error::new(ErrorKind::Other, "TODO: Incompatible version")); } } @@ -85,10 +81,18 @@ impl N5Filesystem { fs::create_dir_all(base_path)?; - if reader.get_version().map(|v| !is_version_compatible(&crate::VERSION, &v)).unwrap_or(false) { - return Err(Error::new(ErrorKind::Other, "TODO: Incompatible version")) + if reader + .get_version() + .map(|v| !is_version_compatible(&crate::VERSION, &v)) + .unwrap_or(false) + { + return Err(Error::new(ErrorKind::Other, "TODO: Incompatible version")); } else { - reader.set_attribute("", crate::VERSION_ATTRIBUTE_KEY.to_owned(), crate::VERSION.to_string())?; + reader.set_attribute( + "", + crate::VERSION_ATTRIBUTE_KEY.to_owned(), + crate::VERSION.to_string(), + )?; } Ok(reader) @@ -117,15 +121,21 @@ impl N5Filesystem { // Note: cannot use `canonicalize` on both the constructed dataset path // and `base_path` and check `starts_with`, because `canonicalize` also // requires the path exist. - use std::path::{Component, Path}; + use std::path::{ + Component, + Path, + }; // Normalize the path to be relative. let mut components = Path::new(path_name).components(); while components.as_path().has_root() { match components.next() { - Some(Component::Prefix(_)) => return Err(Error::new( - ErrorKind::NotFound, - "Path name is outside this N5 filesystem on a prefix path")), + Some(Component::Prefix(_)) => { + return Err(Error::new( + ErrorKind::NotFound, + "Path name is outside this N5 filesystem on a prefix path", + )) + } Some(Component::RootDir) => (), // This should be unreachable. _ => return Err(Error::new(ErrorKind::NotFound, "Path is malformed")), @@ -138,7 +148,9 @@ impl N5Filesystem { for component in unrooted_path.components() { match component { // This should be unreachable. 
- Component::Prefix(_) | Component::RootDir => return Err(Error::new(ErrorKind::NotFound, "Path is malformed")), + Component::Prefix(_) | Component::RootDir => { + return Err(Error::new(ErrorKind::NotFound, "Path is malformed")) + } Component::CurDir => continue, Component::ParentDir => nest -= 1, Component::Normal(_) => nest += 1, @@ -146,7 +158,10 @@ impl N5Filesystem { } if nest < 0 { - Err(Error::new(ErrorKind::NotFound, "Path name is outside this N5 filesystem")) + Err(Error::new( + ErrorKind::NotFound, + "Path name is outside this N5 filesystem", + )) } else { Ok(self.base_path.join(unrooted_path)) } @@ -170,12 +185,14 @@ impl N5Filesystem { impl N5Reader for N5Filesystem { fn get_version(&self) -> Result { // TODO: dedicated error type should clean this up. - Ok(Version::from_str(self - .get_attributes("")? + Ok(Version::from_str( + self.get_attributes("")? .get(crate::VERSION_ATTRIBUTE_KEY) - .ok_or_else(|| Error::new(ErrorKind::NotFound, "Version attribute not present"))? - .as_str().unwrap_or("") - ).unwrap()) + .ok_or_else(|| Error::new(ErrorKind::NotFound, "Version attribute not present"))? + .as_str() + .unwrap_or(""), + ) + .unwrap()) } fn get_dataset_attributes(&self, path_name: &str) -> Result { @@ -190,7 +207,8 @@ impl N5Reader for N5Filesystem { } fn get_block_uri(&self, path_name: &str, grid_position: &[u64]) -> Result { - self.get_data_block_path(path_name, grid_position)?.to_str() + self.get_data_block_path(path_name, grid_position)? + .to_str() // TODO: could use URL crate and `from_file_path` here. .map(|s| format!("file://{}", s)) .ok_or_else(|| Error::new(ErrorKind::InvalidData, "Paths must be UTF-8")) @@ -202,23 +220,31 @@ impl N5Reader for N5Filesystem { data_attrs: &DatasetAttributes, grid_position: GridCoord, ) -> Result>> - where VecDataBlock: DataBlock + ReadableDataBlock, - T: ReflectedType { + where + VecDataBlock: DataBlock + ReadableDataBlock, + T: ReflectedType, + { let block_file = self.get_data_block_path(path_name, &grid_position)?; if block_file.is_file() { let file = File::open(block_file)?; file.lock_shared()?; let reader = BufReader::new(file); - Ok(Some(>::read_block( - reader, - data_attrs, - grid_position)?)) + Ok(Some( + >::read_block( + reader, + data_attrs, + grid_position, + )?, + )) } else { Ok(None) } } - fn read_block_into + ReinitDataBlock + ReadableDataBlock>( + fn read_block_into< + T: ReflectedType, + B: DataBlock + ReinitDataBlock + ReadableDataBlock, + >( &self, path_name: &str, data_attrs: &DatasetAttributes, @@ -234,7 +260,8 @@ impl N5Reader for N5Filesystem { reader, data_attrs, grid_position, - block)?; + block, + )?; Ok(Some(())) } else { Ok(None) @@ -277,7 +304,11 @@ impl N5Lister for N5Filesystem { Ok(fs::read_dir(self.get_path(path_name)?)? 
.filter_map(|e| { if let Ok(file) = e { - if fs::metadata(file.path()).map(|f| f.file_type().is_dir()).ok() == Some(true) { + if fs::metadata(file.path()) + .map(|f| f.file_type().is_dir()) + .ok() + == Some(true) + { file.file_name().into_string().ok() } else { None @@ -286,8 +317,7 @@ impl N5Lister for N5Filesystem { None } }) - .collect() - ) + .collect()) } } @@ -339,10 +369,7 @@ impl N5Writer for N5Filesystem { fs::create_dir_all(path) } - fn remove( - &self, - path_name: &str, - ) -> Result<()> { + fn remove(&self, path_name: &str) -> Result<()> { let path = self.get_path(path_name)?; for entry in WalkDir::new(path).contents_first(true) { @@ -379,23 +406,14 @@ impl N5Writer for N5Filesystem { file.set_len(0)?; let buffer = BufWriter::new(file); - >::write_block( - buffer, - data_attrs, - block) + >::write_block(buffer, data_attrs, block) } - fn delete_block( - &self, - path_name: &str, - grid_position: &[u64], - ) -> Result { + fn delete_block(&self, path_name: &str, grid_position: &[u64]) -> Result { let path = self.get_data_block_path(path_name, grid_position)?; if path.exists() { - let file = fs::OpenOptions::new() - .read(true) - .open(&path)?; + let file = fs::OpenOptions::new().read(true).open(&path)?; file.lock_exclusive()?; fs::remove_file(&path)?; } @@ -408,7 +426,10 @@ impl N5Writer for N5Filesystem { mod tests { use super::*; use crate::test_backend; - use crate::tests::{ContextWrapper, N5Testable}; + use crate::tests::{ + ContextWrapper, + N5Testable, + }; use tempdir::TempDir; impl crate::tests::N5Testable for N5Filesystem { @@ -416,13 +437,10 @@ mod tests { fn temp_new_rw() -> Self::Wrapper { let dir = TempDir::new("rust_n5_tests").unwrap(); - let n5 = N5Filesystem::open_or_create(dir.path()) - .expect("Failed to create N5 filesystem"); + let n5 = + N5Filesystem::open_or_create(dir.path()).expect("Failed to create N5 filesystem"); - ContextWrapper { - context: dir, - n5, - } + ContextWrapper { context: dir, n5 } } fn open_reader(&self) -> Self { @@ -440,7 +458,10 @@ mod tests { assert!(create.get_path("/").is_ok()); assert_eq!(create.get_path("/").unwrap(), create.get_path("").unwrap()); assert!(create.get_path("/foo/bar").is_ok()); - assert_eq!(create.get_path("/foo/bar").unwrap(), create.get_path("foo/bar").unwrap()); + assert_eq!( + create.get_path("/foo/bar").unwrap(), + create.get_path("foo/bar").unwrap() + ); assert!(create.get_path("//").is_ok()); assert_eq!(create.get_path("//").unwrap(), create.get_path("").unwrap()); assert!(create.get_path("/..").is_err()); @@ -483,22 +504,38 @@ mod tests { smallvec![10, 10, 10], smallvec![5, 5, 5], crate::DataType::INT32, - crate::compression::CompressionType::Raw(crate::compression::raw::RawCompression::default()), + crate::compression::CompressionType::Raw( + crate::compression::raw::RawCompression::default(), + ), ); - wrapper.n5.create_dataset("linked_dataset", &data_attrs) + wrapper + .n5 + .create_dataset("linked_dataset", &data_attrs) .expect("Failed to create dataset"); assert!(wrapper.n5.dataset_exists("linked_dataset").unwrap()); } #[test] + // TODO: this test is ignored on windows because the dataset path in the returned URI still includes the unix slash. + // This will be fixed by parsing dataset paths as unix paths in `get_path`, then translating to platform-native + // `PathBuf`s. However, the only way to do this at the moment with the `typed_paths` crate depends on unstable + // features. See also rust issue #66621. 
+ #[cfg_attr(windows, ignore)] fn test_get_block_uri() { let dir = TempDir::new("rust_n5_tests").unwrap(); let path_str = dir.path().to_str().unwrap(); - let create = N5Filesystem::open_or_create(path_str) - .expect("Failed to create N5 filesystem"); + let create = + N5Filesystem::open_or_create(path_str).expect("Failed to create N5 filesystem"); let uri = create.get_block_uri("foo/bar", &vec![1, 2, 3]).unwrap(); - assert_eq!(uri, format!("file://{}/foo/bar/1/2/3", path_str)); + assert_eq!( + uri, + format!( + "file://{}{s}foo{s}bar{s}1{s}2{s}3", + path_str, + s = std::path::MAIN_SEPARATOR + ) + ); } #[test] @@ -509,24 +546,31 @@ mod tests { smallvec![10, 10, 10], smallvec![5, 5, 5], crate::DataType::INT32, - crate::compression::CompressionType::Raw(crate::compression::raw::RawCompression::default()), + crate::compression::CompressionType::Raw( + crate::compression::raw::RawCompression::default(), + ), ); let block_data: Vec = (0..125_i32).collect(); let block_in = crate::SliceDataBlock::new( data_attrs.block_size.clone(), smallvec![0, 0, 0], - &block_data); + &block_data, + ); - create.create_dataset("foo/bar", &data_attrs) + create + .create_dataset("foo/bar", &data_attrs) .expect("Failed to create dataset"); - create.write_block("foo/bar", &data_attrs, &block_in) + create + .write_block("foo/bar", &data_attrs, &block_in) .expect("Failed to write block"); let read = create.open_reader(); - let block_out = read.read_block::("foo/bar", &data_attrs, smallvec![0, 0, 0]) + let block_out = read + .read_block::("foo/bar", &data_attrs, smallvec![0, 0, 0]) .expect("Failed to read block") .expect("Block is empty"); - let missing_block_out = read.read_block::("foo/bar", &data_attrs, smallvec![0, 0, 1]) + let missing_block_out = read + .read_block::("foo/bar", &data_attrs, smallvec![0, 0, 1]) .expect("Failed to read block"); assert_eq!(block_out.get_data(), &block_data[..]); @@ -537,8 +581,10 @@ mod tests { let block_in = crate::SliceDataBlock::new( data_attrs.block_size.clone(), smallvec![0, 0, 0], - &block_data); - create.write_block("foo/bar", &data_attrs, &block_in) + &block_data, + ); + create + .write_block("foo/bar", &data_attrs, &block_in) .expect("Failed to write block"); let block_file = create.get_data_block_path("foo/bar", &[0, 0, 0]).unwrap(); @@ -548,7 +594,7 @@ mod tests { let header_len = 2 * std::mem::size_of::() + 4 * std::mem::size_of::(); assert_eq!( metadata.len(), - (header_len + block_data.len() * std::mem::size_of::()) as u64); - + (header_len + block_data.len() * std::mem::size_of::()) as u64 + ); } } diff --git a/src/lib.rs b/src/lib.rs index 3ba8335..af2fd5e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -5,18 +5,15 @@ #![deny(missing_debug_implementations)] #![forbid(unsafe_code)] - // TODO: this does not run the test for recent stable rust because `test` // is no longer set during doc tests. When 1.40 stabilizes and is the MSRV // this can be changed from `test` to `doctest` and will work correctly. 
#[cfg(all(test, feature = "filesystem"))] doc_comment::doctest!("../README.md"); - #[macro_use] pub extern crate smallvec; - use std::io::{ Error, ErrorKind, @@ -54,7 +51,6 @@ pub(crate) mod tests; pub use semver::Version; - const COORD_SMALLVEC_SIZE: usize = 6; pub type CoordVec = SmallVec<[T; COORD_SMALLVEC_SIZE]>; pub type BlockCoord = CoordVec; @@ -123,8 +119,9 @@ pub trait N5Reader { data_attrs: &DatasetAttributes, grid_position: GridCoord, ) -> Result>, Error> - where VecDataBlock: DataBlock + ReadableDataBlock, - T: ReflectedType; + where + VecDataBlock: DataBlock + ReadableDataBlock, + T: ReflectedType; /// Read a single dataset block into an existing buffer. fn read_block_into + ReinitDataBlock + ReadableDataBlock>( @@ -148,13 +145,13 @@ pub trait N5Reader { } /// Non-mutating operations on N5 containers that support group discoverability. -pub trait N5Lister : N5Reader { +pub trait N5Lister: N5Reader { /// List all groups (including datasets) in a group. fn list(&self, path_name: &str) -> Result, Error>; } /// Mutating operations on N5 containers. -pub trait N5Writer : N5Reader { +pub trait N5Writer: N5Reader { /// Set a single attribute. fn set_attribute( &self, // TODO: should this be mut for semantics? @@ -164,7 +161,10 @@ pub trait N5Writer : N5Reader { ) -> Result<(), Error> { self.set_attributes( path_name, - vec![(key, serde_json::to_value(attribute)?)].into_iter().collect()) + vec![(key, serde_json::to_value(attribute)?)] + .into_iter() + .collect(), + ) } /// Set a map of attributes. @@ -192,11 +192,7 @@ pub trait N5Writer : N5Reader { /// Create a dataset. This will create the dataset group and attributes, /// but not populate any block data. - fn create_dataset( - &self, - path_name: &str, - data_attrs: &DatasetAttributes, - ) -> Result<(), Error> { + fn create_dataset(&self, path_name: &str, data_attrs: &DatasetAttributes) -> Result<(), Error> { self.create_group(path_name)?; self.set_dataset_attributes(path_name, data_attrs) } @@ -209,10 +205,7 @@ pub trait N5Writer : N5Reader { /// Remove a group or dataset (directory and all contained files). /// /// This will wait on locks acquired by other writers or readers. - fn remove( - &self, - path_name: &str, - ) -> Result<(), Error>; + fn remove(&self, path_name: &str) -> Result<(), Error>; fn write_block + WriteableDataBlock>( &self, @@ -225,16 +218,11 @@ pub trait N5Writer : N5Reader { /// /// Returns `true` if the block does not exist on the backend at the /// completion of the call. - fn delete_block( - &self, - path_name: &str, - grid_position: &[u64], - ) -> Result; + fn delete_block(&self, path_name: &str, grid_position: &[u64]) -> Result; } - fn u64_ceil_div(a: u64, b: u64) -> u64 { - (a + 1) / b + (if a % b != 0 {1} else {0}) + (a + 1) / b + u64::from(a % b != 0) } /// Attributes of a tensor dataset. @@ -258,8 +246,11 @@ impl DatasetAttributes { data_type: DataType, compression: compression::CompressionType, ) -> DatasetAttributes { - assert_eq!(dimensions.len(), block_size.len(), - "Number of dataset dimensions must match number of block size dimensions."); + assert_eq!( + dimensions.len(), + block_size.len(), + "Number of dataset dimensions must match number of block size dimensions." + ); DatasetAttributes { dimensions, block_size, @@ -300,7 +291,8 @@ impl DatasetAttributes { /// Get the upper bound extent of grid coordinates. 
pub fn get_grid_extent(&self) -> GridCoord { - self.dimensions.iter() + self.dimensions + .iter() .zip(self.block_size.iter().cloned().map(u64::from)) .map(|(d, b)| u64_ceil_div(*d, b)) .collect() @@ -336,14 +328,15 @@ impl DatasetAttributes { /// assert!(!attrs.in_bounds(&smallvec![5, 3, 2])); /// ``` pub fn in_bounds(&self, grid_position: &GridCoord) -> bool { - self.dimensions.len() == grid_position.len() && - self.get_grid_extent().iter() - .zip(grid_position.iter()) - .all(|(&bound, &coord)| coord < bound) + self.dimensions.len() == grid_position.len() + && self + .get_grid_extent() + .iter() + .zip(grid_position.iter()) + .all(|(&bound, &coord)| coord < bound) } } - /// Unencoded, non-payload header of a data block. #[derive(Debug)] pub struct BlockHeader { @@ -466,7 +459,7 @@ macro_rules! vec_data_block_impl { Ok(()) } } - } + }; } // Wrapper trait to erase a generic trait argument for consistent ByteOrder @@ -522,11 +515,7 @@ const BLOCK_FIXED_LEN: u16 = 0; const BLOCK_VAR_LEN: u16 = 1; pub trait DefaultBlockHeaderReader { - fn read_block_header( - buffer: &mut R, - grid_position: GridCoord, - ) -> std::io::Result { - + fn read_block_header(buffer: &mut R, grid_position: GridCoord) -> std::io::Result { let mode = buffer.read_u16::()?; let ndim = buffer.read_u16::()?; let mut size = smallvec![0; ndim as usize]; @@ -534,7 +523,7 @@ pub trait DefaultBlockHeaderReader { let num_el = match mode { BLOCK_FIXED_LEN => size.iter().product(), BLOCK_VAR_LEN => buffer.read_u32::()?, - _ => return Err(Error::new(ErrorKind::InvalidData, "Unsupported block mode")) + _ => return Err(Error::new(ErrorKind::InvalidData, "Unsupported block mode")), }; Ok(BlockHeader { @@ -546,18 +535,22 @@ pub trait DefaultBlockHeaderReader { } /// Reads blocks from rust readers. -pub trait DefaultBlockReader: DefaultBlockHeaderReader { +pub trait DefaultBlockReader: + DefaultBlockHeaderReader +{ fn read_block( mut buffer: R, data_attrs: &DatasetAttributes, grid_position: GridCoord, ) -> std::io::Result> - where VecDataBlock: DataBlock + ReadableDataBlock { - + where + VecDataBlock: DataBlock + ReadableDataBlock, + { if data_attrs.data_type != T::VARIANT { return Err(Error::new( ErrorKind::InvalidInput, - "Attempt to create data block for wrong type.")) + "Attempt to create data block for wrong type.", + )); } let header = Self::read_block_header(&mut buffer, grid_position)?; @@ -574,11 +567,11 @@ pub trait DefaultBlockReader: DefaultBlockHe grid_position: GridCoord, block: &mut B, ) -> std::io::Result<()> { - if data_attrs.data_type != T::VARIANT { return Err(Error::new( ErrorKind::InvalidInput, - "Attempt to create data block for wrong type.")) + "Attempt to create data block for wrong type.", + )); } let header = Self::read_block_header(&mut buffer, grid_position)?; @@ -591,21 +584,29 @@ pub trait DefaultBlockReader: DefaultBlockHe } /// Writes blocks to rust writers. 
-pub trait DefaultBlockWriter + WriteableDataBlock> { +pub trait DefaultBlockWriter< + T: ReflectedType, + W: std::io::Write, + B: DataBlock + WriteableDataBlock, +> +{ fn write_block( mut buffer: W, data_attrs: &DatasetAttributes, block: &B, ) -> std::io::Result<()> { - if data_attrs.data_type != T::VARIANT { return Err(Error::new( ErrorKind::InvalidInput, - "Attempt to write data block for wrong type.")) + "Attempt to write data block for wrong type.", + )); } - let mode: u16 = if block.get_num_elements() == block.get_size().iter().product::() - {BLOCK_FIXED_LEN} else {BLOCK_VAR_LEN}; + let mode: u16 = if block.get_num_elements() == block.get_size().iter().product::() { + BLOCK_FIXED_LEN + } else { + BLOCK_VAR_LEN + }; buffer.write_u16::(mode)?; buffer.write_u16::(data_attrs.get_ndim() as u16)?; for i in block.get_size() { @@ -630,4 +631,7 @@ pub trait DefaultBlockWriter DefaultBlockHeaderReader for DefaultBlock {} impl DefaultBlockReader for DefaultBlock {} -impl + WriteableDataBlock> DefaultBlockWriter for DefaultBlock {} +impl + WriteableDataBlock> + DefaultBlockWriter for DefaultBlock +{ +} diff --git a/src/ndarray.rs b/src/ndarray.rs index fde438f..7085f24 100644 --- a/src/ndarray.rs +++ b/src/ndarray.rs @@ -3,9 +3,7 @@ use std::io::{ Error, ErrorKind, }; -use std::ops::{ - Sub, -}; +use std::ops::Sub; use itertools::Itertools; use ndarray::{ @@ -30,7 +28,6 @@ use crate::{ WriteableDataBlock, }; - pub mod prelude { pub use super::{ BoundingBox, @@ -39,7 +36,6 @@ pub mod prelude { }; } - /// Specifes the extents of an axis-aligned bounding box. #[derive(Clone, Debug, PartialEq, Eq)] pub struct BoundingBox { @@ -51,10 +47,7 @@ impl BoundingBox { pub fn new(offset: GridCoord, size: GridCoord) -> BoundingBox { assert_eq!(offset.len(), size.len()); - BoundingBox { - offset, - size, - } + BoundingBox { offset, size } } pub fn size_block(&self) -> BlockCoord { @@ -77,7 +70,8 @@ impl BoundingBox { pub fn intersect(&mut self, other: &BoundingBox) { assert_eq!(self.offset.len(), other.offset.len()); - self.size.iter_mut() + self.size + .iter_mut() .zip(self.offset.iter_mut()) .zip(other.size.iter()) .zip(other.offset.iter()) @@ -100,7 +94,8 @@ impl BoundingBox { pub fn union(&mut self, other: &BoundingBox) { assert_eq!(self.offset.len(), other.offset.len()); - self.size.iter_mut() + self.size + .iter_mut() .zip(self.offset.iter_mut()) .zip(other.size.iter()) .zip(other.offset.iter()) @@ -111,17 +106,20 @@ impl BoundingBox { }); } - pub fn end(&self) -> impl Iterator + '_ { + pub fn end(&self) -> impl Iterator + '_ { self.offset.iter().zip(self.size.iter()).map(|(o, s)| o + s) } pub fn to_ndarray_slice(&self) -> CoordVec { - self.offset.iter().zip(self.end()) + self.offset + .iter() + .zip(self.end()) .map(|(&start, end)| ndarray::SliceInfoElem::Slice { start: start as isize, end: Some(end as isize), step: 1, - }).collect() + }) + .collect() } pub fn is_empty(&self) -> bool { @@ -134,7 +132,9 @@ impl Sub<&GridCoord> for BoundingBox { fn sub(self, other: &GridCoord) -> Self::Output { Self { - offset: self.offset.iter() + offset: self + .offset + .iter() .zip(other.iter()) .map(|(s, o)| s.checked_sub(*o).unwrap()) .collect(), @@ -143,7 +143,7 @@ impl Sub<&GridCoord> for BoundingBox { } } -pub trait N5NdarrayReader : N5Reader { +pub trait N5NdarrayReader: N5Reader { /// Read an arbitrary bounding box from an N5 volume into an ndarray, /// reading blocks in serial as necessary. 
/// @@ -154,9 +154,10 @@ pub trait N5NdarrayReader : N5Reader { data_attrs: &DatasetAttributes, bbox: &BoundingBox, ) -> Result>, Error> - where VecDataBlock: DataBlock + ReinitDataBlock + ReadableDataBlock, - T: ReflectedType + num_traits::identities::Zero { - + where + VecDataBlock: DataBlock + ReinitDataBlock + ReadableDataBlock, + T: ReflectedType + num_traits::identities::Zero, + { let mut arr = Array::zeros(bbox.size_ndarray_shape().f()); self.read_ndarray_into(path_name, data_attrs, bbox, arr.view_mut())?; @@ -176,9 +177,10 @@ pub trait N5NdarrayReader : N5Reader { bbox: &BoundingBox, arr: ndarray::ArrayViewMut<'a, T, ndarray::Dim>, ) -> Result<(), Error> - where VecDataBlock: DataBlock + ReinitDataBlock + ReadableDataBlock, - T: ReflectedType + num_traits::identities::Zero { - + where + VecDataBlock: DataBlock + ReinitDataBlock + ReadableDataBlock, + T: ReflectedType + num_traits::identities::Zero, + { self.read_ndarray_into_with_buffer(path_name, data_attrs, bbox, arr, &mut None) } @@ -196,35 +198,42 @@ pub trait N5NdarrayReader : N5Reader { mut arr: ndarray::ArrayViewMut<'a, T, ndarray::Dim>, block_buff_opt: &mut Option>, ) -> Result<(), Error> - where VecDataBlock: DataBlock + ReinitDataBlock + ReadableDataBlock, - T: ReflectedType + num_traits::identities::Zero { - + where + VecDataBlock: DataBlock + ReinitDataBlock + ReadableDataBlock, + T: ReflectedType + num_traits::identities::Zero, + { if bbox.offset.len() != data_attrs.get_ndim() || data_attrs.get_ndim() != arr.ndim() { - return Err(Error::new(ErrorKind::InvalidData, "Wrong number of dimensions")); + return Err(Error::new( + ErrorKind::InvalidData, + "Wrong number of dimensions", + )); } if bbox.size_ndarray_shape().as_slice() != arr.shape() { - return Err(Error::new(ErrorKind::InvalidData, "Bounding box and array have different shape")); + return Err(Error::new( + ErrorKind::InvalidData, + "Bounding box and array have different shape", + )); } for coord in data_attrs.bounded_coord_iter(bbox) { - let grid_pos = GridCoord::from(&coord[..]); let is_block = match block_buff_opt { None => { *block_buff_opt = self.read_block(path_name, data_attrs, grid_pos)?; block_buff_opt.is_some() - }, - Some(ref mut block_buff) => { - self.read_block_into(path_name, data_attrs, grid_pos, block_buff)?.is_some() } + Some(ref mut block_buff) => self + .read_block_into(path_name, data_attrs, grid_pos, block_buff)? + .is_some(), }; // TODO: cannot combine this into condition below until `let_chains` stabilizes. - if !is_block { continue; } + if !is_block { + continue; + } if let Some(ref block) = block_buff_opt { - let block_bb = block.get_bounds(data_attrs); let mut read_bb = bbox.clone(); read_bb.intersect(&block_bb); @@ -240,13 +249,15 @@ pub trait N5NdarrayReader : N5Reader { let block_read_bb = read_bb.clone() - &block_bb.offset; let arr_slice = arr_read_bb.to_ndarray_slice(); + let mut arr_view = arr.slice_mut(arr_slice.as_slice()); let block_slice = block_read_bb.to_ndarray_slice(); // N5 datasets are stored f-order/column-major. 
- let block_data = ArrayView::from_shape(block_bb.size_ndarray_shape().f(), block.get_data()) - .expect("TODO: block ndarray failed"); + let block_data = + ArrayView::from_shape(block_bb.size_ndarray_shape().f(), block.get_data()) + .expect("TODO: block ndarray failed"); let block_view = block_data.slice(block_slice.as_slice()); arr_view.assign(&block_view); @@ -259,8 +270,7 @@ pub trait N5NdarrayReader : N5Reader { impl N5NdarrayReader for T {} - -pub trait N5NdarrayWriter : N5Writer { +pub trait N5NdarrayWriter: N5Writer { /// Write an arbitrary bounding box from an ndarray into an N5 volume, /// writing blocks in serial as necessary. fn write_ndarray<'a, T, A>( @@ -271,14 +281,18 @@ pub trait N5NdarrayWriter : N5Writer { array: A, fill_val: T, ) -> Result<(), Error> - // TODO: Next breaking version, refactor to use `SliceDataBlock` bounds. - where VecDataBlock: DataBlock + ReadableDataBlock + WriteableDataBlock, - T: ReflectedType + num_traits::identities::Zero, - A: ndarray::AsArray<'a, T, ndarray::Dim> { - + // TODO: Next breaking version, refactor to use `SliceDataBlock` bounds. + where + VecDataBlock: DataBlock + ReadableDataBlock + WriteableDataBlock, + T: ReflectedType + num_traits::identities::Zero, + A: ndarray::AsArray<'a, T, ndarray::Dim>, + { let array = array.into(); if array.ndim() != data_attrs.get_ndim() { - return Err(Error::new(ErrorKind::InvalidData, "Wrong number of dimensions")); + return Err(Error::new( + ErrorKind::InvalidData, + "Wrong number of dimensions", + )); } let bbox = BoundingBox { offset, @@ -288,7 +302,6 @@ pub trait N5NdarrayWriter : N5Writer { let mut block_vec: Vec = Vec::new(); for coord in data_attrs.bounded_coord_iter(&bbox) { - let grid_coord = GridCoord::from(&coord[..]); let nom_block_bb = data_attrs.get_block_bounds(&grid_coord); let mut write_bb = nom_block_bb.clone(); @@ -299,7 +312,6 @@ pub trait N5NdarrayWriter : N5Writer { let arr_view = array.slice(arr_slice.as_slice()); if write_bb == nom_block_bb { - // No need to read whether there is an extant block if it is // going to be entirely overwriten. block_vec.clear(); @@ -308,35 +320,40 @@ pub trait N5NdarrayWriter : N5Writer { self.write_block(path_name, data_attrs, &block)?; block_vec = block.into_data(); - } else { - let block_opt = self.read_block(path_name, data_attrs, grid_coord.clone())?; let (block_bb, mut block_array) = match block_opt { Some(block) => { let block_bb = block.get_bounds(data_attrs); - let block_array = Array::from_shape_vec(block_bb.size_ndarray_shape().f(), block.into_data()) - .expect("TODO: block ndarray failed"); + let block_array = Array::from_shape_vec( + block_bb.size_ndarray_shape().f(), + block.into_data(), + ) + .expect("TODO: block ndarray failed"); (block_bb, block_array) - }, + } None => { // If no block exists, need to write from its origin. 
let mut block_bb = write_bb.clone(); - block_bb.size.iter_mut() + block_bb + .size + .iter_mut() .zip(write_bb.offset.iter()) .zip(nom_block_bb.offset.iter()) .for_each(|((s, o), g)| *s += *o - *g); block_bb.offset = nom_block_bb.offset.clone(); let block_size_usize = block_bb.size_ndarray_shape(); - let block_array = Array::from_elem(&block_size_usize[..], fill_val.clone()).into_dyn(); + let block_array = + Array::from_elem(&block_size_usize[..], fill_val.clone()).into_dyn(); (block_bb, block_array) } }; let block_write_bb = write_bb.clone() - &block_bb.offset; let block_slice = block_write_bb.to_ndarray_slice(); + let mut block_view = block_array.slice_mut(block_slice.as_slice()); block_view.assign(&arr_view); @@ -356,10 +373,11 @@ pub trait N5NdarrayWriter : N5Writer { impl N5NdarrayWriter for T {} - impl DatasetAttributes { pub fn coord_iter(&self) -> impl Iterator> + ExactSizeIterator { - let coord_ceil = self.get_dimensions().iter() + let coord_ceil = self + .get_dimensions() + .iter() .zip(self.get_block_size().iter()) .map(|(&d, &s)| (d + u64::from(s) - 1) / u64::from(s)) .collect::(); @@ -367,12 +385,19 @@ impl DatasetAttributes { CoordIterator::new(&coord_ceil) } - pub fn bounded_coord_iter(&self, bbox: &BoundingBox) -> impl Iterator> + ExactSizeIterator { - let floor_coord: GridCoord = bbox.offset.iter() + pub fn bounded_coord_iter( + &self, + bbox: &BoundingBox, + ) -> impl Iterator> + ExactSizeIterator { + let floor_coord: GridCoord = bbox + .offset + .iter() .zip(&self.block_size) .map(|(&o, &bs)| o / u64::from(bs)) .collect(); - let ceil_coord: GridCoord = bbox.offset.iter() + let ceil_coord: GridCoord = bbox + .offset + .iter() .zip(&bbox.size) .zip(self.block_size.iter().cloned().map(u64::from)) .map(|((&o, &s), bs)| (o + s + bs - 1) / bs) @@ -389,10 +414,13 @@ impl DatasetAttributes { } pub fn get_block_bounds(&self, coord: &GridCoord) -> BoundingBox { - let mut size: GridCoord = self.get_block_size().iter().cloned().map(u64::from).collect(); - let offset: GridCoord = coord.iter() - .zip(size.iter()) - .map(|(c, s)| c * s).collect(); + let mut size: GridCoord = self + .get_block_size() + .iter() + .cloned() + .map(u64::from) + .collect(); + let offset: GridCoord = coord.iter().zip(size.iter()).map(|(c, s)| c * s).collect(); size.iter_mut() .zip(offset.iter()) .zip(self.get_dimensions().iter()) @@ -402,6 +430,8 @@ impl DatasetAttributes { } impl SliceDataBlock { + /// Get the bounding box of the occupied extent of this block, which may + /// be smaller than the nominal bounding box expected from the dataset. 
pub fn get_bounds(&self, data_attrs: &DatasetAttributes) -> BoundingBox { let mut bbox = data_attrs.get_block_bounds(&self.grid_position); bbox.size = self.size.iter().cloned().map(u64::from).collect(); @@ -420,21 +450,21 @@ struct CoordIterator>> { impl CoordIterator>> { fn new(ceil: &[u64]) -> Self { CoordIterator { - iter: ceil.iter() - .map(|&c| 0..c) - .multi_cartesian_product(), + iter: ceil.iter().map(|&c| 0..c).multi_cartesian_product(), accumulator: 0, total_coords: ceil.iter().product::() as usize, } } fn floor_ceil(floor: &[u64], ceil: &[u64]) -> Self { - let total_coords = floor.iter() - .zip(ceil.iter()) - .map(|(&f, &c)| c - f) - .product::() as usize; + let total_coords = floor + .iter() + .zip(ceil.iter()) + .map(|(&f, &c)| c - f) + .product::() as usize; CoordIterator { - iter: floor.iter() + iter: floor + .iter() .zip(ceil.iter()) .map(|(&f, &c)| f..c) .multi_cartesian_product(), @@ -458,9 +488,7 @@ impl>> Iterator for CoordIterator { } } -impl>> ExactSizeIterator for CoordIterator { -} - +impl>> ExactSizeIterator for CoordIterator {} #[cfg(test)] pub(crate) mod tests { @@ -479,12 +507,10 @@ pub(crate) mod tests { }; let coords: HashSet> = data_attrs.coord_iter().collect(); - let expected: HashSet> = vec![ - vec![0, 0, 0], - vec![0, 0, 1], - vec![0, 1, 0], - vec![0, 1, 1], - ].into_iter().collect(); + let expected: HashSet> = + vec![vec![0, 0, 0], vec![0, 0, 1], vec![0, 1, 0], vec![0, 1, 1]] + .into_iter() + .collect(); assert_eq!(coords, expected); } diff --git a/src/prelude.rs b/src/prelude.rs index 6b0a1e7..51998f1 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -9,13 +9,21 @@ //! use n5::prelude::*; //! ``` +#[doc(no_inline)] +pub use crate::compression::{ + self, + CompressionType, +}; +#[cfg(feature = "filesystem")] +#[doc(no_inline)] +pub use crate::filesystem::N5Filesystem; #[doc(no_inline)] pub use crate::{ BlockCoord, - DatasetAttributes, DataBlock, DataBlockMetadata, DataType, + DatasetAttributes, GridCoord, N5Lister, N5Reader, @@ -24,11 +32,3 @@ pub use crate::{ SliceDataBlock, VecDataBlock, }; -#[doc(no_inline)] -pub use crate::compression::{ - self, - CompressionType, -}; -#[cfg(feature = "filesystem")] -#[doc(no_inline)] -pub use crate::filesystem::N5Filesystem; diff --git a/src/tests.rs b/src/tests.rs index 1abb4e2..e5b10c3 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -1,5 +1,8 @@ use super::*; -use std::io::{Cursor, Result}; +use std::io::{ + Cursor, + Result, +}; use serde_json::json; @@ -35,17 +38,16 @@ fn doc_spec_dataset_attributes(compression: compression::CompressionType) -> Dat } } -pub(crate) fn test_read_doc_spec_block( - block: &[u8], - compression: compression::CompressionType, -) { +pub(crate) fn test_read_doc_spec_block(block: &[u8], compression: compression::CompressionType) { let buff = Cursor::new(block); let data_attrs = doc_spec_dataset_attributes(compression); let block = >>::read_block( buff, &data_attrs, - smallvec![0, 0, 0]).expect("read_block failed"); + smallvec![0, 0, 0], + ) + .expect("read_block failed"); assert_eq!(block.get_size(), data_attrs.get_block_size()); assert_eq!(block.get_grid_position(), &[0, 0, 0]); @@ -53,20 +55,19 @@ pub(crate) fn test_read_doc_spec_block( } pub(crate) fn test_write_doc_spec_block( - expected_block: &[u8], - compression: compression::CompressionType, + expected_block: &[u8], + compression: compression::CompressionType, ) { let data_attrs = doc_spec_dataset_attributes(compression); let block_in = SliceDataBlock::new( data_attrs.block_size.clone(), smallvec![0, 0, 0], - DOC_SPEC_BLOCK_DATA); 
diff --git a/src/tests.rs b/src/tests.rs
index 1abb4e2..e5b10c3 100644
--- a/src/tests.rs
+++ b/src/tests.rs
@@ -1,5 +1,8 @@
 use super::*;
-use std::io::{Cursor, Result};
+use std::io::{
+    Cursor,
+    Result,
+};

 use serde_json::json;

@@ -35,17 +38,16 @@ fn doc_spec_dataset_attributes(compression: compression::CompressionType) -> Dat
     }
 }

-pub(crate) fn test_read_doc_spec_block(
-    block: &[u8],
-    compression: compression::CompressionType,
-) {
+pub(crate) fn test_read_doc_spec_block(block: &[u8], compression: compression::CompressionType) {
     let buff = Cursor::new(block);
     let data_attrs = doc_spec_dataset_attributes(compression);

     let block = <DefaultBlock as DefaultBlockReader<i16, Cursor<&[u8]>>>::read_block(
         buff,
         &data_attrs,
-        smallvec![0, 0, 0]).expect("read_block failed");
+        smallvec![0, 0, 0],
+    )
+    .expect("read_block failed");

     assert_eq!(block.get_size(), data_attrs.get_block_size());
     assert_eq!(block.get_grid_position(), &[0, 0, 0]);
@@ -53,20 +55,19 @@ pub(crate) fn test_read_doc_spec_block(
 }

 pub(crate) fn test_write_doc_spec_block(
-    expected_block: &[u8],
-    compression: compression::CompressionType,
+    expected_block: &[u8],
+    compression: compression::CompressionType,
 ) {
     let data_attrs = doc_spec_dataset_attributes(compression);
     let block_in = SliceDataBlock::new(
         data_attrs.block_size.clone(),
         smallvec![0, 0, 0],
-        DOC_SPEC_BLOCK_DATA);
+        DOC_SPEC_BLOCK_DATA,
+    );
     let mut buff: Vec<u8> = Vec::new();

-    <DefaultBlock as DefaultBlockWriter<i16, _, _>>::write_block(
-        &mut buff,
-        &data_attrs,
-        &block_in).expect("read_block failed");
+    <DefaultBlock as DefaultBlockWriter<i16, _, _>>::write_block(&mut buff, &data_attrs, &block_in)
+        .expect("write_block failed");

     assert_eq!(buff, expected_block);
 }
@@ -82,19 +83,24 @@ pub(crate) fn test_block_compression_rw(compression: compression::CompressionTyp
     let block_in = SliceDataBlock::new(
         data_attrs.block_size.clone(),
         smallvec![0, 0, 0],
-        &block_data);
+        &block_data,
+    );
     let mut inner: Vec<u8> = Vec::new();

     <DefaultBlock as DefaultBlockWriter<i32, _, _>>::write_block(
         &mut inner,
         &data_attrs,
-        &block_in).expect("write_block failed");
+        &block_in,
+    )
+    .expect("write_block failed");

     let block_out = <DefaultBlock as DefaultBlockReader<i32, &[u8]>>::read_block(
         &inner[..],
         &data_attrs,
-        smallvec![0, 0, 0]).expect("read_block failed");
+        smallvec![0, 0, 0],
+    )
+    .expect("read_block failed");

     assert_eq!(block_out.get_size(), &[5, 5, 5]);
     assert_eq!(block_out.get_grid_position(), &[0, 0, 0]);
@@ -112,19 +118,24 @@ pub(crate) fn test_varlength_block_rw(compression: compression::CompressionType)
     let block_in = SliceDataBlock::new(
         data_attrs.block_size.clone(),
         smallvec![0, 0, 0],
-        &block_data);
+        &block_data,
+    );
     let mut inner: Vec<u8> = Vec::new();

     <DefaultBlock as DefaultBlockWriter<i32, _, _>>::write_block(
         &mut inner,
         &data_attrs,
-        &block_in).expect("write_block failed");
+        &block_in,
+    )
+    .expect("write_block failed");

     let block_out = <DefaultBlock as DefaultBlockReader<i32, &[u8]>>::read_block(
         &inner[..],
         &data_attrs,
-        smallvec![0, 0, 0]).expect("read_block failed");
+        smallvec![0, 0, 0],
+    )
+    .expect("read_block failed");

     assert_eq!(block_out.get_size(), &[5, 5, 5]);
     assert_eq!(block_out.get_grid_position(), &[0, 0, 0]);
@@ -134,12 +145,16 @@ pub(crate) fn test_varlength_block_rw(compression: compression::CompressionType)
 pub(crate) fn create_backend<N: N5Testable>() {
     let wrapper = N::temp_new_rw();
     let create = wrapper.as_ref();
-    create.set_attribute("", "foo".to_owned(), "bar")
+    create
+        .set_attribute("", "foo".to_owned(), "bar")
         .expect("Failed to set attribute");

     let read = create.open_reader();
-    assert_eq!(read.get_version().expect("Cannot read version"), crate::VERSION);
+    assert_eq!(
+        read.get_version().expect("Cannot read version"),
+        crate::VERSION
+    );
     assert_eq!(read.list_attributes("").unwrap()["foo"], "bar");
 }

@@ -152,7 +167,8 @@ pub(crate) fn create_dataset<N: N5Testable>() {
         DataType::INT32,
         crate::compression::CompressionType::Raw(crate::compression::raw::RawCompression::default()),
     );
-    create.create_dataset("foo/bar", &data_attrs)
+    create
+        .create_dataset("foo/bar", &data_attrs)
         .expect("Failed to create dataset");

     let read = create.open_reader();
@@ -169,7 +185,8 @@ pub(crate) fn absolute_relative_paths<N: N5Testable>() -> Result<()> {
         DataType::INT32,
         crate::compression::CompressionType::Raw(crate::compression::raw::RawCompression::default()),
     );
-    create.create_dataset("foo/bar", &data_attrs)
+    create
+        .create_dataset("foo/bar", &data_attrs)
         .expect("Failed to create dataset");

     let read = create.open_reader();
@@ -191,8 +208,7 @@ pub(crate) fn attributes_rw<N: N5Testable>() {
     let wrapper = N::temp_new_rw();
     let create = wrapper.as_ref();
     let group = "foo";
-    create.create_group(group)
-        .expect("Failed to create group");
+    create.create_group(group).expect("Failed to create group");

     // Currently reading attributes that have not been set is an error.
     // Whether this should be the case is still open for decision.
@@ -201,43 +217,67 @@ pub(crate) fn attributes_rw<N: N5Testable>() {
     let attrs_1 = json!({
         "foo": {"bar": 42},
         "baz": [1, 2, 3],
-    }).as_object().unwrap().clone();
-    create.set_attributes(group, attrs_1.clone()).expect("Failed to set attributes");
+    })
+    .as_object()
+    .unwrap()
+    .clone();
+    create
+        .set_attributes(group, attrs_1.clone())
+        .expect("Failed to set attributes");
     assert_eq!(
         create.list_attributes(group).unwrap(),
-        serde_json::Value::Object(attrs_1));
+        serde_json::Value::Object(attrs_1)
+    );

     let attrs_2 = json!({
         "baz": [4, 5, 6],
-    }).as_object().unwrap().clone();
-    create.set_attributes(group, attrs_2).expect("Failed to set attributes");
+    })
+    .as_object()
+    .unwrap()
+    .clone();
+    create
+        .set_attributes(group, attrs_2)
+        .expect("Failed to set attributes");
     assert_eq!(
         create.list_attributes(group).unwrap(),
         json!({
             "foo": {"bar": 42},
             "baz": [4, 5, 6],
-        }));
+        })
+    );

     // Test that key merging is at top-level only.
     let attrs_2 = json!({
         "foo": {"moo": 7},
-    }).as_object().unwrap().clone();
-    create.set_attributes(group, attrs_2).expect("Failed to set attributes");
+    })
+    .as_object()
+    .unwrap()
+    .clone();
+    create
+        .set_attributes(group, attrs_2)
+        .expect("Failed to set attributes");
     assert_eq!(
         create.list_attributes(group).unwrap(),
         json!({
             "foo": {"moo": 7},
             "baz": [4, 5, 6],
-        }));
+        })
+    );

     let attrs_3 = json!({
         "foo": null,
         "baz": null,
-    }).as_object().unwrap().clone();
-    create.set_attributes(group, attrs_3.clone()).expect("Failed to set attributes");
+    })
+    .as_object()
+    .unwrap()
+    .clone();
+    create
+        .set_attributes(group, attrs_3.clone())
+        .expect("Failed to set attributes");
     assert_eq!(
         create.list_attributes(group).unwrap(),
-        serde_json::Value::Object(attrs_3));
+        serde_json::Value::Object(attrs_3)
+    );
 }

 pub(crate) fn create_block_rw<N: N5Testable>() {
@@ -253,18 +293,23 @@ pub(crate) fn create_block_rw<N: N5Testable>() {
     let block_in = crate::SliceDataBlock::new(
         data_attrs.block_size.clone(),
         smallvec![0, 0, 0],
-        &block_data);
+        &block_data,
+    );

-    create.create_dataset("foo/bar", &data_attrs)
+    create
+        .create_dataset("foo/bar", &data_attrs)
         .expect("Failed to create dataset");
-    create.write_block("foo/bar", &data_attrs, &block_in)
+    create
+        .write_block("foo/bar", &data_attrs, &block_in)
         .expect("Failed to write block");

     let read = create.open_reader();
-    let block_out = read.read_block::<i32>("foo/bar", &data_attrs, smallvec![0, 0, 0])
+    let block_out = read
+        .read_block::<i32>("foo/bar", &data_attrs, smallvec![0, 0, 0])
         .expect("Failed to read block")
         .expect("Block is empty");
-    let missing_block_out = read.read_block::<i32>("foo/bar", &data_attrs, smallvec![0, 0, 1])
+    let missing_block_out = read
+        .read_block::<i32>("foo/bar", &data_attrs, smallvec![0, 0, 1])
         .expect("Failed to read block");

     assert_eq!(block_out.get_data(), &block_data[..]);
@@ -275,10 +320,13 @@ pub(crate) fn create_block_rw<N: N5Testable>() {
     let block_in = crate::SliceDataBlock::new(
         data_attrs.block_size.clone(),
         smallvec![0, 0, 0],
-        &block_data);
-    create.write_block("foo/bar", &data_attrs, &block_in)
+        &block_data,
+    );
+    create
+        .write_block("foo/bar", &data_attrs, &block_in)
         .expect("Failed to write block");
-    let block_out = read.read_block::<i32>("foo/bar", &data_attrs, smallvec![0, 0, 0])
+    let block_out = read
+        .read_block::<i32>("foo/bar", &data_attrs, smallvec![0, 0, 0])
         .expect("Failed to read block")
         .expect("Block is empty");
@@ -300,17 +348,18 @@ pub(crate) fn delete_block<N: N5Testable>() {
     let dataset = "foo/bar";
     let block_data: Vec<i32> = (0..125_i32).collect();
-    let block_in = crate::SliceDataBlock::new(
-        data_attrs.block_size.clone(),
-        coord_a.clone(),
-        &block_data);
+    let block_in =
+        crate::SliceDataBlock::new(data_attrs.block_size.clone(), coord_a.clone(), &block_data);

-    create.create_dataset(dataset, &data_attrs)
+    create
+        .create_dataset(dataset, &data_attrs)
         .expect("Failed to create dataset");
-    create.write_block(dataset, &data_attrs, &block_in)
+    create
+        .write_block(dataset, &data_attrs, &block_in)
         .expect("Failed to write block");

-    assert!(create.read_block::<i32>(dataset, &data_attrs, coord_a.clone())
+    assert!(create
+        .read_block::<i32>(dataset, &data_attrs, coord_a.clone())
        .expect("Failed to read block")
        .is_some());

@@ -318,7 +367,8 @@ pub(crate) fn delete_block<N: N5Testable>() {
     assert!(create.delete_block(dataset, &coord_a).unwrap());
     assert!(create.delete_block(dataset, &coord_b).unwrap());

-    assert!(create.read_block::<i32>(dataset, &data_attrs, coord_a.clone())
+    assert!(create
+        .read_block::<i32>(dataset, &data_attrs, coord_a.clone())
         .expect("Failed to read block")
         .is_none());
 }
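One behavior the reworked attributes_rw test above pins down is that set_attributes merges attribute keys at the top level only: an update to "foo" replaces the whole nested object, rather than merging "moo" into the existing {"bar": 42}. A standalone sketch of that semantics with serde_json (merge_top_level is a hypothetical helper written for illustration; it is not part of the crate's API):

use serde_json::{json, Map, Value};

// Top-level-only merge: each incoming key replaces the existing value
// wholesale; nested objects are never merged recursively.
fn merge_top_level(existing: &mut Map<String, Value>, updates: Map<String, Value>) {
    for (key, value) in updates {
        existing.insert(key, value);
    }
}

fn main() {
    let mut attrs = json!({"foo": {"bar": 42}, "baz": [4, 5, 6]})
        .as_object()
        .unwrap()
        .clone();
    let update = json!({"foo": {"moo": 7}}).as_object().unwrap().clone();
    merge_top_level(&mut attrs, update);
    // "foo" was replaced wholesale, so "bar" is gone; "baz" is untouched.
    assert_eq!(
        Value::Object(attrs),
        json!({"foo": {"moo": 7}, "baz": [4, 5, 6]})
    );
}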
diff --git a/tests/integration_test.rs b/tests/integration_test.rs
index cfff219..ec32fd1 100644
--- a/tests/integration_test.rs
+++ b/tests/integration_test.rs
@@ -1,23 +1,20 @@
-use smallvec::smallvec;
 use rand::{
     distributions::Standard,
     Rng,
 };
+use smallvec::smallvec;

 use n5::prelude::*;

-
-fn test_read_write<T, N5: N5Reader + N5Writer>(
-    n: &N5,
-    compression: &CompressionType,
-    dim: usize,
-) where T: 'static + std::fmt::Debug + ReflectedType + PartialEq + Default,
-    rand::distributions::Standard: rand::distributions::Distribution<T>,
-    VecDataBlock<T>: n5::ReadableDataBlock + n5::WriteableDataBlock,
+fn test_read_write<T, N5: N5Reader + N5Writer>(n: &N5, compression: &CompressionType, dim: usize)
+where
+    T: 'static + std::fmt::Debug + ReflectedType + PartialEq + Default,
+    rand::distributions::Standard: rand::distributions::Distribution<T>,
+    VecDataBlock<T>: n5::ReadableDataBlock + n5::WriteableDataBlock,
 {
-    let block_size: BlockCoord = (1..=dim as u32).rev().map(|d| d*5).collect();
+    let block_size: BlockCoord = (1..=dim as u32).rev().map(|d| d * 5).collect();
     let data_attrs = DatasetAttributes::new(
-        (1..=dim as u64).map(|d| d*100).collect(),
+        (1..=dim as u64).map(|d| d * 100).collect(),
         block_size.clone(),
         T::VARIANT,
         compression.clone(),
@@ -26,10 +23,7 @@ fn test_read_write<T, N5: N5Reader + N5Writer>(
     let rng = rand::thread_rng();
     let block_data: Vec<T> = rng.sample_iter(&Standard).take(numel).collect();

-    let block_in = SliceDataBlock::new(
-        block_size,
-        smallvec![0; dim],
-        block_data);
+    let block_in = SliceDataBlock::new(block_size, smallvec![0; dim], block_data);

     let path_name = "test/dataset/group";

@@ -40,15 +34,13 @@ fn test_read_write<T, N5: N5Reader + N5Writer>(
     let block_data = block_in.into_data();

-    let block_out = n.read_block::<T>(path_name, &data_attrs, smallvec![0; dim])
+    let block_out = n
+        .read_block::<T>(path_name, &data_attrs, smallvec![0; dim])
         .expect("Failed to read block")
         .expect("Block is empty");
     assert_eq!(block_out.get_data(), &block_data[..]);

-    let mut into_block = VecDataBlock::new(
-        smallvec![0; dim],
-        smallvec![0; dim],
-        vec![]);
+    let mut into_block = VecDataBlock::new(smallvec![0; dim], smallvec![0; dim], vec![]);
     n.read_block_into(path_name, &data_attrs, smallvec![0; dim], &mut into_block)
         .expect("Failed to read block")
         .expect("Block is empty");
@@ -57,11 +49,7 @@ fn test_read_write<T, N5: N5Reader + N5Writer>(
     n.remove(path_name).unwrap();
 }

-fn test_all_types<N5: N5Reader + N5Writer>(
-    n: &N5,
-    compression: &CompressionType,
-    dim: usize,
-) {
+fn test_all_types<N5: N5Reader + N5Writer>(n: &N5, compression: &CompressionType, dim: usize) {
     test_read_write::<u8, _>(n, compression, dim);
     test_read_write::<u16, _>(n, compression, dim);
     test_read_write::<u32, _>(n, compression, dim);
@@ -77,9 +65,12 @@ fn test_all_types<N5: N5Reader + N5Writer>(
 fn test_n5_filesystem_dim(dim: usize) {
     let dir = tempdir::TempDir::new("rust_n5_integration_tests").unwrap();
-    let n = N5Filesystem::open_or_create(dir.path())
-        .expect("Failed to create N5 filesystem");
-    test_all_types(&n, &CompressionType::Raw(compression::raw::RawCompression::default()), dim);
+    let n = N5Filesystem::open_or_create(dir.path()).expect("Failed to create N5 filesystem");
+    test_all_types(
+        &n,
+        &CompressionType::Raw(compression::raw::RawCompression::default()),
+        dim,
+    );
 }

 #[test]
@@ -90,22 +81,41 @@ fn test_n5_filesystem_dims() {
 }

 fn test_all_compressions<N5: N5Reader + N5Writer>(n: &N5) {
-    test_all_types(n, &CompressionType::Raw(compression::raw::RawCompression::default()), 3);
+    test_all_types(
+        n,
+        &CompressionType::Raw(compression::raw::RawCompression::default()),
+        3,
+    );
     #[cfg(feature = "bzip")]
-    test_all_types(n, &CompressionType::Bzip2(compression::bzip::Bzip2Compression::default()), 3);
+    test_all_types(
+        n,
+        &CompressionType::Bzip2(compression::bzip::Bzip2Compression::default()),
+        3,
+    );
     #[cfg(feature = "gzip")]
-    test_all_types(n, &CompressionType::Gzip(compression::gzip::GzipCompression::default()), 3);
+    test_all_types(
+        n,
+        &CompressionType::Gzip(compression::gzip::GzipCompression::default()),
+        3,
+    );
     #[cfg(feature = "lz")]
-    test_all_types(n, &CompressionType::Lz4(compression::lz::Lz4Compression::default()), 3);
+    test_all_types(
+        n,
+        &CompressionType::Lz4(compression::lz::Lz4Compression::default()),
+        3,
+    );
     #[cfg(feature = "xz")]
-    test_all_types(n, &CompressionType::Xz(compression::xz::XzCompression::default()), 3);
+    test_all_types(
+        n,
+        &CompressionType::Xz(compression::xz::XzCompression::default()),
+        3,
+    );
 }

 #[test]
 fn test_n5_filesystem_compressions() {
     let dir = tempdir::TempDir::new("rust_n5_integration_tests").unwrap();
-    let n = N5Filesystem::open_or_create(dir.path())
-        .expect("Failed to create N5 filesystem");
+    let n = N5Filesystem::open_or_create(dir.path()).expect("Failed to create N5 filesystem");
     test_all_compressions(&n)
 }
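For readers following the generic test_read_write above, the round trip it performs reduces, for one concrete element type, to roughly the following (the dataset path and shapes are illustrative; i32 stands in for the generic T, mirroring the read_block calls visible elsewhere in this patch):

use n5::prelude::*;
use n5::smallvec::smallvec;

fn roundtrip_one_block(n: &N5Filesystem) {
    let data_attrs = DatasetAttributes::new(
        smallvec![100, 100, 100], // dataset dimensions
        smallvec![10, 10, 10],    // block size
        DataType::INT32,
        CompressionType::Raw(compression::raw::RawCompression::default()),
    );
    n.create_dataset("test/dataset/group", &data_attrs)
        .expect("Failed to create dataset");

    // Exactly one full block of data: 10 * 10 * 10 elements.
    let block_data: Vec<i32> = (0..1000).collect();
    let block_in = SliceDataBlock::new(smallvec![10, 10, 10], smallvec![0, 0, 0], &block_data);
    n.write_block("test/dataset/group", &data_attrs, &block_in)
        .expect("Failed to write block");

    // Reading the block back yields the same data; reading a block that
    // was never written returns Ok(None) rather than an error.
    let block_out = n
        .read_block::<i32>("test/dataset/group", &data_attrs, smallvec![0, 0, 0])
        .expect("Failed to read block")
        .expect("Block is empty");
    assert_eq!(block_out.get_data(), &block_data[..]);
}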
diff --git a/tests/ndarray.rs b/tests/ndarray.rs
index 851f64f..e87880b 100644
--- a/tests/ndarray.rs
+++ b/tests/ndarray.rs
@@ -1,21 +1,18 @@
 #![cfg(feature = "use_ndarray")]

 use ndarray::Array;
-use smallvec::smallvec;
-use rand::Rng;
 use rand::distributions::Standard;
+use rand::Rng;
+use smallvec::smallvec;

-use n5::prelude::*;
 use n5::ndarray::prelude::*;
-
+use n5::prelude::*;

 #[test]
 fn test_read_ndarray() {
-
     let dir = tempdir::TempDir::new("rust_n5_ndarray_tests").unwrap();
-    let n = N5Filesystem::open_or_create(dir.path())
-        .expect("Failed to create N5 filesystem");
+    let n = N5Filesystem::open_or_create(dir.path()).expect("Failed to create N5 filesystem");

     let block_size = smallvec![3, 4, 2, 1];
     let data_attrs = DatasetAttributes::new(
@@ -30,7 +27,6 @@ fn test_read_ndarray() {
     n.create_dataset(path_name, &data_attrs)
         .expect("Failed to create dataset");

-
     for k in 0..10 {
         let z = block_size[3] * k;
         for j in 0..10 {
@@ -53,7 +49,8 @@ fn test_read_ndarray() {
                 let block_in = VecDataBlock::new(
                     block_size.clone(),
                     smallvec![0, u64::from(i), u64::from(j), u64::from(k)],
-                    block_data);
+                    block_data,
+                );
                 n.write_block(path_name, &data_attrs, &block_in)
                     .expect("Failed to write block");
             }
@@ -61,27 +58,50 @@ fn test_read_ndarray() {
     }

     let bbox = BoundingBox::new(smallvec![0, 5, 4, 3], smallvec![3, 35, 15, 7]);
-    let a = n.read_ndarray::<i32>(path_name, &data_attrs, &bbox).unwrap();
+    let a = n
+        .read_ndarray::<i32>(path_name, &data_attrs, &bbox)
+        .unwrap();

     for z in 0..a.shape()[3] {
         for y in 0..a.shape()[2] {
             for x in 0..a.shape()[1] {
-                assert_eq!(a[[0, x, y, z]], 1005 + x as i32, "0 {} {} {}: {}", x, y, z, a[[0, x, y, z]]);
-                assert_eq!(a[[1, x, y, z]], 2004 + y as i32, "1 {} {} {}: {}", x, y, z, a[[1, x, y, z]]);
-                assert_eq!(a[[2, x, y, z]], 3003 + z as i32, "2 {} {} {}: {}", x, y, z, a[[2, x, y, z]]);
+                assert_eq!(
+                    a[[0, x, y, z]],
+                    1005 + x as i32,
+                    "0 {} {} {}: {}",
+                    x,
+                    y,
+                    z,
+                    a[[0, x, y, z]]
+                );
+                assert_eq!(
+                    a[[1, x, y, z]],
+                    2004 + y as i32,
+                    "1 {} {} {}: {}",
+                    x,
+                    y,
+                    z,
+                    a[[1, x, y, z]]
+                );
+                assert_eq!(
+                    a[[2, x, y, z]],
+                    3003 + z as i32,
+                    "2 {} {} {}: {}",
+                    x,
+                    y,
+                    z,
+                    a[[2, x, y, z]]
+                );
             }
         }
     }
 }

-
 #[test]
 fn test_read_ndarray_oob() {
-
     let dir = tempdir::TempDir::new("rust_n5_ndarray_tests").unwrap();
-    let n = N5Filesystem::open_or_create(dir.path())
-        .expect("Failed to create N5 filesystem");
+    let n = N5Filesystem::open_or_create(dir.path()).expect("Failed to create N5 filesystem");

     let block_size = smallvec![50, 100];
     let data_attrs = DatasetAttributes::new(
@@ -95,26 +115,22 @@ fn test_read_ndarray_oob() {
     n.create_dataset(path_name, &data_attrs)
         .expect("Failed to create dataset");

-    let block_in = VecDataBlock::new(
-        smallvec![1, 1],
-        smallvec![1, 1],
-        vec![1]);
+    let block_in = VecDataBlock::new(smallvec![1, 1], smallvec![1, 1], vec![1]);
     n.write_block(path_name, &data_attrs, &block_in)
         .expect("Failed to write block");

     let bbox = BoundingBox::new(smallvec![45, 175], smallvec![50, 50]);
-    let a = n.read_ndarray::<i32>(path_name, &data_attrs, &bbox).unwrap();
+    let a = n
+        .read_ndarray::<i32>(path_name, &data_attrs, &bbox)
+        .unwrap();
     assert!(a.iter().all(|v| *v == 0));
 }

-
 #[test]
 fn test_write_read_ndarray() {
-
     let dir = tempdir::TempDir::new("rust_n5_ndarray_tests").unwrap();
-    let n = N5Filesystem::open_or_create(dir.path())
-        .expect("Failed to create N5 filesystem");
+    let n = N5Filesystem::open_or_create(dir.path()).expect("Failed to create N5 filesystem");

     let block_size = smallvec![3, 4, 2, 1];
     let data_attrs = DatasetAttributes::new(
@@ -130,20 +146,24 @@ fn test_write_read_ndarray() {
     let rng = rand::thread_rng();
     let arr_shape = [3, 35, 15, 7];

-    let array: Array<i32, _> = Array::from_iter(
-        rng.sample_iter(&Standard)
-            .take(arr_shape.iter().product()))
-        .into_shape(arr_shape.clone()).unwrap()
-        .into_dyn();
+    let array: Array<i32, _> =
+        Array::from_iter(rng.sample_iter(&Standard).take(arr_shape.iter().product()))
+            .into_shape(arr_shape.clone())
+            .unwrap()
+            .into_dyn();
     let offset = smallvec![0, 5, 4, 3];

-    n.write_ndarray(path_name, &data_attrs, offset.clone(), &array, 0).unwrap();
+    n.write_ndarray(path_name, &data_attrs, offset.clone(), &array, 0)
+        .unwrap();

     let bbox = BoundingBox::new(offset, arr_shape.iter().map(|s| *s as u64).collect());
-    let a = n.read_ndarray::<i32>("test/dataset/group", &data_attrs, &bbox).unwrap();
+    let a = n
+        .read_ndarray::<i32>("test/dataset/group", &data_attrs, &bbox)
+        .unwrap();
     // Also test c-order.
     let mut a_c = Array::zeros(bbox.size_ndarray_shape().as_slice());
-    n.read_ndarray_into::<i32>("test/dataset/group", &data_attrs, &bbox, a_c.view_mut()).unwrap();
+    n.read_ndarray_into::<i32>("test/dataset/group", &data_attrs, &bbox, a_c.view_mut())
+        .unwrap();

     assert_eq!(array, a);
     assert_eq!(array, a_c);
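Taken together, the ndarray tests above amount to the round trip below through the use_ndarray feature: write an n-dimensional array spanning multiple blocks at an offset, then read the same bounding box back. A minimal sketch (the 2-D shape, offset, and dataset path are illustrative, and the dataset is assumed to already exist with matching attributes):

use n5::ndarray::prelude::*;
use n5::prelude::*;
use n5::smallvec::smallvec;
use ndarray::Array;

fn ndarray_roundtrip(n: &N5Filesystem, data_attrs: &DatasetAttributes) {
    // Write a 20x30 array at offset (0, 0); the final argument is the
    // fill value used for unwritten parts of partially covered blocks.
    let array = Array::from_iter(0..600_i32)
        .into_shape([20, 30])
        .unwrap()
        .into_dyn();
    n.write_ndarray("test/dataset/group", data_attrs, smallvec![0, 0], &array, 0)
        .expect("Failed to write ndarray");

    // Read the same region back; blocks never written read as the fill value.
    let bbox = BoundingBox::new(smallvec![0, 0], smallvec![20, 30]);
    let a = n
        .read_ndarray::<i32>("test/dataset/group", data_attrs, &bbox)
        .expect("Failed to read ndarray");
    assert_eq!(array, a);
}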