Merge branch 'ref-workflow-scripts' into add-workflow-description
teor2345 authored Dec 12, 2023
2 parents 33ab9da + 263f002 commit 8250b60
Showing 54 changed files with 1,029 additions and 404 deletions.
3 changes: 3 additions & 0 deletions .cargo/config.toml
@@ -56,6 +56,9 @@ rustflags = [
"-Wmissing_docs",

# TODOs:
# Fix this lint eventually.
"-Aclippy::result_large_err",

# `cargo fix` might help do these fixes,
# or add a config.toml to sub-directories which should allow these lints,
# or try allowing the lint in the specific module (lib.rs doesn't seem to work in some cases)
36 changes: 22 additions & 14 deletions .github/workflows/ci-build-crates.yml
@@ -71,17 +71,18 @@ jobs:
# This step is meant to dynamically create a JSON containing the values of each crate
# available in this repo in the root directory. We use `cargo tree` to accomplish this task.
#
# The result from `cargo tree` is then transform to JSON values between double quotes,
# and separated by commas, then added to a `crates.txt` and assigned to a $JSON_CRATES variable.
# The result from `cargo tree` is then sorted so the longest job (zebrad) runs first,
# transformed to JSON values between double quotes, and separated by commas,
# then added to a `crates.txt`.
#
# A JSON object is created and assigned to a $MATRIX variable, which is use to create an output
# named `matrix`, which is then used as the input in following steps,
# A JSON object is created and assigned to a $MATRIX variable, which is used to create an
# output named `matrix`, which is then used as the input in following steps,
# using ` ${{ fromJson(needs.matrix.outputs.matrix) }}`
- id: set-matrix
name: Dynamically build crates JSON
run: |
TEMP_DIR=$(mktemp -d)
echo "$(cargo tree --depth 0 --edges no-normal,no-dev,no-build,no-proc-macro --prefix none | cut -d ' ' -f1 | sed '/^$/d' | awk '{ printf "\"%s\",\n", $0 }' | sed '$ s/.$//')" > $TEMP_DIR/crates.txt
cargo tree --depth 0 --edges no-normal,no-dev,no-build,no-proc-macro --prefix none | cut -d ' ' -f1 | sed '/^$/d' | LC_ALL=C sort --reverse | awk '{ printf "\"%s\",\n", $0 }' | sed '$ s/.$//' > $TEMP_DIR/crates.txt
MATRIX=$( (
echo '{ "crate" : ['
echo "$(cat $TEMP_DIR/crates.txt)"
@@ -114,8 +115,9 @@ jobs:
# Some of these builds take more than 14GB disk space
runs-on: ubuntu-latest-m
strategy:
# avoid rate-limit errors by only launching a few of these jobs at a time
max-parallel: 2
# avoid rate-limit errors by only launching a few of these jobs at a time,
# but still finish in a similar time to the longest tests
max-parallel: 4
fail-fast: true
matrix: ${{ fromJson(needs.matrix.outputs.matrix) }}

@@ -137,21 +139,27 @@
run: |
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal
# We could use `features: ['', '--all-features', '--no-default-features']` as a matrix argument,
# but it's faster to run these commands sequentially, so they can re-use the local cargo cache.
#
# Some Zebra crates do not have any features, and most don't have any default features.
- name: Build ${{ matrix.crate }} crate with no default features
# We could use `features: ['', '--all-features', '--no-default-features']` as a matrix argument,
# but it's faster to run these commands sequentially, so they can re-use the local cargo cache.
#
# Some Zebra crates do not have any features, and most don't have any default features.
# Some targets activate features, but we still need to be able to build without them.
- name: Build ${{ matrix.crate }} crate with default features
run: |
cargo clippy --package ${{ matrix.crate }} -- -D warnings
cargo build --package ${{ matrix.crate }}
- name: Build ${{ matrix.crate }} crate with no default features and all targets
run: |
cargo clippy --package ${{ matrix.crate }} --no-default-features --all-targets -- -D warnings
cargo build --package ${{ matrix.crate }} --no-default-features --all-targets
- name: Build ${{ matrix.crate }} crate with default features
- name: Build ${{ matrix.crate }} crate with default features and all targets
run: |
cargo clippy --package ${{ matrix.crate }} --all-targets -- -D warnings
cargo build --package ${{ matrix.crate }} --all-targets
- name: Build ${{ matrix.crate }} crate with all features
- name: Build ${{ matrix.crate }} crate with all features and all targets
run: |
cargo clippy --package ${{ matrix.crate }} --all-features --all-targets -- -D warnings
cargo build --package ${{ matrix.crate }} --all-features --all-targets
10 changes: 5 additions & 5 deletions .github/workflows/ci-lint.yml
@@ -44,7 +44,7 @@ jobs:

- name: Rust files
id: changed-files-rust
uses: tj-actions/[email protected].0
uses: tj-actions/[email protected].1
with:
files: |
**/*.rs
@@ -56,7 +56,7 @@
- name: Workflow files
id: changed-files-workflows
uses: tj-actions/[email protected].0
uses: tj-actions/[email protected].1
with:
files: |
.github/workflows/*.yml
@@ -167,7 +167,7 @@ jobs:
needs: changed-files
steps:
- uses: actions/[email protected]
- uses: plettich/action-codespell@master
- uses: codespell-project/actions-codespell@v2.0
with:
github_token: ${{ secrets.github_token }}
level: warning
only_warn: 1

17 changes: 13 additions & 4 deletions .github/workflows/ci-unit-tests-docker.yml
@@ -135,17 +135,26 @@ jobs:
# If some tests hang, add "-- --nocapture" for just that test, or for all the tests.
#
# TODO: move this test command into entrypoint.sh
# add a separate experimental workflow job if this job is slow
- name: Run zebrad tests
env:
NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }}
run: |
docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}
docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "${{ env.TEST_FEATURES }}" --workspace -- --include-ignored
# Currently GitHub doesn't allow empty variables
# Run unit tests, basic acceptance tests, and ignored tests with experimental features.
#
# TODO: move this test command into entrypoint.sh
- name: Run zebrad tests with experimental features
env:
NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }}
run: |
# GitHub doesn't allow empty variables
if [[ -n "${{ vars.RUST_EXPERIMENTAL_FEATURES }}" && "${{ vars.RUST_EXPERIMENTAL_FEATURES }}" != " " ]]; then
docker run -e NETWORK --name zebrad-tests-experimental --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "${{ env.EXPERIMENTAL_FEATURES }} " --workspace -- --include-ignored
else
echo "Experimental builds are disabled, set RUST_EXPERIMENTAL_FEATURES in GitHub actions variables to enable them"
fi
env:
NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }}
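The `if` guard in the step above exists because GitHub refuses to store an empty Actions variable, so a single space doubles as a "disabled" sentinel. The same check, sketched as a standalone script:

```bash
# Sketch of the guard: treat an unset/empty variable and the single-space
# sentinel both as "experimental builds disabled".
RUST_EXPERIMENTAL_FEATURES="${RUST_EXPERIMENTAL_FEATURES:-}"
if [[ -n "${RUST_EXPERIMENTAL_FEATURES}" && "${RUST_EXPERIMENTAL_FEATURES}" != " " ]]; then
  echo "Experimental builds enabled: ${RUST_EXPERIMENTAL_FEATURES}"
else
  echo "Experimental builds are disabled, set RUST_EXPERIMENTAL_FEATURES to enable them"
fi
```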
# Run state tests with fake activation heights.
#
40 changes: 20 additions & 20 deletions .github/workflows/scripts/gcp-get-cached-disks.sh
@@ -6,7 +6,7 @@
# and finally checks other branches if needed. The selected image is used for
# setting up the environment in a CI/CD pipeline.

set -euo pipefail
set -eo pipefail

# Function to find and report a cached disk image
find_cached_disk_image() {
@@ -16,15 +16,15 @@ find_cached_disk_image() {

disk_name=$(gcloud compute images list --filter="status=READY AND name~${search_pattern}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)

# Use >&2 to redirect to stderr and avoid sending wrong assignments to stdout
if [[ -n "${disk_name}" ]]; then
echo "Found ${git_source} Disk: ${disk_name}"
echo "Found ${git_source} Disk: ${disk_name}" >&2
disk_description=$(gcloud compute images describe "${disk_name}" --format="value(DESCRIPTION)")
echo "Description: ${disk_description}"
echo "Description: ${disk_description}" >&2
echo "${disk_name}" # This is the actual return value when a disk is found
else
echo "No ${git_source} disk found."
echo "No ${git_source} disk found." >&2
fi

echo "${disk_name}"
}
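The `>&2` redirects are the point of this change: callers capture the function's stdout via command substitution, so any log line printed to stdout would be glued onto the returned disk name. A minimal usage sketch (the search pattern is illustrative):

```bash
# Command substitution captures stdout only; the log lines go to stderr,
# so the variable receives just the disk name (or nothing if none found).
CACHED_DISK_NAME=$(find_cached_disk_image "zebrad-cache-.+-mainnet-tip" "commit")
echo "Captured disk name: '${CACHED_DISK_NAME}'"
```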

# Extract local state version
@@ -41,17 +41,19 @@ fi

# Find the most suitable cached disk image
echo "Finding the most suitable cached disk image..."
COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
CACHED_DISK_NAME=$(find_cached_disk_image "${COMMIT_DISK_PREFIX}" "commit")

if [[ -z "${CACHED_DISK_NAME}" && "${PREFER_MAIN_CACHED_STATE}" == "true" ]]; then
MAIN_DISK_PREFIX="${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
CACHED_DISK_NAME=$(find_cached_disk_image "${MAIN_DISK_PREFIX}" "main branch")
fi

if [[ -z "${CACHED_DISK_NAME}" ]]; then
ANY_DISK_PREFIX="${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
CACHED_DISK_NAME=$(find_cached_disk_image "${ANY_DISK_PREFIX}" "any branch")
# Try to find a cached disk image from the current commit
COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
CACHED_DISK_NAME=$(find_cached_disk_image "${COMMIT_DISK_PREFIX}" "commit")
# If no cached disk image is found, try to find one from the main branch
if [[ "${PREFER_MAIN_CACHED_STATE}" == "true" ]]; then
MAIN_DISK_PREFIX="${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
CACHED_DISK_NAME=$(find_cached_disk_image "${MAIN_DISK_PREFIX}" "main branch")
# Else, try to find one from any branch
else
ANY_DISK_PREFIX="${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
CACHED_DISK_NAME=$(find_cached_disk_image "${ANY_DISK_PREFIX}" "any branch")
fi
fi

# Handle case where no suitable disk image is found
@@ -66,7 +68,5 @@ echo "Selected Disk: ${CACHED_DISK_NAME}"

# Exporting variables for subsequent steps
echo "Exporting variables for subsequent steps..."
echo "cached_disk_name=${CACHED_DISK_NAME}" >> "${GITHUB_OUTPUT}"
echo "STATE_VERSION=${LOCAL_STATE_VERSION}" >> "${GITHUB_ENV}"
echo "CACHED_DISK_NAME=${CACHED_DISK_NAME}" >> "${GITHUB_ENV}"
echo "DISK_OPTION=image=${CACHED_DISK_NAME,}" >> "${GITHUB_ENV}"
export CACHED_DISK_NAME="${CACHED_DISK_NAME}"
export LOCAL_STATE_VERSION="${LOCAL_STATE_VERSION}"
2 changes: 1 addition & 1 deletion .github/workflows/sub-build-docker-image.yml
@@ -89,7 +89,7 @@ jobs:
# Automatic tag management and OCI Image Format Specification for labels
- name: Docker meta
id: meta
uses: docker/metadata-action@v5.0.0
uses: docker/metadata-action@v5.3.0
with:
# list of Docker images to use as base name for tags
images: |
2 changes: 1 addition & 1 deletion .github/workflows/sub-build-lightwalletd.yml
@@ -76,7 +76,7 @@ jobs:
# Automatic tag management and OCI Image Format Specification for labels
- name: Docker meta
id: meta
uses: docker/metadata-action@v5.0.0
uses: docker/metadata-action@v5.3.0
with:
# list of Docker images to use as base name for tags
images: |
9 changes: 7 additions & 2 deletions .github/workflows/sub-deploy-integration-tests-gcp.yml
@@ -193,7 +193,12 @@ jobs:
DISK_SUFFIX: ${{ inputs.disk_suffix }}
PREFER_MAIN_CACHED_STATE: ${{ inputs.prefer_main_cached_state }}
run: |
./.github/workflows/scripts/gcp-get-cached-disks.sh
source ./.github/workflows/scripts/gcp-get-cached-disks.sh
echo "cached_disk_name=${CACHED_DISK_NAME}" >> "${GITHUB_OUTPUT}"
echo "STATE_VERSION=${LOCAL_STATE_VERSION}" >> "${GITHUB_ENV}"
echo "CACHED_DISK_NAME=${CACHED_DISK_NAME}" >> "${GITHUB_ENV}"
echo "DISK_OPTION=image=${CACHED_DISK_NAME}" >> "${GITHUB_ENV}"
# Create a Compute Engine virtual machine and attach a cached state disk using the
# $CACHED_DISK_NAME variable as the source image to populate the disk cached state
@@ -207,7 +212,7 @@
--boot-disk-type pd-ssd \
--image-project=cos-cloud \
--image-family=cos-stable \
--create-disk=${DISK_OPTION}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=400GB,type=pd-ssd \
--create-disk=${DISK_OPTION},name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=400GB,type=pd-ssd \
--container-image=gcr.io/google-containers/busybox \
--machine-type ${{ vars.GCP_LARGE_MACHINE }} \
--network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \
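The comma added before `name=` is the functional fix in this hunk: without it, the image property and the `name` key fused into one malformed token. A quick illustration (the disk and test names are made up):

```bash
# Before vs. after the comma fix (values illustrative):
DISK_OPTION="image=zebrad-cache-main-abc1234-v25-mainnet-tip"
echo "--create-disk=${DISK_OPTION}name=test-disk"   # fused: ...-tipname=test-disk
echo "--create-disk=${DISK_OPTION},name=test-disk"  # two separate disk properties
```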
1 change: 1 addition & 0 deletions Cargo.lock
@@ -5798,6 +5798,7 @@ name = "zebra-scan"
version = "0.1.0-alpha.0"
dependencies = [
"bls12_381",
"chrono",
"color-eyre",
"ff",
"group",
8 changes: 6 additions & 2 deletions book/src/user/install.md
@@ -6,7 +6,6 @@ Follow the [Docker or compilation instructions](https://zebra.zfnd.org/index.htm

To compile Zebra from source, you will need to [install some dependencies.](https://zebra.zfnd.org/index.html#building-zebra).


## Alternative Compilation Methods

### Compiling Manually from git
@@ -58,7 +57,12 @@ If you're having trouble with:

- use `cargo install` without `--locked` to build with the latest versions of each dependency

#### Optional Tor feature
## Experimental Shielded Scanning feature

- install the `rocksdb-tools` or `rocksdb` packages to get the `ldb` binary, which allows expert users to
[query the scanner database](https://zebra.zfnd.org/user/shielded-scan.html). This binary is sometimes called `rocksdb_ldb`.
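For example, once one of those packages is installed, the stock RocksDB tooling can inspect a scanner database (the path below is only an illustrative guess; see the shielded-scan page for the real, platform-specific locations):

```bash
# List the column families in a scanner database with RocksDB's ldb tool
# (named rocksdb_ldb on some distros; the database path is illustrative).
ldb --db="$HOME/.cache/zebra/private-scan/v1/mainnet" list_column_families
```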

## Optional Tor feature

- **sqlite linker errors:** libsqlite3 is an optional dependency of the `zebra-network/tor` feature.
If you don't have it installed, you might see errors like `note: /usr/bin/ld: cannot find -lsqlite3`.
5 changes: 3 additions & 2 deletions book/src/user/run.md
@@ -13,8 +13,9 @@ structure, and documentation for all of the config options can be found

You can run Zebra as a:

- [`lightwalletd` backend](https://zebra.zfnd.org/user/lightwalletd.html), and
- experimental [mining backend](https://zebra.zfnd.org/user/mining.html).
- [`lightwalletd` backend](https://zebra.zfnd.org/user/lightwalletd.html),
- [mining backend](https://zebra.zfnd.org/user/mining.html), or
- experimental [Sapling shielded transaction scanner](https://zebra.zfnd.org/user/shielded-scan.html).

## Supported versions

36 changes: 36 additions & 0 deletions book/src/user/shielded-scan.md
@@ -0,0 +1,36 @@
# Zebra Shielded Scanning

This document describes how expert users can try Zebra's shielded scanning feature.

For now, we only support Sapling, and only store transaction IDs in the scanner results database.
Ongoing development is tracked in issue [#7728](https://github.com/ZcashFoundation/zebra/issues/7728).

## Important Security Warning

Zebra's shielded scanning feature has known security issues. It is for experimental use only.

Do not use regular or sensitive viewing keys with Zebra's experimental scanning feature. Do not use this
feature on a shared machine. We suggest generating new keys for experimental use.

## Build & Install

Build Zebra with the `shielded-scan` feature enabled. TODO: add examples, document the feature in zebrad/src/lib.rs.

## Configuration

In `zebrad.toml`, use:
- the `[shielded_scan]` table for database settings, and
- the `[shielded_scan.sapling_keys_to_scan]` table for diversifiable full viewing keys.

TODO: add a definition for DFVK, link to its format, and add examples and links to keys and database settings.

## Running Sapling Scanning

Launch Zebra and wait for 12-24 hours.
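For example (a sketch, assuming a `zebrad` built with the `shielded-scan` feature and the configuration above saved as `zebrad.toml`):

```bash
# Start Zebra with the scanner config; the initial sync plus scanning is
# what takes the 12-24 hours mentioned above.
zebrad -c zebrad.toml start
```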

## Expert: Querying Raw Sapling Scanning Results

TODO: Copy these instructions and examples here:
- https://github.com/ZcashFoundation/zebra/issues/8046#issuecomment-1844772654

Database paths are different on Linux, macOS, and Windows.
4 changes: 3 additions & 1 deletion docker/Dockerfile
@@ -48,6 +48,7 @@ RUN apt-get -qq update && \
ca-certificates \
protobuf-compiler \
rsync \
rocksdb-tools \
; \
rm -rf /var/lib/apt/lists/* /tmp/*

@@ -181,7 +182,8 @@ COPY --from=release /entrypoint.sh /

RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates
ca-certificates \
rocksdb-tools

# Config settings for zebrad
ARG FEATURES
2 changes: 1 addition & 1 deletion zebra-chain/src/amount.rs
@@ -491,7 +491,7 @@ impl Error {
/// -MAX_MONEY..=MAX_MONEY,
/// );
/// ```
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Default)]
pub struct NegativeAllowed;

impl Constraint for NegativeAllowed {
2 changes: 1 addition & 1 deletion zebra-chain/src/block/hash.rs
@@ -21,7 +21,7 @@ use proptest_derive::Arbitrary;
/// Note: Zebra displays transaction and block hashes in big-endian byte-order,
/// following the u256 convention set by Bitcoin and zcashd.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))]
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary, Default))]
pub struct Hash(pub [u8; 32]);

impl Hash {
1 change: 1 addition & 0 deletions zebra-chain/src/block/height.rs
@@ -23,6 +23,7 @@ pub mod json_conversion;
/// There are multiple formats for serializing a height, so we don't implement
/// `ZcashSerialize` or `ZcashDeserialize` for `Height`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Default))]
pub struct Height(pub u32);

#[derive(Error, Debug)]
2 changes: 1 addition & 1 deletion zebra-chain/src/block/merkle.rs
@@ -70,7 +70,7 @@ use proptest_derive::Arbitrary;
///
/// [ZIP-244]: https://zips.z.cash/zip-0244
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))]
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary, Default))]
pub struct Root(pub [u8; 32]);

impl fmt::Debug for Root {
2 changes: 1 addition & 1 deletion zebra-chain/src/fmt.rs
@@ -163,7 +163,7 @@ where

/// Wrapper to override `Debug`, redirecting it to hex-encode the type.
/// The type must implement `AsRef<[u8]>`.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Default)]
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))]
#[serde(transparent)]
pub struct HexDebug<T: AsRef<[u8]>>(pub T);